Merge "usb: dwc3: Increase the TxFIFO resize factor"
diff --git a/AndroidKernel.mk b/AndroidKernel.mk
index 3ab1fd8..f404b5a 100644
--- a/AndroidKernel.mk
+++ b/AndroidKernel.mk
@@ -162,14 +162,31 @@
echo $(KERNEL_CONFIG_OVERRIDE) >> $(KERNEL_OUT)/.config; \
$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(real_cc) oldconfig; fi
-$(TARGET_PREBUILT_INT_KERNEL): $(KERNEL_OUT) $(KERNEL_HEADERS_INSTALL)
+ifeq ($(TARGET_KERNEL_APPEND_DTB), true)
+TARGET_PREBUILT_INT_KERNEL_IMAGE := $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/Image
+$(TARGET_PREBUILT_INT_KERNEL_IMAGE): $(KERNEL_USR)
+$(TARGET_PREBUILT_INT_KERNEL_IMAGE): $(KERNEL_OUT) $(KERNEL_HEADERS_INSTALL)
+ $(hide) echo "Building kernel modules..."
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(real_cc) $(KERNEL_CFLAGS) Image
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(real_cc) $(KERNEL_CFLAGS) modules
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) INSTALL_MOD_PATH=$(BUILD_ROOT_LOC)../$(KERNEL_MODULES_INSTALL) INSTALL_MOD_STRIP=1 $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(real_cc) modules_install
+ $(mv-modules)
+ $(clean-module-folder)
+
+$(TARGET_PREBUILT_INT_KERNEL): $(TARGET_PREBUILT_INT_KERNEL_IMAGE)
$(hide) echo "Building kernel..."
$(hide) rm -rf $(KERNEL_OUT)/arch/$(KERNEL_ARCH)/boot/dts
$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(real_cc) $(KERNEL_CFLAGS)
+else
+TARGET_PREBUILT_INT_KERNEL_IMAGE := $(TARGET_PREBUILT_INT_KERNEL)
+$(TARGET_PREBUILT_INT_KERNEL): $(KERNEL_OUT) $(KERNEL_HEADERS_INSTALL)
+ $(hide) echo "Building kernel..."
+ $(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(real_cc) $(KERNEL_CFLAGS)
$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(real_cc) $(KERNEL_CFLAGS) modules
$(MAKE) -C $(TARGET_KERNEL_SOURCE) O=$(BUILD_ROOT_LOC)$(KERNEL_OUT) INSTALL_MOD_PATH=$(BUILD_ROOT_LOC)../$(KERNEL_MODULES_INSTALL) INSTALL_MOD_STRIP=1 $(KERNEL_MAKE_ENV) ARCH=$(KERNEL_ARCH) CROSS_COMPILE=$(KERNEL_CROSS_COMPILE) $(real_cc) modules_install
$(mv-modules)
$(clean-module-folder)
+endif
$(KERNEL_HEADERS_INSTALL): $(KERNEL_OUT)
$(hide) if [ ! -z "$(KERNEL_HEADER_DEFCONFIG)" ]; then \
diff --git a/Documentation/admin-guide/hw-vuln/spectre.rst b/Documentation/admin-guide/hw-vuln/spectre.rst
index 25f3b25..e05e581 100644
--- a/Documentation/admin-guide/hw-vuln/spectre.rst
+++ b/Documentation/admin-guide/hw-vuln/spectre.rst
@@ -41,10 +41,11 @@
The following CVE entries describe Spectre variants:
- ============= ======================= =================
+ ============= ======================= ==========================
CVE-2017-5753 Bounds check bypass Spectre variant 1
CVE-2017-5715 Branch target injection Spectre variant 2
- ============= ======================= =================
+ CVE-2019-1125 Spectre v1 swapgs Spectre variant 1 (swapgs)
+ ============= ======================= ==========================
Problem
-------
@@ -78,6 +79,13 @@
over the network, see :ref:`[12] <spec_ref12>`. However such attacks
are difficult, low bandwidth, fragile, and are considered low risk.
+Note that, despite its "Bounds Check Bypass" name, Spectre variant 1 is
+not limited to user-controlled array bounds checks. It can affect any
+conditional checks. The kernel entry code's interrupt, exception, and
+NMI handlers all have conditional swapgs checks. Those may be
+problematic in the context of Spectre v1, as kernel code can
+speculatively run with a user GS.
+
Spectre variant 2 (Branch Target Injection)
-------------------------------------------
@@ -132,6 +140,9 @@
1. A user process attacking the kernel
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Spectre variant 1
+~~~~~~~~~~~~~~~~~
+
The attacker passes a parameter to the kernel via a register or
via a known address in memory during a syscall. Such parameter may
be used later by the kernel as an index to an array or to derive
@@ -144,7 +155,40 @@
potentially be influenced for Spectre attacks, new "nospec" accessor
macros are used to prevent speculative loading of data.
- Spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
+Spectre variant 1 (swapgs)
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ An attacker can train the branch predictor to speculatively skip the
+ swapgs path for an interrupt or exception. If they initialize
+ the GS register to a user-space value and the swapgs is speculatively
+ skipped, subsequent GS-related percpu accesses in the speculation
+ window will be done with the attacker-controlled GS value. This
+ could cause privileged memory to be accessed and leaked.
+
+ For example:
+
+ ::
+
+ if (coming from user space)
+ swapgs
+ mov %gs:<percpu_offset>, %reg
+ mov (%reg), %reg1
+
+ When coming from user space, the CPU can speculatively skip the
+ swapgs, and then do a speculative percpu load using the user GS
+ value. So the user can speculatively force a read of any kernel
+ value. If a gadget exists which uses the percpu value as an address
+ in another load/store, then the contents of the kernel value may
+ become visible via an L1 side channel attack.
+
+ A similar attack exists when coming from kernel space. The CPU can
+ speculatively do the swapgs, causing the user GS to get used for the
+ rest of the speculative window.
+
+Spectre variant 2
+~~~~~~~~~~~~~~~~~
+
+ A Spectre variant 2 attacker can :ref:`poison <poison_btb>` the branch
target buffer (BTB) before issuing syscall to launch an attack.
After entering the kernel, the kernel could use the poisoned branch
target buffer on indirect jump and jump to gadget code in speculative
@@ -280,11 +324,18 @@
The possible values in this file are:
- ======================================= =================================
- 'Mitigation: __user pointer sanitation' Protection in kernel on a case by
- case base with explicit pointer
- sanitation.
- ======================================= =================================
+ .. list-table::
+
+ * - 'Not affected'
+ - The processor is not vulnerable.
+ * - 'Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers'
+ - The swapgs protections are disabled; otherwise it has
+ protection in the kernel on a case-by-case basis with explicit
+ pointer sanitization and usercopy LFENCE barriers.
+ * - 'Mitigation: usercopy/swapgs barriers and __user pointer sanitization'
+ - Protection in the kernel on a case-by-case basis with explicit
+ pointer sanitization, usercopy LFENCE barriers, and swapgs LFENCE
+ barriers.
However, the protections are put in place on a case by case basis,
and there is no guarantee that all possible attack vectors for Spectre
@@ -366,12 +417,27 @@
1. Kernel mitigation
^^^^^^^^^^^^^^^^^^^^
+Spectre variant 1
+~~~~~~~~~~~~~~~~~
+
For the Spectre variant 1, vulnerable kernel code (as determined
by code audit or scanning tools) is annotated on a case by case
basis to use nospec accessor macros for bounds clipping :ref:`[2]
<spec_ref2>` to avoid any usable disclosure gadgets. However, it may
not cover all attack vectors for Spectre variant 1.
+ Copy-from-user code has an LFENCE barrier to prevent the access_ok()
+ check from being mis-speculated. The barrier is done by the
+ barrier_nospec() macro.
+
+ For the swapgs variant of Spectre variant 1, LFENCE barriers are
+ added to interrupt, exception and NMI entry where needed. These
+ barriers are done by the FENCE_SWAPGS_KERNEL_ENTRY and
+ FENCE_SWAPGS_USER_ENTRY macros.
+
+Spectre variant 2
+~~~~~~~~~~~~~~~~~
+
For Spectre variant 2 mitigation, the compiler turns indirect calls or
jumps in the kernel into equivalent return trampolines (retpolines)
:ref:`[3] <spec_ref3>` :ref:`[9] <spec_ref9>` to go to the target
@@ -473,6 +539,12 @@
Spectre variant 2 mitigation can be disabled or force enabled at the
kernel command line.
+ nospectre_v1
+
+ [X86,PPC] Disable mitigations for Spectre Variant 1
+ (bounds check bypass). With this option data leaks are
+ possible in the system.
+
nospectre_v2
[X86] Disable all mitigations for the Spectre variant 2
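As background for the spectre.rst hunks above: the variant-1 "bounds clipping" the document refers to is the upstream array_index_nospec() pattern from <linux/nospec.h>. Below is a minimal editor-added sketch of that pattern, not part of the patch; the cmds array and NR_CMDS bound are hypothetical.

```c
#include <linux/errno.h>
#include <linux/nospec.h>

#define NR_CMDS 16			/* hypothetical bound */
static const int cmds[NR_CMDS];

static int lookup_cmd(unsigned long idx)
{
	if (idx >= NR_CMDS)
		return -EINVAL;
	/*
	 * Clamp idx to [0, NR_CMDS) even under misspeculation, so a
	 * bypassed bounds check cannot index outside the array.
	 */
	idx = array_index_nospec(idx, NR_CMDS);
	return cmds[idx];
}
```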
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index a4dcb73..8923a10 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2522,6 +2522,7 @@
Equivalent to: nopti [X86,PPC]
nospectre_v1 [PPC]
nobp=0 [S390]
+ nospectre_v1 [X86]
nospectre_v2 [X86,PPC,S390]
spectre_v2_user=off [X86]
spec_store_bypass_disable=off [X86,PPC]
@@ -2868,9 +2869,9 @@
nosmt=force: Force disable SMT, cannot be undone
via the sysfs control file.
- nospectre_v1 [PPC] Disable mitigations for Spectre Variant 1 (bounds
- check bypass). With this option data leaks are possible
- in the system.
+ nospectre_v1 [X86,PPC] Disable mitigations for Spectre Variant 1
+ (bounds check bypass). With this option data leaks
+ are possible in the system.
nospectre_v2 [X86,PPC_FSL_BOOK3E] Disable all mitigations for the Spectre variant 2
(indirect branch prediction) vulnerability. System may
@@ -3958,6 +3959,13 @@
Run specified binary instead of /init from the ramdisk,
used for early userspace startup. See initrd.
+ rdrand= [X86]
+ force - Override the decision by the kernel to hide the
+ advertisement of RDRAND support (this affects
+ certain AMD processors because of buggy BIOS
+ support, specifically around the suspend/resume
+ path).
+
rdt= [HW,X86,RDT]
Turn on/off individual RDT features. List is:
cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
diff --git a/Makefile b/Makefile
index 0ed40f8..0cda102 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 62
+SUBLEVEL = 69
EXTRAVERSION =
NAME = "People's Front"
@@ -434,6 +434,7 @@
KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
KBUILD_LDFLAGS :=
GCC_PLUGINS_CFLAGS :=
+CLANG_FLAGS :=
export ARCH SRCARCH CONFIG_SHELL HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
export CPP AR NM STRIP OBJCOPY OBJDUMP KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS
@@ -487,7 +488,7 @@
ifeq ($(cc-name),clang)
ifneq ($(CROSS_COMPILE),)
CLANG_TRIPLE ?= $(CROSS_COMPILE)
-CLANG_FLAGS := --target=$(notdir $(CLANG_TRIPLE:%-=%))
+CLANG_FLAGS += --target=$(notdir $(CLANG_TRIPLE:%-=%))
ifeq ($(shell $(srctree)/scripts/clang-android.sh $(CC) $(CLANG_FLAGS)), y)
$(error "Clang with Android --target detected. Did you specify CLANG_TRIPLE?")
endif
@@ -499,6 +500,7 @@
CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN)
endif
CLANG_FLAGS += -no-integrated-as
+CLANG_FLAGS += -Werror=unknown-warning-option
KBUILD_CFLAGS += $(CLANG_FLAGS)
KBUILD_AFLAGS += $(CLANG_FLAGS)
export CLANG_FLAGS
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 74953e7..0cce541 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -199,7 +199,6 @@
config ARC_SMP_HALT_ON_RESET
bool "Enable Halt-on-reset boot mode"
- default y if ARC_UBOOT_SUPPORT
help
In SMP configuration cores can be configured as Halt-on-reset
or they could all start at same time. For Halt-on-reset, non
@@ -539,18 +538,6 @@
endif
-config ARC_UBOOT_SUPPORT
- bool "Support uboot arg Handling"
- default n
- help
- ARC Linux by default checks for uboot provided args as pointers to
- external cmdline or DTB. This however breaks in absence of uboot,
- when booting from Metaware debugger directly, as the registers are
- not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus
- registers look like uboot args to kernel which then chokes.
- So only enable the uboot arg checking/processing if users are sure
- of uboot being in play.
-
config ARC_BUILTIN_DTB_NAME
string "Built in DTB"
help
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig
index 6e84060..621f594 100644
--- a/arch/arc/configs/nps_defconfig
+++ b/arch/arc/configs/nps_defconfig
@@ -31,7 +31,6 @@
# CONFIG_ARC_HAS_LLSC is not set
CONFIG_ARC_KVADDR_SIZE=402
CONFIG_ARC_EMUL_UNALIGNED=y
-CONFIG_ARC_UBOOT_SUPPORT=y
CONFIG_PREEMPT=y
CONFIG_NET=y
CONFIG_UNIX=y
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig
index 1e59a2e..e447ace6 100644
--- a/arch/arc/configs/vdk_hs38_defconfig
+++ b/arch/arc/configs/vdk_hs38_defconfig
@@ -13,7 +13,6 @@
CONFIG_ARC_PLAT_AXS10X=y
CONFIG_AXS103=y
CONFIG_ISA_ARCV2=y
-CONFIG_ARC_UBOOT_SUPPORT=y
CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38"
CONFIG_PREEMPT=y
CONFIG_NET=y
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index b5c3f6c..c82cdb1 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -15,8 +15,6 @@
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
# CONFIG_ARC_TIMERS_64BIT is not set
-# CONFIG_ARC_SMP_HALT_ON_RESET is not set
-CONFIG_ARC_UBOOT_SUPPORT=y
CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
CONFIG_PREEMPT=y
CONFIG_NET=y
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 208bf2c..a72bbda 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -100,7 +100,6 @@
st.ab 0, [r5, 4]
1:
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
; Uboot - kernel ABI
; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
; r1 = magic number (always zero as of now)
@@ -109,7 +108,6 @@
st r0, [@uboot_tag]
st r1, [@uboot_magic]
st r2, [@uboot_arg]
-#endif
; setup "current" tsk and optionally cache it in dedicated r25
mov r9, @init_task
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index a121893..89c97dc 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -493,7 +493,6 @@ void __init handle_uboot_args(void)
bool use_embedded_dtb = true;
bool append_cmdline = false;
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
/* check that we know this tag */
if (uboot_tag != UBOOT_TAG_NONE &&
uboot_tag != UBOOT_TAG_CMDLINE &&
@@ -525,7 +524,6 @@ void __init handle_uboot_args(void)
append_cmdline = true;
ignore_uboot_args:
-#endif
if (use_embedded_dtb) {
machine_desc = setup_machine_fdt(__dtb_start);
diff --git a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
index 36efe41..9e33c41 100644
--- a/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
+++ b/arch/arm/boot/dts/bcm47094-linksys-panamera.dts
@@ -125,6 +125,9 @@
};
mdio-bus-mux {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
/* BIT(9) = 1 => external mdio */
mdio_ext: mdio@200 {
reg = <0x200>;
diff --git a/arch/arm/boot/dts/rk3288-veyron-mickey.dts b/arch/arm/boot/dts/rk3288-veyron-mickey.dts
index 1e0158a..a593d0a 100644
--- a/arch/arm/boot/dts/rk3288-veyron-mickey.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-mickey.dts
@@ -124,10 +124,6 @@
};
};
-&emmc {
- /delete-property/mmc-hs200-1_8v;
-};
-
&i2c2 {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/rk3288-veyron-minnie.dts b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
index f95d0c5..6e89460 100644
--- a/arch/arm/boot/dts/rk3288-veyron-minnie.dts
+++ b/arch/arm/boot/dts/rk3288-veyron-minnie.dts
@@ -90,10 +90,6 @@
pwm-off-delay-ms = <200>;
};
-&emmc {
- /delete-property/mmc-hs200-1_8v;
-};
-
&gpio_keys {
pinctrl-0 = <&pwr_key_l &ap_lid_int_l &volum_down_l &volum_up_l>;
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi
index c706adf..440d678 100644
--- a/arch/arm/boot/dts/rk3288.dtsi
+++ b/arch/arm/boot/dts/rk3288.dtsi
@@ -227,6 +227,7 @@
<GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>,
<GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
clock-frequency = <24000000>;
+ arm,no-tick-in-suspend;
};
timer: timer@ff810000 {
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index fd6cde2..871fa50 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -658,13 +658,22 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
}
static void reset_coproc_regs(struct kvm_vcpu *vcpu,
- const struct coproc_reg *table, size_t num)
+ const struct coproc_reg *table, size_t num,
+ unsigned long *bmap)
{
unsigned long i;
for (i = 0; i < num; i++)
- if (table[i].reset)
+ if (table[i].reset) {
+ int reg = table[i].reg;
+
table[i].reset(vcpu, &table[i]);
+ if (reg > 0 && reg < NR_CP15_REGS) {
+ set_bit(reg, bmap);
+ if (table[i].is_64bit)
+ set_bit(reg + 1, bmap);
+ }
+ }
}
static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
@@ -1439,17 +1448,15 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
{
size_t num;
const struct coproc_reg *table;
-
- /* Catch someone adding a register without putting in reset entry. */
- memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
+ DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };
/* Generic chip reset first (so target could override). */
- reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
+ reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);
table = get_target_table(vcpu->arch.target, &num);
- reset_coproc_regs(vcpu, table, num);
+ reset_coproc_regs(vcpu, table, num, bmap);
for (num = 1; num < NR_CP15_REGS; num++)
- WARN(vcpu_cp15(vcpu, num) == 0x42424242,
+ WARN(!test_bit(num, bmap),
"Didn't reset vcpu_cp15(vcpu, %zi)", num);
}
diff --git a/arch/arm/mach-davinci/sleep.S b/arch/arm/mach-davinci/sleep.S
index cd350de..efcd400 100644
--- a/arch/arm/mach-davinci/sleep.S
+++ b/arch/arm/mach-davinci/sleep.S
@@ -37,6 +37,7 @@
#define DEEPSLEEP_SLEEPENABLE_BIT BIT(31)
.text
+ .arch armv5te
/*
* Move DaVinci into deep sleep state
*
diff --git a/arch/arm/mach-rpc/dma.c b/arch/arm/mach-rpc/dma.c
index fb48f31..c4c9666 100644
--- a/arch/arm/mach-rpc/dma.c
+++ b/arch/arm/mach-rpc/dma.c
@@ -131,7 +131,7 @@ static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
} while (1);
idma->state = ~DMA_ST_AB;
- disable_irq(irq);
+ disable_irq_nosync(irq);
return IRQ_HANDLED;
}
@@ -174,6 +174,9 @@ static void iomd_enable_dma(unsigned int chan, dma_t *dma)
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
+ idma->dma_addr = idma->dma.sg->dma_address;
+ idma->dma_len = idma->dma.sg->length;
+
iomd_writeb(DMA_CR_C, dma_base + CR);
idma->state = DMA_ST_AB;
}
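For context on the disable_irq_nosync() change above: disable_irq() synchronizes against any running handler for that IRQ before returning, so calling it from inside the handler itself can deadlock; disable_irq_nosync() only masks the line and returns. A minimal sketch of the corrected pattern (the handler and device are hypothetical):

```c
#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	/*
	 * disable_irq(irq) here could deadlock: it waits for running
	 * handlers of this IRQ to complete, and *we* are that handler.
	 * disable_irq_nosync() just masks the line and returns.
	 */
	disable_irq_nosync(irq);
	return IRQ_HANDLED;
}
```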
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index abc6b05..ee0625d 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -29,14 +29,15 @@
DTB_OBJS := $(shell find $(obj)/dts/ -name \*.dtb)
# Add RTIC DTB to the DTB list if RTIC MPGen is enabled
+# Note: we keep this for compatibility with
+# BUILD_ARM64_APPENDED_DTB_IMAGE targets.
+# rtic_mp.dts is generated along with vmlinux when
+# MPGen is enabled (RTIC_MPGEN defined).
ifdef RTIC_MPGEN
DTB_OBJS += rtic_mp.dtb
endif
rtic_mp.dtb: vmlinux FORCE
- $(RTIC_MPGEN) --objcopy="${OBJCOPY}" --objdump="${OBJDUMP}" \
- --binpath="" --vmlinux="vmlinux" --config=${KCONFIG_CONFIG} \
- --cc="${CC} ${KBUILD_AFLAGS}" --dts=rtic_mp.dts && \
$(DTC) -O dtb -o rtic_mp.dtb -b 0 $(DTC_FLAGS) rtic_mp.dts
$(obj)/Image: vmlinux FORCE
diff --git a/arch/arm64/boot/dts/rockchip/rk3399.dtsi b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
index df7e62d..cea44a7 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399.dtsi
@@ -1643,11 +1643,11 @@
reg = <0x0 0xff914000 0x0 0x100>, <0x0 0xff915000 0x0 0x100>;
interrupts = <GIC_SPI 43 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "isp0_mmu";
- clocks = <&cru ACLK_ISP0_NOC>, <&cru HCLK_ISP0_NOC>;
+ clocks = <&cru ACLK_ISP0_WRAPPER>, <&cru HCLK_ISP0_WRAPPER>;
clock-names = "aclk", "iface";
#iommu-cells = <0>;
+ power-domains = <&power RK3399_PD_ISP0>;
rockchip,disable-mmu-reset;
- status = "disabled";
};
isp1_mmu: iommu@ff924000 {
@@ -1655,11 +1655,11 @@
reg = <0x0 0xff924000 0x0 0x100>, <0x0 0xff925000 0x0 0x100>;
interrupts = <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH 0>;
interrupt-names = "isp1_mmu";
- clocks = <&cru ACLK_ISP1_NOC>, <&cru HCLK_ISP1_NOC>;
+ clocks = <&cru ACLK_ISP1_WRAPPER>, <&cru HCLK_ISP1_WRAPPER>;
clock-names = "aclk", "iface";
#iommu-cells = <0>;
+ power-domains = <&power RK3399_PD_ISP1>;
rockchip,disable-mmu-reset;
- status = "disabled";
};
hdmi_sound: hdmi-sound {
diff --git a/arch/arm64/configs/vendor/bengal-perf_defconfig b/arch/arm64/configs/vendor/bengal-perf_defconfig
new file mode 100644
index 0000000..d06d4f6
--- /dev/null
+++ b/arch/arm64/configs/vendor/bengal-perf_defconfig
@@ -0,0 +1,528 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_SCHED_WALT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SCHED_CORE_CTL=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_BENGAL=y
+CONFIG_PCI=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_SECCOMP=y
+# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_ENERGY_MODEL=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_MSM_TZ_LOG=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_IP_SCTP=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_SOCKEV_NLMCAST=y
+CONFIG_BT=y
+CONFIG_CFG80211=y
+CONFIG_RFKILL=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RMNET=y
+CONFIG_PHYLIB=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
+CONFIG_WCNSS_MEM_PRE_ALLOC=y
+CONFIG_CLD_LL_CORE=y
+CONFIG_CNSS_GENL=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING=y
+CONFIG_HW_RANDOM=y
+CONFIG_DIAG_CHAR=y
+CONFIG_MSM_ADSPRPC=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_QCOM_GENI=y
+CONFIG_SPI=y
+CONFIG_SPI_QCOM_GENI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_BENGAL=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_THERMAL_TSENS=y
+CONFIG_QTI_ADC_TM=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_V4L_TEST_DRIVERS=y
+CONFIG_VIDEO_VIM2M=y
+CONFIG_VIDEO_VICODEC=y
+CONFIG_DRM=y
+# CONFIG_DRM_MSM is not set
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_QCOM_EMU_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=900
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_TEST=y
+CONFIG_MMC_IPC_LOGGING=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_QCOM_GPI_DMA=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_IPA3=y
+CONFIG_IPA_WDI_UNIFIED_API=y
+CONFIG_RMNET_IPA3=y
+CONFIG_RNDIS_IPA=y
+CONFIG_IPA_UT=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_SM_GPUCC_BENGAL=y
+CONFIG_SM_DISPCC_BENGAL=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_MDT_LOADER=y
+CONFIG_QPNP_PBS=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_SMD_RPM=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_DCC_V2=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_MINIDUMP=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
+CONFIG_MSM_CDSP_LOADER=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QTEE_SHM_BRIDGE=y
+CONFIG_MEM_SHARE_QMI_SERVICE=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_ICNSS=y
+CONFIG_ICNSS_QMI=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_IIO=y
+CONFIG_QCOM_SPMI_ADC5=y
+CONFIG_PWM=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
+CONFIG_SLIMBUS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_XZ_DEC=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SCHEDSTATS=y
+CONFIG_IPC_LOGGING=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_TPDA=y
+CONFIG_CORESIGHT_TPDM=y
+CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_DUMMY=y
+CONFIG_CORESIGHT_REMOTE_ETM=y
+CONFIG_CORESIGHT_TGU=y
diff --git a/arch/arm64/configs/vendor/bengal_defconfig b/arch/arm64/configs/vendor/bengal_defconfig
index 58f039b..b929b48 100644
--- a/arch/arm64/configs/vendor/bengal_defconfig
+++ b/arch/arm64/configs/vendor/bengal_defconfig
@@ -241,6 +241,7 @@
CONFIG_DNS_RESOLVER=y
CONFIG_QRTR=y
CONFIG_QRTR_SMD=y
+CONFIG_SOCKEV_NLMCAST=y
CONFIG_BT=y
CONFIG_CFG80211=y
CONFIG_RFKILL=y
@@ -264,6 +265,7 @@
CONFIG_SCSI_UFS_QCOM=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
CONFIG_DM_UEVENT=y
CONFIG_DM_VERITY=y
CONFIG_DM_VERITY_FEC=y
@@ -272,6 +274,7 @@
CONFIG_DUMMY=y
CONFIG_TUN=y
CONFIG_RMNET=y
+CONFIG_PHYLIB=y
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
@@ -300,6 +303,7 @@
# CONFIG_DEVMEM is not set
CONFIG_SERIAL_MSM_GENI=y
CONFIG_SERIAL_MSM_GENI_CONSOLE=y
+CONFIG_SERIAL_MSM_GENI_HALF_SAMPLING=y
CONFIG_SERIAL_DEV_BUS=y
CONFIG_TTY_PRINTK=y
CONFIG_HW_RANDOM=y
@@ -411,9 +415,12 @@
CONFIG_RNDIS_IPA=y
CONFIG_IPA_UT=y
CONFIG_QCOM_GENI_SE=y
+CONFIG_SM_GPUCC_BENGAL=y
+CONFIG_SM_DISPCC_BENGAL=y
CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_MAILBOX=y
+CONFIG_QCOM_APCS_IPC=y
CONFIG_MSM_QMP=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_ARM_SMMU=y
@@ -422,10 +429,12 @@
CONFIG_IOMMU_DEBUG_TRACKING=y
CONFIG_IOMMU_TESTS=y
CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_RPM=y
CONFIG_RPMSG_QCOM_GLINK_SMEM=y
CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_MDT_LOADER=y
CONFIG_QPNP_PBS=y
CONFIG_QCOM_QMI_HELPERS=y
CONFIG_QCOM_SMEM=y
@@ -454,6 +463,7 @@
CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QCOM_GLINK=y
CONFIG_QCOM_GLINK_PKT=y
+CONFIG_QCOM_SMP2P_SLEEPSTATE=y
CONFIG_MSM_CDSP_LOADER=y
CONFIG_MSM_EVENT_TIMER=y
CONFIG_MSM_PM=y
@@ -473,11 +483,13 @@
CONFIG_RAS=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_QCOM_QFPROM=y
CONFIG_SLIMBUS=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_F2FS_FS=y
CONFIG_F2FS_FS_SECURITY=y
+CONFIG_FS_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@@ -485,6 +497,8 @@
CONFIG_OVERLAY_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
CONFIG_EFIVAR_FS=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index b8b8527..ad2767a 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -106,6 +106,7 @@
CONFIG_MEMORY_HOTPLUG_MOVABLE_NODE=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_AREAS=16
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
@@ -399,6 +400,7 @@
CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_RADIO_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
@@ -410,6 +412,10 @@
CONFIG_MSM_CVP_V4L2=y
CONFIG_MSM_NPU=y
CONFIG_MSM_GLOBAL_SYNX=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_TSPP1=y
+CONFIG_TSPP=m
CONFIG_I2C_RTC6226_QCA=y
CONFIG_DRM=y
# CONFIG_DRM_MSM is not set
@@ -675,10 +681,10 @@
CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
CONFIG_CORESIGHT_STM=y
CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_CTI_SAVE_DISABLE=y
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
CONFIG_CORESIGHT_HWEVENT=y
CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_TGU=y
-CONFIG_CORESIGHT_LINK_LATE_DISABLE=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index f31319e..5e3ed0d 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -113,6 +113,7 @@
CONFIG_CMA=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_ALLOW_WRITE_DEBUGFS=y
CONFIG_CMA_AREAS=16
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
@@ -412,6 +413,7 @@
CONFIG_REGULATOR_STUB=y
CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_RADIO_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
@@ -423,6 +425,10 @@
CONFIG_MSM_CVP_V4L2=y
CONFIG_MSM_NPU=y
CONFIG_MSM_GLOBAL_SYNX=y
+CONFIG_DVB_MPQ=m
+CONFIG_DVB_MPQ_DEMUX=m
+CONFIG_DVB_MPQ_TSPP1=y
+CONFIG_TSPP=m
CONFIG_I2C_RTC6226_QCA=y
CONFIG_DRM=y
# CONFIG_DRM_MSM is not set
@@ -560,6 +566,7 @@
CONFIG_RPMSG_QCOM_GLINK_SPSS=y
CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_MEM_OFFLINE=y
+CONFIG_BUG_ON_HW_MEM_ONLINE_FAIL=y
CONFIG_OVERRIDE_MEMORY_LIMIT=y
CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
@@ -761,4 +768,3 @@
CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_TGU=y
-CONFIG_CORESIGHT_LINK_LATE_DISABLE=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
index b745cb6..a22ea19 100644
--- a/arch/arm64/configs/vendor/lito-perf_defconfig
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -63,7 +63,6 @@
# CONFIG_EFI is not set
CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y
CONFIG_COMPAT=y
-CONFIG_PM_AUTOSLEEP=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
@@ -191,6 +190,7 @@
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
@@ -309,6 +309,7 @@
CONFIG_PPPOL2TP=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_RTL8152=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CLD_LL_CORE=y
CONFIG_CNSS_GENL=y
@@ -665,4 +666,3 @@
CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_TGU=y
-CONFIG_CORESIGHT_LINK_LATE_DISABLE=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index a4302f6..a228ca8 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -63,7 +63,6 @@
CONFIG_RANDOMIZE_BASE=y
CONFIG_BUILD_ARM64_UNCOMPRESSED_KERNEL=y
CONFIG_COMPAT=y
-CONFIG_PM_AUTOSLEEP=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
@@ -196,6 +195,7 @@
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
# CONFIG_NETFILTER_XT_MATCH_SCTP is not set
CONFIG_NETFILTER_XT_MATCH_SOCKET=y
CONFIG_NETFILTER_XT_MATCH_STATE=y
@@ -316,6 +316,7 @@
CONFIG_PPPOL2TP=y
CONFIG_PPP_ASYNC=y
CONFIG_PPP_SYNC_TTY=y
+CONFIG_USB_RTL8152=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CLD_LL_CORE=y
CONFIG_CNSS_GENL=y
@@ -732,4 +733,3 @@
CONFIG_CORESIGHT_DUMMY=y
CONFIG_CORESIGHT_REMOTE_ETM=y
CONFIG_CORESIGHT_TGU=y
-CONFIG_CORESIGHT_LINK_LATE_DISABLE=y
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 2870259..50d3ac6 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -124,7 +124,11 @@
* RAS Error Synchronization barrier
*/
.macro esb
+#ifdef CONFIG_ARM64_RAS_EXTN
hint #16
+#else
+ nop
+#endif
.endm
/*
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 1a037b9..cee28a0 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -159,6 +159,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
}
#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
+#define COMPAT_MINSIGSTKSZ 2048
static inline void __user *arch_compat_alloc_user_space(long len)
{
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 1717ba1..510f687 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -45,9 +45,10 @@
*/
enum ftr_type {
- FTR_EXACT, /* Use a predefined safe value */
- FTR_LOWER_SAFE, /* Smaller value is safe */
- FTR_HIGHER_SAFE,/* Bigger value is safe */
+ FTR_EXACT, /* Use a predefined safe value */
+ FTR_LOWER_SAFE, /* Smaller value is safe */
+ FTR_HIGHER_SAFE, /* Bigger value is safe */
+ FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */
};
#define FTR_STRICT true /* SANITY check strict matching required */
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 518882f..eba2027 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -89,6 +89,8 @@
#define ARM_CPU_PART_CORTEX_A76 0xD0B
#define ARM_CPU_PART_KRYO4G 0x804
#define ARM_CPU_PART_KRYO5S 0x805
+#define ARM_CPU_PART_KRYO2XX_GOLD 0x800
+#define ARM_CPU_PART_KRYO2XX_SILVER 0x801
#define APM_CPU_PART_POTENZA 0x000
diff --git a/arch/arm64/include/asm/dma-iommu.h b/arch/arm64/include/asm/dma-iommu.h
index 4cb442c..80397db 100644
--- a/arch/arm64/include/asm/dma-iommu.h
+++ b/arch/arm64/include/asm/dma-iommu.h
@@ -28,42 +28,8 @@ struct dma_iommu_mapping {
};
#ifdef CONFIG_ARM64_DMA_USE_IOMMU
-
-struct dma_iommu_mapping *
-__depr_arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base,
- size_t size);
-
-void __depr_arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
-
-int __depr_arm_iommu_attach_device(struct device *dev,
- struct dma_iommu_mapping *mapping);
-void __depr_arm_iommu_detach_device(struct device *dev);
-
void arm_iommu_put_dma_cookie(struct iommu_domain *domain);
#else /* !CONFIG_ARM64_DMA_USE_IOMMU */
-
-static inline struct dma_iommu_mapping *
-__depr_arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base,
- size_t size)
-{
- return NULL;
-}
-
-static inline void
-__depr_arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
-{
-}
-
-static inline int __depr_arm_iommu_attach_device(struct device *dev,
- struct dma_iommu_mapping *mapping)
-{
- return -ENODEV;
-}
-
-static inline void __depr_arm_iommu_detach_device(struct device *dev)
-{
-}
-
static inline void arm_iommu_put_dma_cookie(struct iommu_domain *domain) {}
#endif /* CONFIG_ARM64_DMA_USE_IOMMU */
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 7ed3208..f52a296 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -94,7 +94,11 @@ static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base,
((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
#define alloc_screen_info(x...) &screen_info
-#define free_screen_info(x...)
+
+static inline void free_screen_info(efi_system_table_t *sys_table_arg,
+ struct screen_info *si)
+{
+}
/* redeclare as 'hidden' so the compiler will generate relative references */
extern struct screen_info screen_info __attribute__((__visibility__("hidden")));
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f64d4e3..c9a1d5f 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -448,8 +448,8 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
PMD_TYPE_SECT)
#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
-#define pud_sect(pud) (0)
-#define pud_table(pud) (1)
+static inline bool pud_sect(pud_t pud) { return false; }
+static inline bool pud_table(pud_t pud) { return true; }
#else
#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
PUD_TYPE_SECT)
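On the pud_sect()/pud_table() change above: converting constant macros to static inline functions keeps the argument type-checked and "used" in every configuration, so configs that take this branch no longer trigger unused-variable warnings or silently accept the wrong type. An editor's stand-alone illustration (the pud_t here is a stand-in, not the real kernel definition):

```c
#include <stdbool.h>

/* A macro that drops its argument leaves 'pud' unused and unchecked: */
#define pud_sect_macro(pud)	(0)

typedef struct { unsigned long val; } pud_t;	/* stand-in type */

/* The static inline form evaluates and type-checks its argument: */
static inline bool pud_sect(pud_t pud) { return false; }
static inline bool pud_table(pud_t pud) { return true; }
```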
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 8181685..e1fcfca 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -211,8 +211,8 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_CWG_SHIFT, 4, 0),
- ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, CTR_ERG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
+ ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
/*
* Linux can handle differing I-cache policies. Userspace JITs will
@@ -454,6 +454,10 @@ static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
case FTR_LOWER_SAFE:
ret = new < cur ? new : cur;
break;
+ case FTR_HIGHER_OR_ZERO_SAFE:
+ if (!cur || !new)
+ break;
+ /* Fallthrough */
case FTR_HIGHER_SAFE:
ret = new > cur ? new : cur;
break;
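To make the new FTR_HIGHER_OR_ZERO_SAFE rule above concrete: for the CTR_EL0 CWG/ERG fields, 0 means "maximal" on that CPU, so if any CPU reports 0 the sanitized system-wide value must stay 0 rather than taking the numeric maximum. An editor's stand-alone sketch of that selection (ret starts at 0, as in arm64_ftr_safe_value):

```c
/* Pick the larger value, except that 0 ("maximal") wins outright. */
static long higher_or_zero_safe(long cur, long new)
{
	long ret = 0;

	if (!cur || !new)
		return ret;		/* either CPU said "maximal" */
	return new > cur ? new : cur;
}

/* e.g. CPUs reporting CWG = 4 and CWG = 0 sanitize to 0,
 * while CWG = 3 and CWG = 4 sanitize to 4.
 */
```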
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c
index 57e9622..7eff8af 100644
--- a/arch/arm64/kernel/ftrace.c
+++ b/arch/arm64/kernel/ftrace.c
@@ -76,7 +76,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
if (offset < -SZ_128M || offset >= SZ_128M) {
#ifdef CONFIG_ARM64_MODULE_PLTS
- struct plt_entry trampoline;
+ struct plt_entry trampoline, *dst;
struct module *mod;
/*
@@ -104,24 +104,27 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
* is added in the future, but for now, the pr_err() below
* deals with a theoretical issue only.
*/
+ dst = mod->arch.ftrace_trampoline;
trampoline = get_plt_entry(addr);
- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
- &trampoline)) {
- if (!plt_entries_equal(mod->arch.ftrace_trampoline,
- &(struct plt_entry){})) {
+ if (!plt_entries_equal(dst, &trampoline)) {
+ if (!plt_entries_equal(dst, &(struct plt_entry){})) {
pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
return -EINVAL;
}
/* point the trampoline to our ftrace entry point */
module_disable_ro(mod);
- *mod->arch.ftrace_trampoline = trampoline;
+ *dst = trampoline;
module_enable_ro(mod, true);
- /* update trampoline before patching in the branch */
- smp_wmb();
+ /*
+ * Ensure updated trampoline is visible to instruction
+ * fetch before we patch in the branch.
+ */
+ __flush_icache_range((unsigned long)&dst[0],
+ (unsigned long)&dst[1]);
}
- addr = (unsigned long)(void *)mod->arch.ftrace_trampoline;
+ addr = (unsigned long)dst;
#else /* CONFIG_ARM64_MODULE_PLTS */
return -EINVAL;
#endif /* CONFIG_ARM64_MODULE_PLTS */
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 397d083..6ae2236 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -547,13 +547,14 @@ int hw_breakpoint_arch_parse(struct perf_event *bp,
/* Aligned */
break;
case 1:
- /* Allow single byte watchpoint. */
- if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
- break;
case 2:
/* Allow halfword watchpoints and breakpoints. */
if (hw->ctrl.len == ARM_BREAKPOINT_LEN_2)
break;
+ case 3:
+ /* Allow single byte watchpoint. */
+ if (hw->ctrl.len == ARM_BREAKPOINT_LEN_1)
+ break;
default:
return -EINVAL;
}
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 933adbc..0311fe5 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -11,6 +11,7 @@
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
@@ -32,6 +33,7 @@ static int save_return_addr(struct stackframe *frame, void *d)
return 0;
}
}
+NOKPROBE_SYMBOL(save_return_addr);
void *return_address(unsigned int level)
{
@@ -55,3 +57,4 @@ void *return_address(unsigned int level)
return NULL;
}
EXPORT_SYMBOL_GPL(return_address);
+NOKPROBE_SYMBOL(return_address);
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 4989f7e..bb482ec 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -18,6 +18,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <linux/kprobes.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
@@ -85,6 +86,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
return 0;
}
+NOKPROBE_SYMBOL(unwind_frame);
void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
int (*fn)(struct stackframe *, void *), void *data)
@@ -99,6 +101,7 @@ void notrace walk_stackframe(struct task_struct *tsk, struct stackframe *frame,
break;
}
}
+NOKPROBE_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
diff --git a/arch/arm64/kvm/regmap.c b/arch/arm64/kvm/regmap.c
index 7a5173e..4c2e96e 100644
--- a/arch/arm64/kvm/regmap.c
+++ b/arch/arm64/kvm/regmap.c
@@ -189,13 +189,18 @@ void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
switch (spsr_idx) {
case KVM_SPSR_SVC:
write_sysreg_el1(v, spsr);
+ break;
case KVM_SPSR_ABT:
write_sysreg(v, spsr_abt);
+ break;
case KVM_SPSR_UND:
write_sysreg(v, spsr_und);
+ break;
case KVM_SPSR_IRQ:
write_sysreg(v, spsr_irq);
+ break;
case KVM_SPSR_FIQ:
write_sysreg(v, spsr_fiq);
+ break;
}
}
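The regmap.c hunk above fixes a classic missing-break bug: without the breaks, a write to KVM_SPSR_SVC fell through and clobbered the ABT, UND, IRQ, and FIQ banked registers as well. The same bug in miniature, with hypothetical registers:

```c
/* Without the breaks, writing index 0 also overwrites regs[1]
 * and regs[2] -- exactly the fallthrough the patch closes.
 */
static int regs[3];

static void write_reg(int idx, int v)
{
	switch (idx) {
	case 0:
		regs[0] = v;
		break;		/* omitting this falls into case 1 */
	case 1:
		regs[1] = v;
		break;
	case 2:
		regs[2] = v;
		break;
	}
}
```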
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index d112af7..6da2bbd 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -626,7 +626,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
*/
val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
| (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
- __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+ __vcpu_sys_reg(vcpu, r->reg) = val;
}
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
@@ -968,13 +968,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
{ SYS_DESC(SYS_DBGBVRn_EL1(n)), \
- trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \
+ trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \
{ SYS_DESC(SYS_DBGBCRn_EL1(n)), \
- trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \
+ trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \
{ SYS_DESC(SYS_DBGWVRn_EL1(n)), \
- trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \
+ trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \
{ SYS_DESC(SYS_DBGWCRn_EL1(n)), \
- trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
+ trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n) \
@@ -1359,7 +1359,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
{ SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
- { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
+ { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
@@ -2072,13 +2072,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
}
static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
- const struct sys_reg_desc *table, size_t num)
+ const struct sys_reg_desc *table, size_t num,
+ unsigned long *bmap)
{
unsigned long i;
for (i = 0; i < num; i++)
- if (table[i].reset)
+ if (table[i].reset) {
+ int reg = table[i].reg;
+
table[i].reset(vcpu, &table[i]);
+ if (reg > 0 && reg < NR_SYS_REGS)
+ set_bit(reg, bmap);
+ }
}
/**
@@ -2576,18 +2582,16 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
size_t num;
const struct sys_reg_desc *table;
-
- /* Catch someone adding a register without putting in reset entry. */
- memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
+ DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
/* Generic chip reset first (so target could override). */
- reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
+ reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
table = get_target_table(vcpu->arch.target, true, &num);
- reset_sys_reg_descs(vcpu, table, num);
+ reset_sys_reg_descs(vcpu, table, num, bmap);
for (num = 1; num < NR_SYS_REGS; num++) {
- if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
+ if (WARN(!test_bit(num, bmap),
"Didn't reset __vcpu_sys_reg(%zi)\n", num))
break;
}
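Both this sys_regs.c hunk and the earlier arch/arm/kvm/coproc.c hunk replace the old 0x42 memset canary with an explicit bitmap: each reset callback marks its register as initialized, and the final loop warns on any bit still clear. A stand-alone sketch of the pattern using the kernel bitmap helpers (the register count and the single set_bit() are hypothetical):

```c
#include <linux/bitmap.h>
#include <linux/bug.h>

#define NR_DEMO_REGS 8			/* hypothetical */

static void reset_and_check(void)
{
	DECLARE_BITMAP(bmap, NR_DEMO_REGS) = { 0, };
	unsigned long i;

	/* each reset handler records which register it initialized */
	set_bit(3, bmap);		/* e.g. reset of register 3 */

	for (i = 1; i < NR_DEMO_REGS; i++)
		WARN(!test_bit(i, bmap),
		     "Didn't reset register %lu\n", i);
}
```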
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index d12667d..430ea0e 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -1151,179 +1151,6 @@ static void arm_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
set_dma_ops(dev, mapping.ops);
}
-/**
- * DEPRECATED
- * arm_iommu_create_mapping
- * @bus: pointer to the bus holding the client device (for IOMMU calls)
- * @base: start address of the valid IO address space
- * @size: maximum size of the valid IO address space
- *
- * Creates a mapping structure which holds information about used/unused
- * IO address ranges, which is required to perform memory allocation and
- * mapping with IOMMU aware functions.
- *
- * Clients may use iommu_domain_set_attr() to set additional flags prior
- * to calling arm_iommu_attach_device() to complete initialization.
- */
-struct dma_iommu_mapping *
-__depr_arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base,
- size_t size)
-{
- unsigned int bits = size >> PAGE_SHIFT;
- struct dma_iommu_mapping *mapping;
-
- if (!bits)
- return ERR_PTR(-EINVAL);
-
- mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
- if (!mapping)
- return ERR_PTR(-ENOMEM);
-
- mapping->base = base;
- mapping->bits = bits;
-
- mapping->domain = iommu_domain_alloc(bus);
- if (!mapping->domain)
- goto err_domain_alloc;
-
- mapping->init = false;
- return mapping;
-
-err_domain_alloc:
- kfree(mapping);
- return ERR_PTR(-ENOMEM);
-}
-EXPORT_SYMBOL(__depr_arm_iommu_create_mapping);
-
-/*
- * DEPRECATED
- * arm_iommu_release_mapping
- * @mapping: allocted via arm_iommu_create_mapping()
- *
- * Frees all resources associated with the iommu mapping.
- * The device associated with this mapping must be in the 'detached' state
- */
-void __depr_arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
-{
- if (!mapping)
- return;
-
- if (mapping->domain)
- iommu_domain_free(mapping->domain);
-
- kfree(mapping);
-}
-EXPORT_SYMBOL(__depr_arm_iommu_release_mapping);
-
-/**
- * DEPRECATED
- * arm_iommu_attach_device
- * @dev: valid struct device pointer
- * @mapping: io address space mapping structure (returned from
- * arm_iommu_create_mapping)
- *
- * Attaches specified io address space mapping to the provided device,
- * this replaces the dma operations (dma_map_ops pointer) with the
- * IOMMU aware version.
- *
- * Only configures dma_ops for a single device in the iommu_group.
- */
-int __depr_arm_iommu_attach_device(struct device *dev,
- struct dma_iommu_mapping *mapping)
-{
- int err;
- struct iommu_domain *domain;
- struct iommu_group *group;
-
- if (!dev || !mapping) {
- pr_err("%s: Error input is NULL\n", __func__);
- return -EINVAL;
- }
-
- group = dev->iommu_group;
- if (!group) {
- dev_err(dev, "No iommu associated with device\n");
- return -EINVAL;
- }
-
- domain = iommu_get_domain_for_dev(dev);
- if (domain) {
- int dynamic = 0;
-
- iommu_domain_get_attr(domain, DOMAIN_ATTR_DYNAMIC, &dynamic);
-
- if ((domain->type == IOMMU_DOMAIN_DMA) && dynamic) {
- dev_warn(dev, "Deprecated API %s in use! Continuing anyway\n",
- __func__);
- } else {
- dev_err(dev, "Device already attached to other iommu_domain\n");
- return -EINVAL;
- }
- }
-
- err = iommu_attach_group(mapping->domain, group);
- if (err) {
- dev_err(dev, "iommu_attach_group failed\n");
- return err;
- }
-
- err = arm_iommu_get_dma_cookie(dev, mapping);
- if (err) {
- dev_err(dev, "arm_iommu_get_dma_cookie failed\n");
- iommu_detach_group(domain, group);
- return err;
- }
-
- dev->archdata.mapping = mapping;
- set_dma_ops(dev, mapping->ops);
-
- pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
- return 0;
-}
-EXPORT_SYMBOL(__depr_arm_iommu_attach_device);
-
-/**
- * DEPRECATED
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
- */
-void __depr_arm_iommu_detach_device(struct device *dev)
-{
- struct iommu_domain *domain;
- int s1_bypass = 0;
-
- if (!dev->iommu_group) {
- dev_err(dev, "No iommu associated with device\n");
- return;
- }
-
- domain = iommu_get_domain_for_dev(dev);
- if (!domain) {
- dev_warn(dev, "Not attached\n");
- return;
- }
-
- iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
-
- /*
- * ION defers dma_unmap calls. Ensure they have all completed prior to
- * setting dma_ops to NULL.
- */
- if (msm_dma_unmap_all_for_dev(dev))
- dev_warn(dev, "IOMMU detach with outstanding mappings\n");
-
- iommu_detach_group(domain, dev->iommu_group);
- dev->archdata.mapping = NULL;
- if (!s1_bypass)
- set_dma_ops(dev, NULL);
-
- pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
-}
-EXPORT_SYMBOL(__depr_arm_iommu_detach_device);
-
#else /*!CONFIG_ARM64_DMA_USE_IOMMU */
static void arm_iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
index 97d5239..428ef21 100644
--- a/arch/mips/kernel/cacheinfo.c
+++ b/arch/mips/kernel/cacheinfo.c
@@ -80,6 +80,8 @@ static int __populate_cache_leaves(unsigned int cpu)
if (c->tcache.waysize)
populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
+ this_cpu_ci->cpu_map_populated = true;
+
return 0;
}
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
index 5f209f1..df7ddd2 100644
--- a/arch/mips/kernel/i8253.c
+++ b/arch/mips/kernel/i8253.c
@@ -32,7 +32,8 @@ void __init setup_pit_timer(void)
static int __init init_pit_clocksource(void)
{
- if (num_possible_cpus() > 1) /* PIT does not scale! */
+ if (num_possible_cpus() > 1 || /* PIT does not scale! */
+ !clockevent_state_periodic(&i8253_clockevent))
return 0;
return clocksource_i8253_init();
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index c4ef1c3..37caead 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -156,8 +156,9 @@ static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
if (edge)
irq_set_handler(d->hwirq, handle_edge_irq);
- ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
- (val << (i * 4)), LTQ_EIU_EXIN_C);
+ ltq_eiu_w32((ltq_eiu_r32(LTQ_EIU_EXIN_C) &
+ (~(7 << (i * 4)))) | (val << (i * 4)),
+ LTQ_EIU_EXIN_C);
}
}
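The lantiq fix above is a classic read-modify-write repair: the old code OR-ed the new trigger value into LTQ_EIU_EXIN_C without first clearing the 3-bit field for that pin, so stale bits could survive the update. A small sketch of the corrected field update (register layout and names here are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Update a 3-bit field at slot i of a packed register:
 * clear the old field first, then OR in the new value. */
static uint32_t set_trigger(uint32_t reg, unsigned int i, uint32_t val)
{
        reg &= ~(7u << (i * 4));        /* the fix: clear stale bits */
        reg |= (val & 7u) << (i * 4);
        return reg;
}

int main(void)
{
        uint32_t reg = 0x00000060;      /* slot 1 currently holds 6 */

        /* An OR-only update would yield 0x70; clearing first gives 0x10. */
        printf("%#x\n", set_trigger(reg, 1, 1));
        return 0;
}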
diff --git a/arch/parisc/boot/compressed/vmlinux.lds.S b/arch/parisc/boot/compressed/vmlinux.lds.S
index 4ebd4e6..41ebe97 100644
--- a/arch/parisc/boot/compressed/vmlinux.lds.S
+++ b/arch/parisc/boot/compressed/vmlinux.lds.S
@@ -42,8 +42,8 @@
#endif
_startcode_end = .;
- /* bootloader code and data starts behind area of extracted kernel */
- . = (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START);
+ /* bootloader code and data start at least behind the area of the extracted kernel */
+ . = MAX(ABSOLUTE(.), (SZ_end - SZparisc_kernel_start + KERNEL_BINARY_TEXT_START));
/* align on next page boundary */
. = ALIGN(4096);
diff --git a/arch/powerpc/boot/xz_config.h b/arch/powerpc/boot/xz_config.h
index e22e5b3..ebfadd3 100644
--- a/arch/powerpc/boot/xz_config.h
+++ b/arch/powerpc/boot/xz_config.h
@@ -20,10 +20,30 @@ static inline uint32_t swab32p(void *p)
#ifdef __LITTLE_ENDIAN__
#define get_le32(p) (*((uint32_t *) (p)))
+#define cpu_to_be32(x) swab32(x)
+static inline u32 be32_to_cpup(const u32 *p)
+{
+ return swab32p((u32 *)p);
+}
#else
#define get_le32(p) swab32p(p)
+#define cpu_to_be32(x) (x)
+static inline u32 be32_to_cpup(const u32 *p)
+{
+ return *p;
+}
#endif
+static inline uint32_t get_unaligned_be32(const void *p)
+{
+ return be32_to_cpup(p);
+}
+
+static inline void put_unaligned_be32(u32 val, void *p)
+{
+ *((u32 *)p) = cpu_to_be32(val);
+}
+
#define memeq(a, b, size) (memcmp(a, b, size) == 0)
#define memzero(buf, size) memset(buf, 0, size)
diff --git a/arch/powerpc/include/asm/cacheflush.h b/arch/powerpc/include/asm/cacheflush.h
index d5a8d7b..b189f7a 100644
--- a/arch/powerpc/include/asm/cacheflush.h
+++ b/arch/powerpc/include/asm/cacheflush.h
@@ -32,9 +32,12 @@
* not expect this type of fault. flush_cache_vmap is not exactly the right
* place to put this, but it seems to work well enough.
*/
-#define flush_cache_vmap(start, end) do { asm volatile("ptesync" ::: "memory"); } while (0)
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ asm volatile("ptesync" ::: "memory");
+}
#else
-#define flush_cache_vmap(start, end) do { } while (0)
+static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
#endif
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index c72767a..fe3c6f3 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -360,10 +360,19 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
ptep = find_init_mm_pte(token, &hugepage_shift);
if (!ptep)
return token;
- WARN_ON(hugepage_shift);
- pa = pte_pfn(*ptep) << PAGE_SHIFT;
- return pa | (token & (PAGE_SIZE-1));
+ pa = pte_pfn(*ptep);
+
+ /* On radix we can do hugepage mappings for io, so handle that */
+ if (hugepage_shift) {
+ pa <<= hugepage_shift;
+ pa |= token & ((1ul << hugepage_shift) - 1);
+ } else {
+ pa <<= PAGE_SHIFT;
+ pa |= token & (PAGE_SIZE - 1);
+ }
+
+ return pa;
}
/*
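With the eeh change above, the offset kept from the token now matches the mapping size: hugepage_shift bits for hugepage IO mappings on radix, PAGE_SHIFT bits otherwise. A standalone sketch of that address computation (the values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Keep as many offset bits of the token as the mapping covers:
 * hugepage_shift of them for a hugepage mapping, PAGE_SHIFT otherwise. */
static uint64_t token_to_phys(uint64_t pfn, uint64_t token,
                              unsigned int hugepage_shift)
{
        unsigned int shift = hugepage_shift ? hugepage_shift : PAGE_SHIFT;

        return (pfn << shift) | (token & ((1ULL << shift) - 1));
}

int main(void)
{
        /* 16MB hugepage: 24 offset bits survive from the token. */
        printf("%#llx\n",
               (unsigned long long)token_to_phys(0x3, 0xdeadbeef, 24));
        /* Normal page: only the low 12 bits survive. */
        printf("%#llx\n",
               (unsigned long long)token_to_phys(0x3, 0xdeadbeef, 0));
        return 0;
}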
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 262ba94..1bf6aae 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -135,7 +135,7 @@
subf r8,r6,r4 /* compute length */
add r8,r8,r5 /* ensure we get enough */
lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of dcache block size */
- srw. r8,r8,r9 /* compute line count */
+ srd. r8,r8,r9 /* compute line count */
beqlr /* nothing to do? */
mtctr r8
0: dcbst 0,r6
@@ -153,7 +153,7 @@
subf r8,r6,r4 /* compute length */
add r8,r8,r5 /* ensure we get enough */
lwz r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */
- srw. r8,r8,r9 /* compute line count */
+ srd. r8,r8,r9 /* compute line count */
beqlr /* nothing to do? */
sync
isync
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index 98f0472..c101b32 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -45,6 +45,8 @@ unsigned int pci_parse_of_flags(u32 addr0, int bridge)
if (addr0 & 0x02000000) {
flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
+ if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ flags |= IORESOURCE_MEM_64;
flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
if (addr0 & 0x40000000)
flags |= IORESOURCE_PREFETCH
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index fd59fef..906b05c 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1202,6 +1202,9 @@ SYSCALL_DEFINE0(rt_sigreturn)
goto bad;
if (MSR_TM_ACTIVE(msr_hi<<32)) {
+ /* Trying to start TM on a non-TM system */
+ if (!cpu_has_feature(CPU_FTR_TM))
+ goto bad;
/* We only recheckpoint on return if we're in a
* transaction.
*/
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 14b0f5b..b5933d72 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -750,6 +750,11 @@ SYSCALL_DEFINE0(rt_sigreturn)
if (MSR_TM_ACTIVE(msr)) {
/* We recheckpoint on return. */
struct ucontext __user *uc_transact;
+
+ /* Trying to start TM on a non-TM system */
+ if (!cpu_has_feature(CPU_FTR_TM))
+ goto badframe;
+
if (__get_user(uc_transact, &uc->uc_link))
goto badframe;
if (restore_tm_sigcontexts(current, &uc->uc_mcontext,
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 578174a3..51cd66d 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -61,6 +61,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+ return kvm_arch_vcpu_runnable(vcpu);
+}
+
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
return false;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7296a42..cef0b7e 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -150,6 +150,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
} else {
pdshift = PUD_SHIFT;
pu = pud_alloc(mm, pg, addr);
+ if (!pu)
+ return NULL;
if (pshift == PUD_SHIFT)
return (pte_t *)pu;
else if (pshift > PMD_SHIFT) {
@@ -158,6 +160,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
} else {
pdshift = PMD_SHIFT;
pm = pmd_alloc(mm, pu, addr);
+ if (!pm)
+ return NULL;
if (pshift == PMD_SHIFT)
/* 16MB hugepage */
return (pte_t *)pm;
@@ -174,12 +178,16 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
} else {
pdshift = PUD_SHIFT;
pu = pud_alloc(mm, pg, addr);
+ if (!pu)
+ return NULL;
if (pshift >= PUD_SHIFT) {
ptl = pud_lockptr(mm, pu);
hpdp = (hugepd_t *)pu;
} else {
pdshift = PMD_SHIFT;
pm = pmd_alloc(mm, pu, addr);
+ if (!pm)
+ return NULL;
ptl = pmd_lockptr(mm, pm);
hpdp = (hugepd_t *)pm;
}
diff --git a/arch/powerpc/platforms/4xx/uic.c b/arch/powerpc/platforms/4xx/uic.c
index 8b4dd0d..9e27cfe 100644
--- a/arch/powerpc/platforms/4xx/uic.c
+++ b/arch/powerpc/platforms/4xx/uic.c
@@ -158,6 +158,7 @@ static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
mtdcr(uic->dcrbase + UIC_PR, pr);
mtdcr(uic->dcrbase + UIC_TR, tr);
+ mtdcr(uic->dcrbase + UIC_SR, ~mask);
raw_spin_unlock_irqrestore(&uic->lock, flags);
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index f0e30dc..7b60fcf 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -9,6 +9,7 @@
* 2 as published by the Free Software Foundation.
*/
+#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/smp.h>
@@ -344,11 +345,19 @@ void post_mobility_fixup(void)
if (rc)
printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc);
+ /*
+ * We don't want CPUs to go online/offline while the device
+ * tree is being updated.
+ */
+ cpus_read_lock();
+
rc = pseries_devicetree_update(MIGRATION_SCOPE);
if (rc)
printk(KERN_ERR "Post-mobility device tree update "
"failed: %d\n", rc);
+ cpus_read_unlock();
+
/* Possibly switch to a new RFI flush type */
pseries_setup_rfi_flush();
diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c
index 959a2a6..0b24b10 100644
--- a/arch/powerpc/sysdev/xive/common.c
+++ b/arch/powerpc/sysdev/xive/common.c
@@ -483,7 +483,7 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
* Now go through the entire mask until we find a valid
* target.
*/
- for (;;) {
+ do {
/*
* We re-check online as the fallback case passes us
* an untested affinity mask
@@ -491,12 +491,11 @@ static int xive_find_target_in_mask(const struct cpumask *mask,
if (cpu_online(cpu) && xive_try_pick_target(cpu))
return cpu;
cpu = cpumask_next(cpu, mask);
- if (cpu == first)
- break;
/* Wrap around */
if (cpu >= nr_cpu_ids)
cpu = cpumask_first(mask);
- }
+ } while (cpu != first);
+
return -1;
}
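The xive loop conversion matters for small masks: the old for (;;) compared cpu against first before handling the wrap past nr_cpu_ids, so a mask whose only CPU fails the pick test could spin forever. The do/while wraps first and tests the exit condition last, guaranteeing exactly one full revolution. A sketch of the pattern over a plain array (the kernel iterates a cpumask instead):

#include <stdio.h>

/* Scan a circular set starting at 'first': wrap past the end before
 * testing the exit condition, so every slot is tried exactly once. */
static int find_target(const int *usable, int n, int first)
{
        int cpu = first;

        do {
                if (usable[cpu])
                        return cpu;
                cpu++;
                if (cpu >= n)   /* wrap around */
                        cpu = 0;
        } while (cpu != first);

        return -1;
}

int main(void)
{
        int usable[4] = { 0, 1, 0, 0 };

        printf("%d\n", find_target(usable, 4, 3));  /* wraps, finds 1 */
        usable[1] = 0;
        printf("%d\n", find_target(usable, 4, 3));  /* full pass, -1 */
        return 0;
}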
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index dd6badc..74cfc1b 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -466,8 +466,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
local_irq_save(flags);
hard_irq_disable();
- tracing_enabled = tracing_is_on();
- tracing_off();
+ if (!fromipi) {
+ tracing_enabled = tracing_is_on();
+ tracing_off();
+ }
bp = in_breakpoint_table(regs->nip, &offset);
if (bp != NULL) {
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index dd6b05b..d911a8c 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -23,7 +23,7 @@ extern void __fstate_restore(struct task_struct *restore_from);
static inline void __fstate_clean(struct pt_regs *regs)
{
- regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
+ regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
}
static inline void fstate_save(struct task_struct *task,
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 41e3908..0d75329 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -176,6 +176,8 @@ static inline int devmem_is_allowed(unsigned long pfn)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define ARCH_ZONE_DMA_BITS 31
+
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index b43f8d3..18ede6e 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -31,10 +31,9 @@
SECTIONS
{
. = 0x100000;
- _stext = .; /* Start of text section */
.text : {
- /* Text and read-only data */
- _text = .;
+ _stext = .; /* Start of text section */
+ _text = .; /* Text and read-only data */
HEAD_TEXT
TEXT_TEXT
SCHED_TEXT
@@ -46,11 +45,10 @@
*(.text.*_indirect_*)
*(.fixup)
*(.gnu.warning)
+ . = ALIGN(PAGE_SIZE);
+ _etext = .; /* End of text section */
} :text = 0x0700
- . = ALIGN(PAGE_SIZE);
- _etext = .; /* End of text section */
-
NOTES :text :note
.dummy : { *(.dummy) } :data
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 6394b4f..f42feab 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -8,27 +8,19 @@
bool
config SH_DEVICE_TREE
- bool "Board Described by Device Tree"
+ bool
select OF
select OF_EARLY_FLATTREE
select TIMER_OF
select COMMON_CLK
select GENERIC_CALIBRATE_DELAY
- help
- Select Board Described by Device Tree to build a kernel that
- does not hard-code any board-specific knowledge but instead uses
- a device tree blob provided by the boot-loader. You must enable
- drivers for any hardware you want to use separately. At this
- time, only boards based on the open-hardware J-Core processors
- have sufficient driver coverage to use this option; do not
- select it if you are using original SuperH hardware.
config SH_JCORE_SOC
bool "J-Core SoC"
- depends on SH_DEVICE_TREE && (CPU_SH2 || CPU_J2)
+ select SH_DEVICE_TREE
select CLKSRC_JCORE_PIT
select JCORE_AIC
- default y if CPU_J2
+ depends on CPU_J2
help
Select this option to include drivers for core components of the
J-Core SoC, including interrupt controllers and timers.
diff --git a/arch/sh/include/asm/io.h b/arch/sh/include/asm/io.h
index 98cb8c8..0ae60d68 100644
--- a/arch/sh/include/asm/io.h
+++ b/arch/sh/include/asm/io.h
@@ -371,7 +371,11 @@ static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#define ioremap_nocache ioremap
#define ioremap_uc ioremap
-#define iounmap __iounmap
+
+static inline void iounmap(void __iomem *addr)
+{
+ __iounmap(addr);
+}
/*
* Convert a physical pointer to a virtual kernel pointer for /dev/mem
diff --git a/arch/sh/kernel/hw_breakpoint.c b/arch/sh/kernel/hw_breakpoint.c
index d9ff3b4..2569ffc 100644
--- a/arch/sh/kernel/hw_breakpoint.c
+++ b/arch/sh/kernel/hw_breakpoint.c
@@ -160,6 +160,7 @@ int arch_bp_generic_fields(int sh_len, int sh_type,
switch (sh_type) {
case SH_BREAKPOINT_READ:
*gen_type = HW_BREAKPOINT_R;
+ break;
case SH_BREAKPOINT_WRITE:
*gen_type = HW_BREAKPOINT_W;
break;
diff --git a/arch/um/include/asm/mmu_context.h b/arch/um/include/asm/mmu_context.h
index fca34b2..129fb1d 100644
--- a/arch/um/include/asm/mmu_context.h
+++ b/arch/um/include/asm/mmu_context.h
@@ -53,7 +53,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
* when the new ->mm is used for the first time.
*/
__switch_mm(&new->context.id);
- down_write(&new->mmap_sem);
+ down_write_nested(&new->mmap_sem, 1);
uml_setup_stubs(new);
up_write(&new->mmap_sem);
}
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 8dd1d5c..0387d7a 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -17,6 +17,7 @@
#include "pgtable.h"
#include "../string.h"
#include "../voffset.h"
+#include <asm/bootparam_utils.h>
/*
* WARNING!!
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index a423bdb..47fd18d 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -22,7 +22,6 @@
#include <asm/page.h>
#include <asm/boot.h>
#include <asm/bootparam.h>
-#include <asm/bootparam_utils.h>
#define BOOT_BOOT_H
#include "../ctype.h"
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index c4428a1..2622c07 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -34,6 +34,14 @@ int memcmp(const void *s1, const void *s2, size_t len)
return diff;
}
+/*
+ * Clang may lower `memcmp == 0` to `bcmp == 0`.
+ */
+int bcmp(const void *s1, const void *s2, size_t len)
+{
+ return memcmp(s1, s2, len);
+}
+
int strcmp(const char *str1, const char *str2)
{
const unsigned char *s1 = (const unsigned char *)str1;
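For context on the bcmp addition above: the boot stub is freestanding, so when Clang lowers a memcmp-for-equality into a bcmp call there is no libc to supply one. Since bcmp only has to distinguish equal from non-equal, forwarding to memcmp is sufficient, as this sketch shows (my_bcmp is an illustrative name):

#include <stdio.h>
#include <string.h>

/* bcmp only needs to report equal vs. not equal, so forwarding to
 * memcmp (which additionally orders its operands) is a valid implementation. */
static int my_bcmp(const void *s1, const void *s2, size_t len)
{
        return memcmp(s1, s2, len);
}

int main(void)
{
        printf("%d\n", my_bcmp("abc", "abc", 3) == 0); /* 1 */
        printf("%d\n", my_bcmp("abc", "abd", 3) == 0); /* 0 */
        return 0;
}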
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index e699b20..578b545 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -329,6 +329,23 @@ For 32-bit we have the following conventions - kernel is built with
#endif
+/*
+ * Mitigate Spectre v1 for conditional swapgs code paths.
+ *
+ * FENCE_SWAPGS_USER_ENTRY is used in the user entry swapgs code path, to
+ * prevent a speculative swapgs when coming from kernel space.
+ *
+ * FENCE_SWAPGS_KERNEL_ENTRY is used in the kernel entry non-swapgs code path,
+ * to prevent the swapgs from getting speculatively skipped when coming from
+ * user space.
+ */
+.macro FENCE_SWAPGS_USER_ENTRY
+ ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_USER
+.endm
+.macro FENCE_SWAPGS_KERNEL_ENTRY
+ ALTERNATIVE "", "lfence", X86_FEATURE_FENCE_SWAPGS_KERNEL
+.endm
+
#endif /* CONFIG_X86_64 */
/*
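The two macros above expand to nothing by default and are patched to an LFENCE at boot when the corresponding synthetic feature bit is forced on. A conceptual userspace model of that behavior, using a runtime flag where the kernel uses ALTERNATIVE code patching (x86-only because of the inline lfence; all names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool fence_swapgs_user;   /* stands in for X86_FEATURE_FENCE_SWAPGS_USER */
static bool fence_swapgs_kernel; /* stands in for X86_FEATURE_FENCE_SWAPGS_KERNEL */

/* Stands in for the ALTERNATIVE slot: a no-op unless the feature bit
 * was forced on, in which case it serializes loads with LFENCE. */
static inline void fence_swapgs(bool enabled)
{
        if (enabled)
                __asm__ __volatile__("lfence" ::: "memory");
}

int main(void)
{
        fence_swapgs_kernel = true;        /* as the mitigation code decides */

        fence_swapgs(fence_swapgs_user);   /* user-entry path: no-op here */
        fence_swapgs(fence_swapgs_kernel); /* kernel-entry path: fenced */
        puts("both paths executed");
        return 0;
}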
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 206df09..ccb5e34 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -582,7 +582,7 @@
testb $3, CS-ORIG_RAX+8(%rsp)
jz 1f
SWAPGS
-
+ FENCE_SWAPGS_USER_ENTRY
/*
* Switch to the thread stack. The IRET frame and orig_ax are
* on the stack, as well as the return address. RDI..R12 are
@@ -612,8 +612,10 @@
UNWIND_HINT_FUNC
movq (%rdi), %rdi
+ jmp 2f
1:
-
+ FENCE_SWAPGS_KERNEL_ENTRY
+2:
PUSH_AND_CLEAR_REGS save_ret=1
ENCODE_FRAME_POINTER 8
@@ -1196,7 +1198,6 @@
#ifdef CONFIG_XEN
idtentry xennmi do_nmi has_error_code=0
idtentry xendebug do_debug has_error_code=0
-idtentry xenint3 do_int3 has_error_code=0
#endif
idtentry general_protection do_general_protection has_error_code=1
@@ -1241,6 +1242,13 @@
*/
SAVE_AND_SWITCH_TO_KERNEL_CR3 scratch_reg=%rax save_reg=%r14
+ /*
+ * The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
+ * unconditional CR3 write, even in the PTI case. So do an lfence
+ * to prevent GS speculation, regardless of whether PTI is enabled.
+ */
+ FENCE_SWAPGS_KERNEL_ENTRY
+
ret
END(paranoid_entry)
@@ -1291,6 +1299,7 @@
* from user mode due to an IRET fault.
*/
SWAPGS
+ FENCE_SWAPGS_USER_ENTRY
/* We have user CR3. Change to kernel CR3. */
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
@@ -1312,6 +1321,8 @@
CALL_enter_from_user_mode
ret
+.Lerror_entry_done_lfence:
+ FENCE_SWAPGS_KERNEL_ENTRY
.Lerror_entry_done:
TRACE_IRQS_OFF
ret
@@ -1330,7 +1341,7 @@
cmpq %rax, RIP+8(%rsp)
je .Lbstep_iret
cmpq $.Lgs_change, RIP+8(%rsp)
- jne .Lerror_entry_done
+ jne .Lerror_entry_done_lfence
/*
* hack: .Lgs_change can fail with user gsbase. If this happens, fix up
@@ -1338,6 +1349,7 @@
* .Lgs_change's error handler with kernel gsbase.
*/
SWAPGS
+ FENCE_SWAPGS_USER_ENTRY
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
jmp .Lerror_entry_done
@@ -1352,6 +1364,7 @@
* gsbase and CR3. Switch to kernel gsbase and CR3:
*/
SWAPGS
+ FENCE_SWAPGS_USER_ENTRY
SWITCH_TO_KERNEL_CR3 scratch_reg=%rax
/*
@@ -1443,6 +1456,7 @@
swapgs
cld
+ FENCE_SWAPGS_USER_ENTRY
SWITCH_TO_KERNEL_CR3 scratch_reg=%rdx
movq %rsp, %rdx
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index e48ca3af..8a88e73 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -29,12 +29,12 @@ extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);
#ifdef CONFIG_PARAVIRT_CLOCK
-extern u8 pvclock_page
+extern u8 pvclock_page[PAGE_SIZE]
__attribute__((visibility("hidden")));
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
-extern u8 hvclock_page
+extern u8 hvclock_page[PAGE_SIZE]
__attribute__((visibility("hidden")));
#endif
@@ -191,13 +191,24 @@ notrace static inline u64 vgetsns(int *mode)
if (gtod->vclock_mode == VCLOCK_TSC)
cycles = vread_tsc();
+
+ /*
+ * For any memory-mapped vclock type, we need to make sure that gcc
+ * doesn't cleverly hoist a load before the mode check. Otherwise we
+ * might end up touching the memory-mapped page even if the vclock in
+ * question isn't enabled, which will segfault. Hence the barriers.
+ */
#ifdef CONFIG_PARAVIRT_CLOCK
- else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
+ else if (gtod->vclock_mode == VCLOCK_PVCLOCK) {
+ barrier();
cycles = vread_pvclock(mode);
+ }
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
- else if (gtod->vclock_mode == VCLOCK_HVCLOCK)
+ else if (gtod->vclock_mode == VCLOCK_HVCLOCK) {
+ barrier();
cycles = vread_hvclock(mode);
+ }
#endif
else
return 0;
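The vDSO hunk relies on barrier() being a compiler-only fence: it stops the load of the vclock page from being hoisted above the mode check without emitting any instruction. A minimal sketch of the guarded-load pattern (the "page" here is an ordinary variable rather than a conditionally mapped one):

#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

static volatile int vclock_mode;
static int clock_page; /* stand-in for a page that may not be mapped */

static int read_cycles(void)
{
        if (vclock_mode == 1) {
                /* Without the barrier, the compiler may hoist the load of
                 * clock_page above the mode check and touch it even when
                 * the mode says it must not be accessed. */
                barrier();
                return clock_page;
        }
        return 0;
}

int main(void)
{
        vclock_mode = 0;
        printf("%d\n", read_cycles());
        return 0;
}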
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 130e81e..050368d 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -48,7 +48,7 @@ static inline void generic_apic_probe(void)
#ifdef CONFIG_X86_LOCAL_APIC
-extern unsigned int apic_verbosity;
+extern int apic_verbosity;
extern int local_apic_timer_c2_ok;
extern int disable_apic;
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index a07ffd2..d3983fd 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -18,6 +18,20 @@
* Note: efi_info is commonly left uninitialized, but that field has a
* private magic, so it is better to leave it unchanged.
*/
+
+#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
+
+#define BOOT_PARAM_PRESERVE(struct_member) \
+ { \
+ .start = offsetof(struct boot_params, struct_member), \
+ .len = sizeof_mbr(struct boot_params, struct_member), \
+ }
+
+struct boot_params_to_save {
+ unsigned int start;
+ unsigned int len;
+};
+
static void sanitize_boot_params(struct boot_params *boot_params)
{
/*
@@ -36,19 +50,40 @@ static void sanitize_boot_params(struct boot_params *boot_params)
*/
if (boot_params->sentinel) {
/* fields in boot_params are left uninitialized, clear them */
- memset(&boot_params->ext_ramdisk_image, 0,
- (char *)&boot_params->efi_info -
- (char *)&boot_params->ext_ramdisk_image);
- memset(&boot_params->kbd_status, 0,
- (char *)&boot_params->hdr -
- (char *)&boot_params->kbd_status);
- memset(&boot_params->_pad7[0], 0,
- (char *)&boot_params->edd_mbr_sig_buffer[0] -
- (char *)&boot_params->_pad7[0]);
- memset(&boot_params->_pad8[0], 0,
- (char *)&boot_params->eddbuf[0] -
- (char *)&boot_params->_pad8[0]);
- memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
+ static struct boot_params scratch;
+ char *bp_base = (char *)boot_params;
+ char *save_base = (char *)&scratch;
+ int i;
+
+ const struct boot_params_to_save to_save[] = {
+ BOOT_PARAM_PRESERVE(screen_info),
+ BOOT_PARAM_PRESERVE(apm_bios_info),
+ BOOT_PARAM_PRESERVE(tboot_addr),
+ BOOT_PARAM_PRESERVE(ist_info),
+ BOOT_PARAM_PRESERVE(hd0_info),
+ BOOT_PARAM_PRESERVE(hd1_info),
+ BOOT_PARAM_PRESERVE(sys_desc_table),
+ BOOT_PARAM_PRESERVE(olpc_ofw_header),
+ BOOT_PARAM_PRESERVE(efi_info),
+ BOOT_PARAM_PRESERVE(alt_mem_k),
+ BOOT_PARAM_PRESERVE(scratch),
+ BOOT_PARAM_PRESERVE(e820_entries),
+ BOOT_PARAM_PRESERVE(eddbuf_entries),
+ BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
+ BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+ BOOT_PARAM_PRESERVE(hdr),
+ BOOT_PARAM_PRESERVE(e820_table),
+ BOOT_PARAM_PRESERVE(eddbuf),
+ };
+
+ memset(&scratch, 0, sizeof(scratch));
+
+ for (i = 0; i < ARRAY_SIZE(to_save); i++) {
+ memcpy(save_base + to_save[i].start,
+ bp_base + to_save[i].start, to_save[i].len);
+ }
+
+ memcpy(boot_params, save_base, sizeof(*boot_params));
}
}
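The sanitize_boot_params rewrite above inverts the old approach: instead of memset-ing the gaps between known fields, it whitelists the fields to keep, copies them into a zeroed scratch struct, and copies the scratch back, so anything not listed is cleared. A compact sketch of the offsetof/sizeof whitelist pattern on a toy struct:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct params {
        int  keep_a;
        int  junk;
        char keep_b[4];
};

struct range { size_t start, len; };

#define PRESERVE(m) \
        { offsetof(struct params, m), sizeof(((struct params *)0)->m) }

int main(void)
{
        struct params p = { .keep_a = 1, .junk = 2, .keep_b = "ok" };
        struct params scratch;
        const struct range to_save[] = { PRESERVE(keep_a), PRESERVE(keep_b) };

        /* Copy only whitelisted members into a zeroed scratch copy, then
         * write the whole scratch copy back: everything not listed is
         * thereby cleared. */
        memset(&scratch, 0, sizeof(scratch));
        for (size_t i = 0; i < sizeof(to_save) / sizeof(to_save[0]); i++)
                memcpy((char *)&scratch + to_save[i].start,
                       (char *)&p + to_save[i].start, to_save[i].len);
        memcpy(&p, &scratch, sizeof(p));

        printf("%d %d %s\n", p.keep_a, p.junk, p.keep_b); /* 1 0 ok */
        return 0;
}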
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 117644d..5694825 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -22,8 +22,8 @@ enum cpuid_leafs
CPUID_LNX_3,
CPUID_7_0_EBX,
CPUID_D_1_EAX,
- CPUID_F_0_EDX,
- CPUID_F_1_EDX,
+ CPUID_LNX_4,
+ CPUID_DUMMY,
CPUID_8000_0008_EBX,
CPUID_6_EAX,
CPUID_8000_000A_EDX,
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 0cf7049..759f0a1 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -271,13 +271,18 @@
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (EDX), word 11 */
-#define X86_FEATURE_CQM_LLC (11*32+ 1) /* LLC QoS if 1 */
-
-/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (EDX), word 12 */
-#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring */
-#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
-#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */
+/*
+ * Extended auxiliary flags: Linux defined - for features scattered in various
+ * CPUID levels like 0xf, etc.
+ *
+ * Reuse free bits when adding new feature flags!
+ */
+#define X86_FEATURE_CQM_LLC (11*32+ 0) /* LLC QoS if 1 */
+#define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* LLC occupancy monitoring */
+#define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* LLC Total MBM monitoring */
+#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
+#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
+#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
@@ -383,5 +388,6 @@
#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
+#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
#endif /* _ASM_X86_CPUFEATURES_H */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7014dba..3245b95 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1113,6 +1113,7 @@ struct kvm_x86_ops {
int (*update_pi_irte)(struct kvm *kvm, unsigned int host_irq,
uint32_t guest_irq, bool set);
void (*apicv_post_state_restore)(struct kvm_vcpu *vcpu);
+ bool (*dy_apicv_has_pending_interrupt)(struct kvm_vcpu *vcpu);
int (*set_hv_timer)(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc);
void (*cancel_hv_timer)(struct kvm_vcpu *vcpu);
@@ -1427,25 +1428,29 @@ enum {
#define kvm_arch_vcpu_memslots_id(vcpu) ((vcpu)->arch.hflags & HF_SMM_MASK ? 1 : 0)
#define kvm_memslots_for_spte_role(kvm, role) __kvm_memslots(kvm, (role).smm)
+asmlinkage void __noreturn kvm_spurious_fault(void);
+
/*
* Hardware virtualization extension instructions may fault if a
* reboot turns off virtualization while processes are running.
- * Trap the fault and ignore the instruction if that happens.
+ * Usually after catching the fault we just panic; during reboot
+ * the instruction is ignored instead.
*/
-asmlinkage void kvm_spurious_fault(void);
-
-#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
- "666: " insn "\n\t" \
- "668: \n\t" \
- ".pushsection .fixup, \"ax\" \n" \
- "667: \n\t" \
- cleanup_insn "\n\t" \
- "cmpb $0, kvm_rebooting \n\t" \
- "jne 668b \n\t" \
- __ASM_SIZE(push) " $666b \n\t" \
- "jmp kvm_spurious_fault \n\t" \
- ".popsection \n\t" \
- _ASM_EXTABLE(666b, 667b)
+#define ____kvm_handle_fault_on_reboot(insn, cleanup_insn) \
+ "666: \n\t" \
+ insn "\n\t" \
+ "jmp 668f \n\t" \
+ "667: \n\t" \
+ "call kvm_spurious_fault \n\t" \
+ "668: \n\t" \
+ ".pushsection .fixup, \"ax\" \n\t" \
+ "700: \n\t" \
+ cleanup_insn "\n\t" \
+ "cmpb $0, kvm_rebooting\n\t" \
+ "je 667b \n\t" \
+ "jmp 668b \n\t" \
+ ".popsection \n\t" \
+ _ASM_EXTABLE(666b, 700b)
#define __kvm_handle_fault_on_reboot(insn) \
____kvm_handle_fault_on_reboot(insn, "")
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index f85f43d..a1d22e4 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -334,6 +334,7 @@
#define MSR_AMD64_PATCH_LEVEL 0x0000008b
#define MSR_AMD64_TSC_RATIO 0xc0000104
#define MSR_AMD64_NB_CFG 0xc001001f
+#define MSR_AMD64_CPUID_FN_1 0xc0011004
#define MSR_AMD64_PATCH_LOADER 0xc0010020
#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
#define MSR_AMD64_OSVW_STATUS 0xc0010141
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 599c273..28cb2b3 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -202,7 +202,7 @@
" lfence;\n" \
" jmp 902b;\n" \
" .align 16\n" \
- "903: addl $4, %%esp;\n" \
+ "903: lea 4(%%esp), %%esp;\n" \
" pushl %[thunk_target];\n" \
" ret;\n" \
" .align 16\n" \
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index e375d4266..a046770 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -768,6 +768,7 @@ static __always_inline bool pv_vcpu_is_preempted(long cpu)
PV_RESTORE_ALL_CALLER_REGS \
FRAME_END \
"ret;" \
+ ".size " PV_THUNK_NAME(func) ", .-" PV_THUNK_NAME(func) ";" \
".popsection")
/* Get a reference to a callee-save function */
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index afbc872..b771bb3d 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -40,7 +40,7 @@ asmlinkage void simd_coprocessor_error(void);
asmlinkage void xen_divide_error(void);
asmlinkage void xen_xennmi(void);
asmlinkage void xen_xendebug(void);
-asmlinkage void xen_xenint3(void);
+asmlinkage void xen_int3(void);
asmlinkage void xen_overflow(void);
asmlinkage void xen_bounds(void);
asmlinkage void xen_invalid_op(void);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 02020f2..b316bd6 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
/*
* Debug level, exported for io_apic.c
*/
-unsigned int apic_verbosity;
+int apic_verbosity;
int pic_mode;
@@ -715,7 +715,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
/*
- * Temporary interrupt handler.
+ * Temporary interrupt handler and polled calibration function.
*/
static void __init lapic_cal_handler(struct clock_event_device *dev)
{
@@ -799,7 +799,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
static int __init calibrate_APIC_clock(void)
{
struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
- void (*real_handler)(struct clock_event_device *dev);
+ u64 tsc_perj = 0, tsc_start = 0;
+ unsigned long jif_start;
unsigned long deltaj;
long delta, deltatsc;
int pm_referenced = 0;
@@ -830,29 +831,65 @@ static int __init calibrate_APIC_clock(void)
apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
"calibrating APIC timer ...\n");
+ /*
+ * There are platforms without global clockevent devices. Instead of
+ * making the calibration conditional on that, use a polling-based
+ * approach everywhere.
+ */
local_irq_disable();
- /* Replace the global interrupt handler */
- real_handler = global_clock_event->event_handler;
- global_clock_event->event_handler = lapic_cal_handler;
-
/*
* Setup the APIC counter to maximum. There is no way the lapic
* can underflow in the 100ms detection time frame
*/
__setup_APIC_LVTT(0xffffffff, 0, 0);
- /* Let the interrupts run */
+ /*
+ * Methods to terminate the calibration loop:
+ * 1) Global clockevent if available (jiffies)
+ * 2) TSC if available and frequency is known
+ */
+ jif_start = READ_ONCE(jiffies);
+
+ if (tsc_khz) {
+ tsc_start = rdtsc();
+ tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
+ }
+
+ /*
+ * Enable interrupts so the tick can fire, if a global
+ * clockevent device is available
+ */
local_irq_enable();
- while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
- cpu_relax();
+ while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
+ /* Wait for a tick to elapse */
+ while (1) {
+ if (tsc_khz) {
+ u64 tsc_now = rdtsc();
+ if ((tsc_now - tsc_start) >= tsc_perj) {
+ tsc_start += tsc_perj;
+ break;
+ }
+ } else {
+ unsigned long jif_now = READ_ONCE(jiffies);
+
+ if (time_after(jif_now, jif_start)) {
+ jif_start = jif_now;
+ break;
+ }
+ }
+ cpu_relax();
+ }
+
+ /* Invoke the calibration routine */
+ local_irq_disable();
+ lapic_cal_handler(NULL);
+ local_irq_enable();
+ }
local_irq_disable();
- /* Restore the real event handler */
- global_clock_event->event_handler = real_handler;
-
/* Build delta t1-t2 as apic timer counts down */
delta = lapic_cal_t1 - lapic_cal_t2;
apic_printk(APIC_VERBOSE, "... lapic delta = %ld\n", delta);
@@ -904,10 +941,11 @@ static int __init calibrate_APIC_clock(void)
levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
/*
- * PM timer calibration failed or not turned on
- * so lets try APIC timer based calibration
+ * PM timer calibration failed or not turned on, so let's try APIC
+ * timer-based calibration, if a global clockevent device is
+ * available.
*/
- if (!pm_referenced) {
+ if (!pm_referenced && global_clock_event) {
apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
/*
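The calibration rework above polls for tick boundaries instead of hijacking the global clockevent handler: it busy-waits until one jiffy's worth of TSC cycles (or a jiffies increment) has elapsed, then advances the reference point so rounding error does not accumulate. A userspace sketch of the same tick-wait loop, using CLOCK_MONOTONIC as the free-running counter:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

int main(void)
{
        const uint64_t ns_per_tick = 10 * 1000 * 1000; /* a 10ms "jiffy" */
        uint64_t start = now_ns();

        for (int tick = 0; tick < 5; tick++) {
                while (now_ns() - start < ns_per_tick)
                        ;               /* cpu_relax() in the kernel loop */
                start += ns_per_tick;   /* like tsc_start += tsc_perj */
                printf("tick %d\n", tick);
        }
        return 0;
}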
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index da1f5e7..f86f912 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -799,6 +799,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
msr_set_bit(MSR_AMD64_DE_CFG, 31);
}
+static bool rdrand_force;
+
+static int __init rdrand_cmdline(char *str)
+{
+ if (!str)
+ return -EINVAL;
+
+ if (!strcmp(str, "force"))
+ rdrand_force = true;
+ else
+ return -EINVAL;
+
+ return 0;
+}
+early_param("rdrand", rdrand_cmdline);
+
+static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
+{
+ /*
+ * Saving of the MSR used to hide the RDRAND support during
+ * suspend/resume is done by arch/x86/power/cpu.c, which is
+ * dependent on CONFIG_PM_SLEEP.
+ */
+ if (!IS_ENABLED(CONFIG_PM_SLEEP))
+ return;
+
+ /*
+ * The nordrand option can clear X86_FEATURE_RDRAND, so check for
+ * RDRAND support using the CPUID function directly.
+ */
+ if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
+ return;
+
+ msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
+
+ /*
+ * Verify that the CPUID change has occurred in case the kernel is
+ * running virtualized and the hypervisor doesn't support the MSR.
+ */
+ if (cpuid_ecx(1) & BIT(30)) {
+ pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
+ return;
+ }
+
+ clear_cpu_cap(c, X86_FEATURE_RDRAND);
+ pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
+}
+
+static void init_amd_jg(struct cpuinfo_x86 *c)
+{
+ /*
+ * Some BIOS implementations do not restore proper RDRAND support
+ * across suspend and resume. Check whether to hide the RDRAND
+ * instruction support via CPUID.
+ */
+ clear_rdrand_cpuid_bit(c);
+}
+
static void init_amd_bd(struct cpuinfo_x86 *c)
{
u64 value;
@@ -813,6 +871,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
wrmsrl_safe(MSR_F15H_IC_CFG, value);
}
}
+
+ /*
+ * Some BIOS implementations do not restore proper RDRAND support
+ * across suspend and resume. Check on whether to hide the RDRAND
+ * instruction support via CPUID.
+ */
+ clear_rdrand_cpuid_bit(c);
}
static void init_amd_zn(struct cpuinfo_x86 *c)
@@ -855,6 +920,7 @@ static void init_amd(struct cpuinfo_x86 *c)
case 0x10: init_amd_gh(c); break;
case 0x12: init_amd_ln(c); break;
case 0x15: init_amd_bd(c); break;
+ case 0x16: init_amd_jg(c); break;
case 0x17: init_amd_zn(c); break;
}
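After clear_rdrand_cpuid_bit() takes effect, CPUID.1:ECX bit 30 reads as zero, which is also how the code verifies that the hypervisor honored the MSR write. A small userspace check of that same bit, using the compiler's <cpuid.h> helper (this checker is not part of the patch):

#include <stdio.h>
#include <cpuid.h> /* GCC/Clang helper; userspace only */

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
                return 1;

        /* CPUID.1:ECX[30] is the RDRAND bit the quirk hides. */
        printf("RDRAND %s\n", (ecx & (1u << 30)) ? "advertised" : "hidden");
        return 0;
}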
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index a5cde74..ee7d176 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -32,6 +32,7 @@
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
+static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
@@ -96,17 +97,11 @@ void __init check_bugs(void)
if (boot_cpu_has(X86_FEATURE_STIBP))
x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;
- /* Select the proper spectre mitigation before patching alternatives */
+ /* Select the proper CPU mitigations before patching alternatives: */
+ spectre_v1_select_mitigation();
spectre_v2_select_mitigation();
-
- /*
- * Select proper mitigation for any exposure to the Speculative Store
- * Bypass vulnerability.
- */
ssb_select_mitigation();
-
l1tf_select_mitigation();
-
mds_select_mitigation();
arch_smt_update();
@@ -272,6 +267,98 @@ static int __init mds_cmdline(char *str)
early_param("mds", mds_cmdline);
#undef pr_fmt
+#define pr_fmt(fmt) "Spectre V1 : " fmt
+
+enum spectre_v1_mitigation {
+ SPECTRE_V1_MITIGATION_NONE,
+ SPECTRE_V1_MITIGATION_AUTO,
+};
+
+static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
+ SPECTRE_V1_MITIGATION_AUTO;
+
+static const char * const spectre_v1_strings[] = {
+ [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
+ [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
+};
+
+/*
+ * Does SMAP provide full mitigation against speculative kernel access to
+ * userspace?
+ */
+static bool smap_works_speculatively(void)
+{
+ if (!boot_cpu_has(X86_FEATURE_SMAP))
+ return false;
+
+ /*
+ * On CPUs which are vulnerable to Meltdown, SMAP does not
+ * prevent speculative access to user data in the L1 cache.
+ * Consider SMAP to be non-functional as a mitigation on these
+ * CPUs.
+ */
+ if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
+ return false;
+
+ return true;
+}
+
+static void __init spectre_v1_select_mitigation(void)
+{
+ if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
+ spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+ return;
+ }
+
+ if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
+ /*
+ * With Spectre v1, a user can speculatively control either
+ * path of a conditional swapgs with a user-controlled GS
+ * value. The mitigation is to add lfences to both code paths.
+ *
+ * If FSGSBASE is enabled, the user can put a kernel address in
+ * GS, in which case SMAP provides no protection.
+ *
+ * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
+ * FSGSBASE enablement patches have been merged. ]
+ *
+ * If FSGSBASE is disabled, the user can only put a user space
+ * address in GS. That makes an attack harder, but still
+ * possible if there's no SMAP protection.
+ */
+ if (!smap_works_speculatively()) {
+ /*
+ * Mitigation can be provided from SWAPGS itself or
+ * PTI as the CR3 write in the Meltdown mitigation
+ * is serializing.
+ *
+ * If neither is there, mitigate with an LFENCE to
+ * stop speculation through swapgs.
+ */
+ if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
+ !boot_cpu_has(X86_FEATURE_PTI))
+ setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
+
+ /*
+ * Enable lfences in the kernel entry (non-swapgs)
+ * paths, to prevent user entry from speculatively
+ * skipping swapgs.
+ */
+ setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
+ }
+ }
+
+ pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
+}
+
+static int __init nospectre_v1_cmdline(char *str)
+{
+ spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
+ return 0;
+}
+early_param("nospectre_v1", nospectre_v1_cmdline);
+
+#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt
static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
@@ -1196,7 +1283,7 @@ static ssize_t l1tf_show_state(char *buf)
static ssize_t mds_show_state(char *buf)
{
- if (!hypervisor_is_type(X86_HYPER_NATIVE)) {
+ if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
return sprintf(buf, "%s; SMT Host state unknown\n",
mds_strings[mds_mitigation]);
}
@@ -1258,7 +1345,7 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
break;
case X86_BUG_SPECTRE_V1:
- return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+ return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
case X86_BUG_SPECTRE_V2:
return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 1073118..b33fdfa 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -808,6 +808,30 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
}
}
+static void init_cqm(struct cpuinfo_x86 *c)
+{
+ if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
+ c->x86_cache_max_rmid = -1;
+ c->x86_cache_occ_scale = -1;
+ return;
+ }
+
+ /* will be overridden if occupancy monitoring exists */
+ c->x86_cache_max_rmid = cpuid_ebx(0xf);
+
+ if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
+ cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
+ cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
+ u32 eax, ebx, ecx, edx;
+
+ /* QoS sub-leaf, EAX=0Fh, ECX=1 */
+ cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
+
+ c->x86_cache_max_rmid = ecx;
+ c->x86_cache_occ_scale = ebx;
+ }
+}
+
void get_cpu_cap(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
@@ -839,33 +863,6 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
c->x86_capability[CPUID_D_1_EAX] = eax;
}
- /* Additional Intel-defined flags: level 0x0000000F */
- if (c->cpuid_level >= 0x0000000F) {
-
- /* QoS sub-leaf, EAX=0Fh, ECX=0 */
- cpuid_count(0x0000000F, 0, &eax, &ebx, &ecx, &edx);
- c->x86_capability[CPUID_F_0_EDX] = edx;
-
- if (cpu_has(c, X86_FEATURE_CQM_LLC)) {
- /* will be overridden if occupancy monitoring exists */
- c->x86_cache_max_rmid = ebx;
-
- /* QoS sub-leaf, EAX=0Fh, ECX=1 */
- cpuid_count(0x0000000F, 1, &eax, &ebx, &ecx, &edx);
- c->x86_capability[CPUID_F_1_EDX] = edx;
-
- if ((cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC)) ||
- ((cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL)) ||
- (cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)))) {
- c->x86_cache_max_rmid = ecx;
- c->x86_cache_occ_scale = ebx;
- }
- } else {
- c->x86_cache_max_rmid = -1;
- c->x86_cache_occ_scale = -1;
- }
- }
-
/* AMD-defined flags: level 0x80000001 */
eax = cpuid_eax(0x80000000);
c->extended_cpuid_level = eax;
@@ -896,6 +893,7 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
init_scattered_cpuid_features(c);
init_speculation_control(c);
+ init_cqm(c);
/*
* Clear/Set all flags overridden by options, after probe.
@@ -954,6 +952,7 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
#define NO_L1TF BIT(3)
#define NO_MDS BIT(4)
#define MSBDS_ONLY BIT(5)
+#define NO_SWAPGS BIT(6)
#define VULNWL(_vendor, _family, _model, _whitelist) \
{ X86_VENDOR_##_vendor, _family, _model, X86_FEATURE_ANY, _whitelist }
@@ -977,29 +976,37 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
VULNWL_INTEL(ATOM_BONNELL, NO_SPECULATION),
VULNWL_INTEL(ATOM_BONNELL_MID, NO_SPECULATION),
- VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
- VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY),
- VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY),
- VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY),
- VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY),
- VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY),
+ VULNWL_INTEL(ATOM_SILVERMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_SILVERMONT_X, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_SILVERMONT_MID, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_AIRMONT, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+ VULNWL_INTEL(XEON_PHI_KNL, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
+ VULNWL_INTEL(XEON_PHI_KNM, NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
VULNWL_INTEL(CORE_YONAH, NO_SSB),
- VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY),
+ VULNWL_INTEL(ATOM_AIRMONT_MID, NO_L1TF | MSBDS_ONLY | NO_SWAPGS),
- VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF),
- VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF),
- VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF),
+ VULNWL_INTEL(ATOM_GOLDMONT, NO_MDS | NO_L1TF | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_GOLDMONT_X, NO_MDS | NO_L1TF | NO_SWAPGS),
+ VULNWL_INTEL(ATOM_GOLDMONT_PLUS, NO_MDS | NO_L1TF | NO_SWAPGS),
+
+ /*
+ * Technically, swapgs isn't serializing on AMD (despite it previously
+ * being documented as such in the APM). But according to AMD, %gs is
+ * updated non-speculatively, and the issuing of %gs-relative memory
+ * operands will be blocked until the %gs update completes, which is
+ * good enough for our purposes.
+ */
/* AMD Family 0xf - 0x12 */
- VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
- VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
- VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
- VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS),
+ VULNWL_AMD(0x0f, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+ VULNWL_AMD(0x10, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+ VULNWL_AMD(0x11, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
+ VULNWL_AMD(0x12, NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS),
/* FAMILY_ANY must be last, otherwise 0x0f - 0x12 matches won't work */
- VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS),
+ VULNWL_AMD(X86_FAMILY_ANY, NO_MELTDOWN | NO_L1TF | NO_MDS | NO_SWAPGS),
{}
};
@@ -1036,6 +1043,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
setup_force_cpu_bug(X86_BUG_MSBDS_ONLY);
}
+ if (!cpu_matches(NO_SWAPGS))
+ setup_force_cpu_bug(X86_BUG_SWAPGS);
+
if (cpu_matches(NO_MELTDOWN))
return;
diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c
index 2c0bd38..fa07a22 100644
--- a/arch/x86/kernel/cpu/cpuid-deps.c
+++ b/arch/x86/kernel/cpu/cpuid-deps.c
@@ -59,6 +59,9 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512_4FMAPS, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512_VPOPCNTDQ, X86_FEATURE_AVX512F },
+ { X86_FEATURE_CQM_OCCUP_LLC, X86_FEATURE_CQM_LLC },
+ { X86_FEATURE_CQM_MBM_TOTAL, X86_FEATURE_CQM_LLC },
+ { X86_FEATURE_CQM_MBM_LOCAL, X86_FEATURE_CQM_LLC },
{}
};
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index 772c219..5a52672 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -21,6 +21,10 @@ struct cpuid_bit {
static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_APERFMPERF, CPUID_ECX, 0, 0x00000006, 0 },
{ X86_FEATURE_EPB, CPUID_ECX, 3, 0x00000006, 0 },
+ { X86_FEATURE_CQM_LLC, CPUID_EDX, 1, 0x0000000f, 0 },
+ { X86_FEATURE_CQM_OCCUP_LLC, CPUID_EDX, 0, 0x0000000f, 1 },
+ { X86_FEATURE_CQM_MBM_TOTAL, CPUID_EDX, 1, 0x0000000f, 1 },
+ { X86_FEATURE_CQM_MBM_LOCAL, CPUID_EDX, 2, 0x0000000f, 1 },
{ X86_FEATURE_CAT_L3, CPUID_EBX, 1, 0x00000010, 0 },
{ X86_FEATURE_CAT_L2, CPUID_EBX, 2, 0x00000010, 0 },
{ X86_FEATURE_CDP_L3, CPUID_ECX, 2, 0x00000010, 1 },
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 7f89d60..cee45d4 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -830,6 +830,7 @@ asm(
"cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne %al;"
"ret;"
+".size __raw_callee_save___kvm_vcpu_is_preempted, .-__raw_callee_save___kvm_vcpu_is_preempted;"
".popsection");
#endif
diff --git a/arch/x86/kernel/sysfb_efi.c b/arch/x86/kernel/sysfb_efi.c
index 623965e..897da52 100644
--- a/arch/x86/kernel/sysfb_efi.c
+++ b/arch/x86/kernel/sysfb_efi.c
@@ -231,9 +231,55 @@ static const struct dmi_system_id efifb_dmi_system_table[] __initconst = {
{},
};
+/*
+ * Some devices have a portrait LCD but advertise a landscape resolution (and
+ * pitch). We simply swap width and height for these devices so that we can
+ * correctly deal with some of them coming with multiple resolutions.
+ */
+static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
+ {
+ /*
+ * Lenovo MIIX310-10ICR, only some batches have the troublesome
+ * 800x1280 portrait screen. Luckily the portrait version has
+ * its own BIOS version, so we match on that.
+ */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "MIIX 310-10ICR"),
+ DMI_EXACT_MATCH(DMI_BIOS_VERSION, "1HCN44WW"),
+ },
+ },
+ {
+ /* Lenovo MIIX 320-10ICR with 800x1280 portrait screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "Lenovo MIIX 320-10ICR"),
+ },
+ },
+ {
+ /* Lenovo D330 with 800x1280 or 1200x1920 portrait screen */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION,
+ "Lenovo ideapad D330-10IGM"),
+ },
+ },
+ {},
+};
+
__init void sysfb_apply_efi_quirks(void)
{
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
!(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
dmi_check_system(efifb_dmi_system_table);
+
+ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI &&
+ dmi_check_system(efifb_dmi_swap_width_height)) {
+ u16 temp = screen_info.lfb_width;
+
+ screen_info.lfb_width = screen_info.lfb_height;
+ screen_info.lfb_height = temp;
+ screen_info.lfb_linelength = 4 * screen_info.lfb_width;
+ }
}
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 9a327d5..d78a614 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -47,8 +47,6 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
[CPUID_7_0_EBX] = { 7, 0, CPUID_EBX},
[CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX},
- [CPUID_F_0_EDX] = { 0xf, 0, CPUID_EDX},
- [CPUID_F_1_EDX] = { 0xf, 1, CPUID_EDX},
[CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
[CPUID_6_EAX] = { 6, 0, CPUID_EAX},
[CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e0f982e..cdc0c46 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4532,11 +4532,11 @@ static void update_permission_bitmask(struct kvm_vcpu *vcpu,
*/
/* Faults from writes to non-writable pages */
- u8 wf = (pfec & PFERR_WRITE_MASK) ? ~w : 0;
+ u8 wf = (pfec & PFERR_WRITE_MASK) ? (u8)~w : 0;
/* Faults from user mode accesses to supervisor pages */
- u8 uf = (pfec & PFERR_USER_MASK) ? ~u : 0;
+ u8 uf = (pfec & PFERR_USER_MASK) ? (u8)~u : 0;
/* Faults from fetches of non-executable pages*/
- u8 ff = (pfec & PFERR_FETCH_MASK) ? ~x : 0;
+ u8 ff = (pfec & PFERR_FETCH_MASK) ? (u8)~x : 0;
/* Faults from kernel mode fetches of user pages */
u8 smepf = 0;
/* Faults from kernel mode accesses of user pages */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ea454d3..0f33f00 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5146,6 +5146,11 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
kvm_vcpu_wake_up(vcpu);
}
+static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
{
unsigned long flags;
@@ -7203,6 +7208,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.pmu_ops = &amd_pmu_ops,
.deliver_posted_interrupt = svm_deliver_avic_intr,
+ .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt,
.update_pi_irte = svm_update_pi_irte,
.setup_mce = svm_setup_mce,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4cf16378..2e310ea 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -10411,6 +10411,11 @@ static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
return ((rvi & 0xf0) > (vppr & 0xf0));
}
+static bool vmx_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
+{
+ return pi_test_on(vcpu_to_pi_desc(vcpu));
+}
+
static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
if (!kvm_vcpu_apicv_active(vcpu))
@@ -14387,6 +14392,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.guest_apic_has_interrupt = vmx_guest_apic_has_interrupt,
.sync_pir_to_irr = vmx_sync_pir_to_irr,
.deliver_posted_interrupt = vmx_deliver_posted_interrupt,
+ .dy_apicv_has_pending_interrupt = vmx_dy_apicv_has_pending_interrupt,
.set_tss_addr = vmx_set_tss_addr,
.set_identity_map_addr = vmx_set_identity_map_addr,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cea6568..e10a7a4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9336,6 +9336,22 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
}
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+ if (READ_ONCE(vcpu->arch.pv.pv_unhalted))
+ return true;
+
+ if (kvm_test_request(KVM_REQ_NMI, vcpu) ||
+ kvm_test_request(KVM_REQ_SMI, vcpu) ||
+ kvm_test_request(KVM_REQ_EVENT, vcpu))
+ return true;
+
+ if (vcpu->arch.apicv_active && kvm_x86_ops->dy_apicv_has_pending_interrupt(vcpu))
+ return true;
+
+ return false;
+}
+
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
return vcpu->arch.preempted_in_kernel;
diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
index 2dd1fe13..19f7079 100644
--- a/arch/x86/lib/cpu.c
+++ b/arch/x86/lib/cpu.c
@@ -1,5 +1,6 @@
#include <linux/types.h>
#include <linux/export.h>
+#include <asm/cpu.h>
unsigned int x86_family(unsigned int sig)
{
diff --git a/arch/x86/math-emu/fpu_emu.h b/arch/x86/math-emu/fpu_emu.h
index a5a41ec..0c12222 100644
--- a/arch/x86/math-emu/fpu_emu.h
+++ b/arch/x86/math-emu/fpu_emu.h
@@ -177,7 +177,7 @@ static inline void reg_copy(FPU_REG const *x, FPU_REG *y)
#define setexponentpos(x,y) { (*(short *)&((x)->exp)) = \
((y) + EXTENDED_Ebias) & 0x7fff; }
#define exponent16(x) (*(short *)&((x)->exp))
-#define setexponent16(x,y) { (*(short *)&((x)->exp)) = (y); }
+#define setexponent16(x,y) { (*(short *)&((x)->exp)) = (u16)(y); }
#define addexponent(x,y) { (*(short *)&((x)->exp)) += (y); }
#define stdexp(x) { (*(short *)&((x)->exp)) += EXTENDED_Ebias; }
diff --git a/arch/x86/math-emu/reg_constant.c b/arch/x86/math-emu/reg_constant.c
index 8dc9095..742619e 100644
--- a/arch/x86/math-emu/reg_constant.c
+++ b/arch/x86/math-emu/reg_constant.c
@@ -18,7 +18,7 @@
#include "control_w.h"
#define MAKE_REG(s, e, l, h) { l, h, \
- ((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
+ (u16)((EXTENDED_Ebias+(e)) | ((SIGN_##s != 0)*0x8000)) }
FPU_REG const CONST_1 = MAKE_REG(POS, 0, 0x00000000, 0x80000000);
#if 0
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9d9765e..1bcb724 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -261,13 +261,14 @@ static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
pmd = pmd_offset(pud, address);
pmd_k = pmd_offset(pud_k, address);
+
+ if (pmd_present(*pmd) != pmd_present(*pmd_k))
+ set_pmd(pmd, *pmd_k);
+
if (!pmd_present(*pmd_k))
return NULL;
-
- if (!pmd_present(*pmd))
- set_pmd(pmd, *pmd_k);
else
- BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
+ BUG_ON(pmd_pfn(*pmd) != pmd_pfn(*pmd_k));
return pmd_k;
}
@@ -287,17 +288,13 @@ void vmalloc_sync_all(void)
spin_lock(&pgd_lock);
list_for_each_entry(page, &pgd_list, lru) {
spinlock_t *pgt_lock;
- pmd_t *ret;
/* the pgt_lock only for Xen */
pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
spin_lock(pgt_lock);
- ret = vmalloc_sync_one(page_address(page), address);
+ vmalloc_sync_one(page_address(page), address);
spin_unlock(pgt_lock);
-
- if (!ret)
- break;
}
spin_unlock(&pgd_lock);
}
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 513ce09..3aa3149 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -13,6 +13,7 @@
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>
+#include <linux/dmi.h>
#include <asm/pgtable.h>
#include <asm/proto.h>
@@ -24,7 +25,7 @@
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
-#include <linux/dmi.h>
+#include <asm/cpu_device_id.h>
#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
@@ -398,15 +399,14 @@ static int __init bsp_pm_check_init(void)
core_initcall(bsp_pm_check_init);
-static int msr_init_context(const u32 *msr_id, const int total_num)
+static int msr_build_context(const u32 *msr_id, const int num)
{
- int i = 0;
+ struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
struct saved_msr *msr_array;
+ int total_num;
+ int i, j;
- if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
- pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
- return -EINVAL;
- }
+ total_num = saved_msrs->num + num;
msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
if (!msr_array) {
@@ -414,19 +414,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
return -ENOMEM;
}
- for (i = 0; i < total_num; i++) {
- msr_array[i].info.msr_no = msr_id[i];
+ if (saved_msrs->array) {
+ /*
+ * Multiple callbacks can invoke this function, so copy any
+ * MSR save requests from previous invocations.
+ */
+ memcpy(msr_array, saved_msrs->array,
+ sizeof(struct saved_msr) * saved_msrs->num);
+
+ kfree(saved_msrs->array);
+ }
+
+ for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
+ msr_array[i].info.msr_no = msr_id[j];
msr_array[i].valid = false;
msr_array[i].info.reg.q = 0;
}
- saved_context.saved_msrs.num = total_num;
- saved_context.saved_msrs.array = msr_array;
+ saved_msrs->num = total_num;
+ saved_msrs->array = msr_array;
return 0;
}
/*
- * The following section is a quirk framework for problematic BIOSen:
+ * The following sections are a quirk framework for problematic BIOSen:
* Sometimes MSRs are modified by the BIOSen after suspended to
* RAM, this might cause unexpected behavior after wakeup.
* Thus we save/restore these specified MSRs across suspend/resume
@@ -441,7 +452,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
- return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
+ return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}
static const struct dmi_system_id msr_save_dmi_table[] = {
@@ -456,9 +467,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
{}
};
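+
+/*
+ * MSR_AMD64_CPUID_FN_1 holds CPUID Fn0000_0001 feature overrides; save and
+ * restore it so bits the kernel has adjusted survive suspend/resume.
+ */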
+static int msr_save_cpuid_features(const struct x86_cpu_id *c)
+{
+ u32 cpuid_msr_id[] = {
+ MSR_AMD64_CPUID_FN_1,
+ };
+
+ pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
+ c->family);
+
+ return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
+}
+
+static const struct x86_cpu_id msr_save_cpu_table[] = {
+ {
+ .vendor = X86_VENDOR_AMD,
+ .family = 0x15,
+ .model = X86_MODEL_ANY,
+ .feature = X86_FEATURE_ANY,
+ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+ },
+ {
+ .vendor = X86_VENDOR_AMD,
+ .family = 0x16,
+ .model = X86_MODEL_ANY,
+ .feature = X86_FEATURE_ANY,
+ .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
+ },
+ {}
+};
+
+typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
+static int pm_cpu_check(const struct x86_cpu_id *c)
+{
+ const struct x86_cpu_id *m;
+ int ret = 0;
+
+ m = x86_match_cpu(msr_save_cpu_table);
+ if (m) {
+ pm_cpu_match_t fn;
+
+ fn = (pm_cpu_match_t)m->driver_data;
+ ret = fn(m);
+ }
+
+ return ret;
+}
+
static int pm_check_save_msr(void)
{
dmi_check_system(msr_save_dmi_table);
+ pm_cpu_check(msr_save_cpu_table);
+
return 0;
}
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 3cf302b..8901a1f 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -6,6 +6,9 @@
targets += $(purgatory-y)
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
+$(obj)/string.o: $(srctree)/arch/x86/boot/compressed/string.c FORCE
+ $(call if_changed_rule,cc_o_c)
+
$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
$(call if_changed_rule,cc_o_c)
@@ -17,11 +20,34 @@
# Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
# in turn leaves some undefined symbols like __fentry__ in purgatory and not
-# sure how to relocate those. Like kexec-tools, use custom flags.
+# sure how to relocate those.
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_sha256.o += $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_purgatory.o += $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_string.o += $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_kexec-purgatory.o += $(CC_FLAGS_FTRACE)
+endif
-KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -Os -mcmodel=large
-KBUILD_CFLAGS += -m$(BITS)
-KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
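+
+# Stack-protector and retpoline instrumentation reference support symbols
+# that don't exist in the standalone purgatory binary, so strip those
+# flags as well.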
+ifdef CONFIG_STACKPROTECTOR
+CFLAGS_REMOVE_sha256.o += -fstack-protector
+CFLAGS_REMOVE_purgatory.o += -fstack-protector
+CFLAGS_REMOVE_string.o += -fstack-protector
+CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector
+endif
+
+ifdef CONFIG_STACKPROTECTOR_STRONG
+CFLAGS_REMOVE_sha256.o += -fstack-protector-strong
+CFLAGS_REMOVE_purgatory.o += -fstack-protector-strong
+CFLAGS_REMOVE_string.o += -fstack-protector-strong
+CFLAGS_REMOVE_kexec-purgatory.o += -fstack-protector-strong
+endif
+
+ifdef CONFIG_RETPOLINE
+CFLAGS_REMOVE_sha256.o += $(RETPOLINE_CFLAGS)
+CFLAGS_REMOVE_purgatory.o += $(RETPOLINE_CFLAGS)
+CFLAGS_REMOVE_string.o += $(RETPOLINE_CFLAGS)
+CFLAGS_REMOVE_kexec-purgatory.o += $(RETPOLINE_CFLAGS)
+endif
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)
diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c
index 025c34a..7971f7a 100644
--- a/arch/x86/purgatory/purgatory.c
+++ b/arch/x86/purgatory/purgatory.c
@@ -70,3 +70,9 @@ void purgatory(void)
}
copy_backup_region();
}
+
+/*
+ * Defined in order to reuse memcpy() and memset() from
+ * arch/x86/boot/compressed/string.c
+ */
+void warn(const char *msg) {}
diff --git a/arch/x86/purgatory/string.c b/arch/x86/purgatory/string.c
deleted file mode 100644
index 795ca4f..0000000
--- a/arch/x86/purgatory/string.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Simple string functions.
- *
- * Copyright (C) 2014 Red Hat Inc.
- *
- * Author:
- * Vivek Goyal <vgoyal@redhat.com>
- *
- * This source code is licensed under the GNU General Public License,
- * Version 2. See the file COPYING for more details.
- */
-
-#include <linux/types.h>
-
-#include "../boot/string.c"
-
-void *memcpy(void *dst, const void *src, size_t len)
-{
- return __builtin_memcpy(dst, src, len);
-}
-
-void *memset(void *dst, int c, size_t len)
-{
- return __builtin_memset(dst, c, len);
-}
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 782f98b..1730a26 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -597,12 +597,12 @@ struct trap_array_entry {
static struct trap_array_entry trap_array[] = {
{ debug, xen_xendebug, true },
- { int3, xen_xenint3, true },
{ double_fault, xen_double_fault, true },
#ifdef CONFIG_X86_MCE
{ machine_check, xen_machine_check, true },
#endif
{ nmi, xen_xennmi, true },
+ { int3, xen_int3, false },
{ overflow, xen_overflow, false },
#ifdef CONFIG_IA32_EMULATION
{ entry_INT80_compat, xen_entry_INT80_compat, false },
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 417b339..3a6feed 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -30,7 +30,6 @@
xen_pv_trap debug
xen_pv_trap xendebug
xen_pv_trap int3
-xen_pv_trap xenint3
xen_pv_trap xennmi
xen_pv_trap overflow
xen_pv_trap bounds
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index a285fbd..15580e4 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -515,6 +515,7 @@ void cpu_reset(void)
"add %2, %2, %7\n\t"
"addi %0, %0, -1\n\t"
"bnez %0, 1b\n\t"
+ "isync\n\t"
/* Jump to identity mapping */
"jx %3\n"
"2:\n\t"
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index becd793..d8d2ac2 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -1886,9 +1886,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
blk_rq_pos(container_of(rb_prev(&req->rb_node),
struct request, rb_node))) {
struct bfq_queue *bfqq = bfq_init_rq(req);
- struct bfq_data *bfqd = bfqq->bfqd;
+ struct bfq_data *bfqd;
struct request *prev, *next_rq;
+ if (!bfqq)
+ return;
+
+ bfqd = bfqq->bfqd;
+
/* Reposition request in its sort_list */
elv_rb_del(&bfqq->sort_list, req);
elv_rb_add(&bfqq->sort_list, req);
@@ -1930,6 +1935,9 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
struct bfq_queue *bfqq = bfq_init_rq(rq),
*next_bfqq = bfq_init_rq(next);
+ if (!bfqq)
+ return;
+
/*
* If next and rq belong to the same bfq_queue and next is older
* than rq, then reposition rq in the fifo (by substituting next
@@ -4590,12 +4598,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
spin_lock_irq(&bfqd->lock);
bfqq = bfq_init_rq(rq);
- if (at_head || blk_rq_is_passthrough(rq)) {
+ if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
if (at_head)
list_add(&rq->queuelist, &bfqd->dispatch);
else
list_add_tail(&rq->queuelist, &bfqd->dispatch);
- } else { /* bfqq is assumed to be non null here */
+ } else {
idle_timer_disabled = __bfq_insert_request(bfqd, rq);
/*
* Update bfqq, because, if a queue merge has occurred
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 67b5fb8..5bd90cd 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -291,8 +291,12 @@ bool bio_integrity_prep(struct bio *bio)
ret = bio_integrity_add_page(bio, virt_to_page(buf),
bytes, offset);
- if (ret == 0)
- return false;
+ if (ret == 0) {
+ printk(KERN_ERR "could not attach integrity payload\n");
+ kfree(buf);
+ status = BLK_STS_RESOURCE;
+ goto err_end_io;
+ }
if (ret < bytes)
break;
diff --git a/block/blk-core.c b/block/blk-core.c
index 40ad6ea..365d17b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -198,6 +198,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
rq->internal_tag = -1;
rq->start_time_ns = ktime_get_ns();
rq->part = NULL;
+ refcount_set(&rq->ref, 1);
}
EXPORT_SYMBOL(blk_rq_init);
@@ -420,24 +421,25 @@ void blk_sync_queue(struct request_queue *q)
EXPORT_SYMBOL(blk_sync_queue);
/**
- * blk_set_preempt_only - set QUEUE_FLAG_PREEMPT_ONLY
+ * blk_set_pm_only - increment pm_only counter
* @q: request queue pointer
- *
- * Returns the previous value of the PREEMPT_ONLY flag - 0 if the flag was not
- * set and 1 if the flag was already set.
*/
-int blk_set_preempt_only(struct request_queue *q)
+void blk_set_pm_only(struct request_queue *q)
{
- return blk_queue_flag_test_and_set(QUEUE_FLAG_PREEMPT_ONLY, q);
+ atomic_inc(&q->pm_only);
}
-EXPORT_SYMBOL_GPL(blk_set_preempt_only);
+EXPORT_SYMBOL_GPL(blk_set_pm_only);
-void blk_clear_preempt_only(struct request_queue *q)
+void blk_clear_pm_only(struct request_queue *q)
{
- blk_queue_flag_clear(QUEUE_FLAG_PREEMPT_ONLY, q);
- wake_up_all(&q->mq_freeze_wq);
+ int pm_only;
+
+ pm_only = atomic_dec_return(&q->pm_only);
+ WARN_ON_ONCE(pm_only < 0);
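+	/* wake waiters only once the last pm_only reference is dropped */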
+ if (pm_only == 0)
+ wake_up_all(&q->mq_freeze_wq);
}
-EXPORT_SYMBOL_GPL(blk_clear_preempt_only);
+EXPORT_SYMBOL_GPL(blk_clear_pm_only);
/**
* __blk_run_queue_uncond - run a queue whether or not it has been stopped
@@ -915,7 +917,7 @@ EXPORT_SYMBOL(blk_alloc_queue);
*/
int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{
- const bool preempt = flags & BLK_MQ_REQ_PREEMPT;
+ const bool pm = flags & BLK_MQ_REQ_PREEMPT;
while (true) {
bool success = false;
@@ -923,11 +925,11 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
rcu_read_lock();
if (percpu_ref_tryget_live(&q->q_usage_counter)) {
/*
- * The code that sets the PREEMPT_ONLY flag is
- * responsible for ensuring that that flag is globally
- * visible before the queue is unfrozen.
+ * The code that increments the pm_only counter is
+ * responsible for ensuring that that counter is
+ * globally visible before the queue is unfrozen.
*/
- if (preempt || !blk_queue_preempt_only(q)) {
+ if (pm || !blk_queue_pm_only(q)) {
success = true;
} else {
percpu_ref_put(&q->q_usage_counter);
@@ -952,7 +954,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
wait_event(q->mq_freeze_wq,
(atomic_read(&q->mq_freeze_depth) == 0 &&
- (preempt || !blk_queue_preempt_only(q))) ||
+ (pm || !blk_queue_pm_only(q))) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
return -ENODEV;
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index cb1e6cf..a5ea868 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -102,6 +102,14 @@ static int blk_flags_show(struct seq_file *m, const unsigned long flags,
return 0;
}
+static int queue_pm_only_show(void *data, struct seq_file *m)
+{
+ struct request_queue *q = data;
+
+ seq_printf(m, "%d\n", atomic_read(&q->pm_only));
+ return 0;
+}
+
#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(QUEUED),
@@ -132,7 +140,6 @@ static const char *const blk_queue_flag_name[] = {
QUEUE_FLAG_NAME(REGISTERED),
QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
QUEUE_FLAG_NAME(QUIESCED),
- QUEUE_FLAG_NAME(PREEMPT_ONLY),
};
#undef QUEUE_FLAG_NAME
@@ -209,6 +216,7 @@ static ssize_t queue_write_hint_store(void *data, const char __user *buf,
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
{ "poll_stat", 0400, queue_poll_stat_show },
{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
+ { "pm_only", 0600, queue_pm_only_show, NULL },
{ "state", 0600, queue_state_show, queue_state_write },
{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index 43c2615..e11b5da 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -616,8 +616,8 @@ static int iort_dev_find_its_id(struct device *dev, u32 req_id,
/* Move to ITS specific data */
its = (struct acpi_iort_its_group *)node->node_data;
- if (idx > its->its_count) {
- dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
+ if (idx >= its->its_count) {
+ dev_err(dev, "requested ITS ID index [%d] overruns ITS entries [%d]\n",
idx, its->its_count);
return -ENXIO;
}
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 995c4d8..761f0c1 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -30,7 +30,9 @@
#include "internal.h"
+#ifdef CONFIG_DMI
static const struct dmi_system_id acpi_rev_dmi_table[] __initconst;
+#endif
/*
* POLICY: If *anything* doesn't work, put it on the blacklist.
@@ -74,7 +76,9 @@ int __init acpi_blacklisted(void)
}
(void)early_acpi_osi_init();
+#ifdef CONFIG_DMI
dmi_check_system(acpi_rev_dmi_table);
+#endif
return blacklisted;
}
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 9f5bc9b..71e777d 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2138,8 +2138,18 @@ static struct binder_thread *binder_get_txn_from_and_acq_inner(
static void binder_free_transaction(struct binder_transaction *t)
{
- if (t->buffer)
- t->buffer->transaction = NULL;
+ struct binder_proc *target_proc = t->to_proc;
+
+ if (target_proc) {
+ binder_inner_proc_lock(target_proc);
+ if (t->buffer)
+ t->buffer->transaction = NULL;
+ binder_inner_proc_unlock(target_proc);
+ }
+ /*
+ * If the transaction has no target_proc, then
+ * t->buffer->transaction has already been cleared.
+ */
kfree(t);
binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
@@ -3104,7 +3114,7 @@ static void binder_transaction(struct binder_proc *proc,
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
- if (target_node && target_proc == proc) {
+ if (target_node && target_proc->pid == proc->pid) {
binder_user_error("%d:%d got transaction to context manager from process owning it\n",
proc->pid, thread->pid);
return_error = BR_FAILED_REPLY;
@@ -3848,10 +3858,12 @@ static int binder_thread_write(struct binder_proc *proc,
buffer->debug_id,
buffer->transaction ? "active" : "finished");
+ binder_inner_proc_lock(proc);
if (buffer->transaction) {
buffer->transaction->buffer = NULL;
buffer->transaction = NULL;
}
+ binder_inner_proc_unlock(proc);
if (buffer->async_transaction && buffer->target_node) {
struct binder_node *buf_node;
struct binder_work *w;
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index c92c10d..5bece97 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -313,6 +313,9 @@ static int ahci_platform_get_phy(struct ahci_host_priv *hpriv, u32 port,
hpriv->phys[port] = NULL;
rc = 0;
break;
+ case -EPROBE_DEFER:
+ /* Do not complain yet */
+ break;
default:
dev_err(dev,
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1984fc7..3a64fa4 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1803,6 +1803,21 @@ static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
return 1;
}
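+
+/*
+ * Guard against ATA commands that claim more blocks than the passthrough
+ * request actually carries, which would otherwise overrun the buffer.
+ */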
+static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
+{
+ struct request *rq = scmd->request;
+ u32 req_blocks;
+
+ if (!blk_rq_is_passthrough(rq))
+ return true;
+
+ req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
+ if (n_blocks > req_blocks)
+ return false;
+
+ return true;
+}
+
/**
* ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
* @qc: Storage for translated ATA taskfile
@@ -1847,6 +1862,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
scsi_10_lba_len(cdb, &block, &n_block);
if (cdb[1] & (1 << 3))
tf_flags |= ATA_TFLAG_FUA;
+ if (!ata_check_nblocks(scmd, n_block))
+ goto invalid_fld;
break;
case READ_6:
case WRITE_6:
@@ -1861,6 +1878,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
*/
if (!n_block)
n_block = 256;
+ if (!ata_check_nblocks(scmd, n_block))
+ goto invalid_fld;
break;
case READ_16:
case WRITE_16:
@@ -1871,6 +1890,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
scsi_16_lba_len(cdb, &block, &n_block);
if (cdb[1] & (1 << 3))
tf_flags |= ATA_TFLAG_FUA;
+ if (!ata_check_nblocks(scmd, n_block))
+ goto invalid_fld;
break;
default:
DPRINTK("no-byte command\n");
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index c5ea0fc..873cc09 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -674,6 +674,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
unsigned int offset;
unsigned char *buf;
+ if (!qc->cursg) {
+ qc->curbytes = qc->nbytes;
+ return;
+ }
if (qc->curbytes == qc->nbytes - qc->sect_size)
ap->hsm_task_state = HSM_ST_LAST;
@@ -699,6 +703,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
if (qc->cursg_ofs == qc->cursg->length) {
qc->cursg = sg_next(qc->cursg);
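+		/* sg list exhausted: steer the state machine to its last state */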
+ if (!qc->cursg)
+ ap->hsm_task_state = HSM_ST_LAST;
qc->cursg_ofs = 0;
}
}
diff --git a/drivers/ata/libata-zpodd.c b/drivers/ata/libata-zpodd.c
index 173e6f2..eefda51 100644
--- a/drivers/ata/libata-zpodd.c
+++ b/drivers/ata/libata-zpodd.c
@@ -56,7 +56,7 @@ static enum odd_mech_type zpodd_get_mech_type(struct ata_device *dev)
unsigned int ret;
struct rm_feature_desc *desc;
struct ata_taskfile tf;
- static const char cdb[] = { GPCMD_GET_CONFIGURATION,
+ static const char cdb[ATAPI_CDB_LEN] = { GPCMD_GET_CONFIGURATION,
2, /* only 1 feature descriptor requested */
0, 3, /* 3, removable medium feature */
0, 0, 0,/* reserved */
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 82532c2..008905d 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -63,6 +63,7 @@
#include <asm/byteorder.h>
#include <linux/vmalloc.h>
#include <linux/jiffies.h>
+#include <linux/nospec.h>
#include "iphase.h"
#include "suni.h"
#define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
@@ -2760,8 +2761,11 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
}
if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
board = ia_cmds.status;
- if ((board < 0) || (board > iadev_count))
- board = 0;
+
+ if ((board < 0) || (board > iadev_count))
+ board = 0;
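+	/* clamp the index under speculation to block Spectre-v1 style leaks */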
+ board = array_index_nospec(board, iadev_count + 1);
+
iadev = ia_dev[board];
switch (ia_cmds.cmd) {
case MEMDUMP:
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 7a419a7..559b047 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -66,6 +66,9 @@ struct driver_private {
* probed first.
* @device - pointer back to the struct device that this structure is
* associated with.
+ * @dead - This device is currently either in the process of being removed
+ * from the system or has already been removed. Any asynchronous events
+ * scheduled for this device should exit without taking any action.
*
* Nothing outside of the driver core should ever touch these fields.
*/
@@ -76,6 +79,7 @@ struct device_private {
struct klist_node knode_bus;
struct list_head deferred_probe;
struct device *device;
+ u8 dead:1;
};
#define to_device_private_parent(obj) \
container_of(obj, struct device_private, knode_parent)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index a7790fd..cd13be9 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2036,6 +2036,24 @@ void put_device(struct device *dev)
}
EXPORT_SYMBOL_GPL(put_device);
+bool kill_device(struct device *dev)
+{
+ /*
+ * Require the device lock and set the "dead" flag to guarantee that
+ * the update behavior is consistent with the other bitfields near
+ * it and that we cannot have an asynchronous probe routine trying
+ * to run while we are tearing out the bus/class/sysfs from
+ * underneath the device.
+ */
+ lockdep_assert_held(&dev->mutex);
+
+ if (dev->p->dead)
+ return false;
+ dev->p->dead = true;
+ return true;
+}
+EXPORT_SYMBOL_GPL(kill_device);
+
/**
* device_del - delete device from system.
* @dev: device.
@@ -2055,6 +2073,10 @@ void device_del(struct device *dev)
struct kobject *glue_dir = NULL;
struct class_interface *class_intf;
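+	/*
+	 * Mark the device as dead so any asynchronous probe still in flight
+	 * bails out before the teardown below pulls state out from under it.
+	 */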
+ device_lock(dev);
+ kill_device(dev);
+ device_unlock(dev);
+
/* Notify clients of device removal. This call must come
* before dpm_sysfs_remove().
*/
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 7d53342..b7e156f 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -746,15 +746,6 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
bool async_allowed;
int ret;
- /*
- * Check if device has already been claimed. This may
- * happen with driver loading, device discovery/registration,
- * and deferred probe processing happens all at once with
- * multiple threads.
- */
- if (dev->driver)
- return -EBUSY;
-
ret = driver_match_device(drv, dev);
if (ret == 0) {
/* no match */
@@ -789,6 +780,15 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
device_lock(dev);
+ /*
+	 * Check if device has already been removed or claimed. This may
+	 * happen when driver loading, device discovery/registration, and
+	 * deferred probe processing all run at once with multiple threads.
+ */
+ if (dev->p->dead || dev->driver)
+ goto out_unlock;
+
if (dev->parent)
pm_runtime_get_sync(dev->parent);
@@ -799,7 +799,7 @@ static void __device_attach_async_helper(void *_dev, async_cookie_t cookie)
if (dev->parent)
pm_runtime_put(dev->parent);
-
+out_unlock:
device_unlock(dev);
put_device(dev);
@@ -912,7 +912,7 @@ static int __driver_attach(struct device *dev, void *data)
if (dev->parent && dev->bus->need_parent_lock)
device_lock(dev->parent);
device_lock(dev);
- if (!dev->driver)
+ if (!dev->p->dead && !dev->driver)
driver_probe_device(drv, dev);
device_unlock(dev);
if (dev->parent && dev->bus->need_parent_lock)
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 81f9bd6..8ebe99b 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -5241,7 +5241,7 @@ static int drbd_do_auth(struct drbd_connection *connection)
unsigned int key_len;
char secret[SHARED_SECRET_MAX]; /* 64 byte */
unsigned int resp_size;
- SHASH_DESC_ON_STACK(desc, connection->cram_hmac_tfm);
+ struct shash_desc *desc;
struct packet_info pi;
struct net_conf *nc;
int err, rv;
@@ -5254,6 +5254,13 @@ static int drbd_do_auth(struct drbd_connection *connection)
memcpy(secret, nc->shared_secret, key_len);
rcu_read_unlock();
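+	/*
+	 * The descriptor size depends on the hmac transform in use, so
+	 * allocate it from the heap rather than placing a potentially large
+	 * buffer on the stack.
+	 */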
+ desc = kmalloc(sizeof(struct shash_desc) +
+ crypto_shash_descsize(connection->cram_hmac_tfm),
+ GFP_KERNEL);
+ if (!desc) {
+ rv = -1;
+ goto fail;
+ }
desc->tfm = connection->cram_hmac_tfm;
desc->flags = 0;
@@ -5396,7 +5403,10 @@ static int drbd_do_auth(struct drbd_connection *connection)
kfree(peers_ch);
kfree(response);
kfree(right_response);
- shash_desc_zero(desc);
+ if (desc) {
+ shash_desc_zero(desc);
+ kfree(desc);
+ }
return rv;
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index f1e63eb..cef8e00 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -886,7 +886,7 @@ static void loop_unprepare_queue(struct loop_device *lo)
static int loop_kthread_worker_fn(void *worker_ptr)
{
- current->flags |= PF_LESS_THROTTLE;
+ current->flags |= PF_LESS_THROTTLE | PF_MEMALLOC_NOIO;
return kthread_worker_fn(worker_ptr);
}
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index c13a6d1..fa60f26 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -1218,7 +1218,7 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
struct block_device *bdev)
{
sock_shutdown(nbd);
- kill_bdev(bdev);
+ __invalidate_device(bdev, true);
nbd_bdev_reset(bdev);
if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
&nbd->config->runtime_flags))
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index d568fbd..2023592 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -112,6 +112,9 @@ static int ath_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
ath = kzalloc(sizeof(*ath), GFP_KERNEL);
if (!ath)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 8001323..aa6b7ed 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -369,6 +369,9 @@ static int bcm_open(struct hci_uart *hu)
bt_dev_dbg(hu->hdev, "hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
if (!bcm)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index 46ace32..e922852 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -406,6 +406,9 @@ static int intel_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
intel = kzalloc(sizeof(*intel), GFP_KERNEL);
if (!intel)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index c915daf0..efeb813 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -299,6 +299,19 @@ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+/* Check the underlying device or tty has flow control support */
+bool hci_uart_has_flow_control(struct hci_uart *hu)
+{
+ /* serdev nodes check if the needed operations are present */
+ if (hu->serdev)
+ return true;
+
+ if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset)
+ return true;
+
+ return false;
+}
+
/* Flow control or un-flow control the device */
void hci_uart_set_flow_control(struct hci_uart *hu, bool enable)
{
diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c
index ffb0066..23791df 100644
--- a/drivers/bluetooth/hci_mrvl.c
+++ b/drivers/bluetooth/hci_mrvl.c
@@ -66,6 +66,9 @@ static int mrvl_open(struct hci_uart *hu)
BT_DBG("hu %p", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL);
if (!mrvl)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 77004c2..f96e58d 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -450,6 +450,9 @@ static int qca_open(struct hci_uart *hu)
BT_DBG("hu %p qca_open", hu);
+ if (!hci_uart_has_flow_control(hu))
+ return -EOPNOTSUPP;
+
qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL);
if (!qca)
return -ENOMEM;
diff --git a/drivers/bluetooth/hci_uart.h b/drivers/bluetooth/hci_uart.h
index 00cab2f..067a610 100644
--- a/drivers/bluetooth/hci_uart.h
+++ b/drivers/bluetooth/hci_uart.h
@@ -118,6 +118,7 @@ int hci_uart_tx_wakeup(struct hci_uart *hu);
int hci_uart_init_ready(struct hci_uart *hu);
void hci_uart_init_work(struct work_struct *work);
void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed);
+bool hci_uart_has_flow_control(struct hci_uart *hu);
void hci_uart_set_flow_control(struct hci_uart *hu, bool enable);
void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed,
unsigned int oper_speed);
diff --git a/drivers/bus/mhi/controllers/mhi_arch_qcom.c b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
index cdb334b..a19b806 100644
--- a/drivers/bus/mhi/controllers/mhi_arch_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_arch_qcom.c
@@ -31,8 +31,6 @@ struct arch_info {
void *boot_ipc_log;
void *tsync_ipc_log;
struct mhi_device *boot_dev;
- struct mhi_link_info current_link_info;
- struct work_struct bw_scale_work;
bool drv_connected;
struct notifier_block pm_notifier;
struct completion pm_completion;
@@ -346,7 +344,7 @@ static int mhi_arch_pcie_scale_bw(struct mhi_controller *mhi_cntrl,
struct pci_dev *pci_dev,
struct mhi_link_info *link_info)
{
- int ret, scale;
+ int ret;
mhi_cntrl->lpm_disable(mhi_cntrl, mhi_cntrl->priv_data);
ret = msm_pcie_set_link_bandwidth(pci_dev, link_info->target_link_speed,
@@ -356,60 +354,22 @@ static int mhi_arch_pcie_scale_bw(struct mhi_controller *mhi_cntrl,
if (ret)
return ret;
- /* if we switch to low bw release bus scale voting */
- scale = !(link_info->target_link_speed == PCI_EXP_LNKSTA_CLS_2_5GB);
- mhi_arch_set_bus_request(mhi_cntrl, scale);
+ /* do a bus scale vote based on gen speeds */
+ mhi_arch_set_bus_request(mhi_cntrl, link_info->target_link_speed);
- MHI_VERB("bw changed to speed:0x%x width:0x%x bus_scale:%d\n",
- link_info->target_link_speed, link_info->target_link_width,
- scale);
+ MHI_VERB("bw changed to speed:0x%x width:0x%x\n",
+ link_info->target_link_speed, link_info->target_link_width);
return 0;
}
-static void mhi_arch_pcie_bw_scale_work(struct work_struct *work)
+static int mhi_arch_bw_scale(struct mhi_controller *mhi_cntrl,
+ struct mhi_link_info *link_info)
{
- struct arch_info *arch_info = container_of(work,
- struct arch_info,
- bw_scale_work);
- struct mhi_dev *mhi_dev = arch_info->mhi_dev;
+ struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct pci_dev *pci_dev = mhi_dev->pci_dev;
- struct device *dev = &pci_dev->dev;
- struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
- struct mhi_link_info mhi_link_info;
- struct mhi_link_info *cur_info = &arch_info->current_link_info;
- int ret;
- mutex_lock(&mhi_cntrl->pm_mutex);
- if (!mhi_dev->powered_on || MHI_IS_SUSPENDED(mhi_dev->suspend_mode))
- goto exit_work;
-
- /* copy the latest speed change */
- write_lock_irq(&mhi_cntrl->pm_lock);
- mhi_link_info = mhi_cntrl->mhi_link_info;
- write_unlock_irq(&mhi_cntrl->pm_lock);
-
- /* link is already set to current settings */
- if (cur_info->target_link_speed == mhi_link_info.target_link_speed &&
- cur_info->target_link_width == mhi_link_info.target_link_width)
- goto exit_work;
-
- ret = mhi_arch_pcie_scale_bw(mhi_cntrl, pci_dev, &mhi_link_info);
- if (ret)
- goto exit_work;
-
- *cur_info = mhi_link_info;
-
-exit_work:
- mutex_unlock(&mhi_cntrl->pm_mutex);
-}
-
-static void mhi_arch_pcie_bw_scale_cb(struct mhi_controller *mhi_cntrl,
- struct mhi_dev *mhi_dev)
-{
- struct arch_info *arch_info = mhi_dev->arch_info;
-
- schedule_work(&arch_info->bw_scale_work);
+ return mhi_arch_pcie_scale_bw(mhi_cntrl, pci_dev, link_info);
}
static int mhi_bl_probe(struct mhi_device *mhi_device,
@@ -454,6 +414,7 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
+ struct mhi_link_info *cur_link_info;
char node[32];
int ret;
u16 linkstat;
@@ -462,7 +423,6 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
struct msm_pcie_register_event *reg_event;
struct pci_dev *root_port;
struct device_node *root_ofnode;
- struct mhi_link_info *cur_link_info;
arch_info = devm_kzalloc(&mhi_dev->pci_dev->dev,
sizeof(*arch_info), GFP_KERNEL);
@@ -566,28 +526,24 @@ int mhi_arch_pcie_init(struct mhi_controller *mhi_cntrl)
mhi_dev->pci_dev, NULL, 0);
mhi_dev->pci_dev->no_d3hot = true;
- INIT_WORK(&arch_info->bw_scale_work,
- mhi_arch_pcie_bw_scale_work);
- mhi_dev->bw_scale = mhi_arch_pcie_bw_scale_cb;
-
- /* store the current bw info */
- ret = pcie_capability_read_word(mhi_dev->pci_dev,
- PCI_EXP_LNKSTA, &linkstat);
- if (ret)
- return ret;
-
- cur_link_info = &arch_info->current_link_info;
- cur_link_info->target_link_speed =
- linkstat & PCI_EXP_LNKSTA_CLS;
- cur_link_info->target_link_width =
- (linkstat & PCI_EXP_LNKSTA_NLW) >>
- PCI_EXP_LNKSTA_NLW_SHIFT;
- mhi_cntrl->mhi_link_info = *cur_link_info;
+ mhi_cntrl->bw_scale = mhi_arch_bw_scale;
mhi_driver_register(&mhi_bl_driver);
}
- return mhi_arch_set_bus_request(mhi_cntrl, 1);
+ /* store the current bw info */
+ ret = pcie_capability_read_word(mhi_dev->pci_dev,
+ PCI_EXP_LNKSTA, &linkstat);
+ if (ret)
+ return ret;
+
+ cur_link_info = &mhi_cntrl->mhi_link_info;
+ cur_link_info->target_link_speed = linkstat & PCI_EXP_LNKSTA_CLS;
+ cur_link_info->target_link_width = (linkstat & PCI_EXP_LNKSTA_NLW) >>
+ PCI_EXP_LNKSTA_NLW_SHIFT;
+
+ return mhi_arch_set_bus_request(mhi_cntrl,
+ cur_link_info->target_link_speed);
}
void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
@@ -598,13 +554,12 @@ void mhi_arch_pcie_deinit(struct mhi_controller *mhi_cntrl)
static int mhi_arch_drv_suspend(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
- struct arch_info *arch_info = mhi_dev->arch_info;
struct pci_dev *pci_dev = mhi_dev->pci_dev;
struct mhi_link_info link_info, *cur_link_info;
bool bw_switched = false;
int ret;
- cur_link_info = &arch_info->current_link_info;
+ cur_link_info = &mhi_cntrl->mhi_link_info;
/* if link is not in gen 1 we need to switch to gen 1 */
if (cur_link_info->target_link_speed != PCI_EXP_LNKSTA_CLS_2_5GB) {
link_info.target_link_speed = PCI_EXP_LNKSTA_CLS_2_5GB;
@@ -630,9 +585,6 @@ static int mhi_arch_drv_suspend(struct mhi_controller *mhi_cntrl)
return ret;
}
- if (bw_switched)
- *cur_link_info = link_info;
-
return ret;
}
@@ -689,17 +641,16 @@ static int __mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
struct arch_info *arch_info = mhi_dev->arch_info;
struct pci_dev *pci_dev = mhi_dev->pci_dev;
- struct mhi_link_info *cur_info = &arch_info->current_link_info;
+ struct mhi_link_info *cur_info = &mhi_cntrl->mhi_link_info;
int ret;
MHI_LOG("Entered\n");
- /* request bus scale voting if we're on Gen 2 or higher speed */
- if (cur_info->target_link_speed != PCI_EXP_LNKSTA_CLS_2_5GB) {
- ret = mhi_arch_set_bus_request(mhi_cntrl, 1);
- if (ret)
- MHI_LOG("Could not set bus frequency, ret:%d\n", ret);
- }
+	/* request bus scale voting based on the current gen speed */
+ ret = mhi_arch_set_bus_request(mhi_cntrl,
+ cur_info->target_link_speed);
+ if (ret)
+ MHI_LOG("Could not set bus frequency, ret:%d\n", ret);
ret = msm_pcie_pm_control(MSM_PCIE_RESUME, mhi_cntrl->bus, pci_dev,
NULL, 0);
@@ -733,10 +684,8 @@ static int __mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
{
struct mhi_dev *mhi_dev = mhi_controller_get_devdata(mhi_cntrl);
- struct arch_info *arch_info = mhi_dev->arch_info;
struct pci_dev *pci_dev = mhi_dev->pci_dev;
- struct mhi_link_info *cur_info = &arch_info->current_link_info;
- struct mhi_link_info *updated_info = &mhi_cntrl->mhi_link_info;
+ struct mhi_link_info *cur_info = &mhi_cntrl->mhi_link_info;
int ret = 0;
MHI_LOG("Entered\n");
@@ -748,6 +697,19 @@ int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
case MHI_FAST_LINK_OFF:
ret = msm_pcie_pm_control(MSM_PCIE_RESUME, mhi_cntrl->bus,
pci_dev, NULL, 0);
+ if (ret ||
+ cur_info->target_link_speed == PCI_EXP_LNKSTA_CLS_2_5GB)
+ break;
+
+ /*
+	 * The BW request from the device isn't for gen 1 link speed; if
+	 * honoring it fails, all we can do is log an error.
+ */
+ if (mhi_arch_pcie_scale_bw(mhi_cntrl, pci_dev, cur_info))
+ MHI_ERR(
+ "Failed to honor bw request: speed:0x%x width:0x%x\n",
+ cur_info->target_link_speed,
+ cur_info->target_link_width);
break;
case MHI_ACTIVE_STATE:
case MHI_FAST_LINK_ON:
@@ -759,14 +721,6 @@ int mhi_arch_link_resume(struct mhi_controller *mhi_cntrl)
return ret;
}
- /* BW request from device doesn't match current link speed */
- if (cur_info->target_link_speed != updated_info->target_link_speed ||
- cur_info->target_link_width != updated_info->target_link_width) {
- ret = mhi_arch_pcie_scale_bw(mhi_cntrl, pci_dev, updated_info);
- if (!ret)
- *cur_info = *updated_info;
- }
-
msm_pcie_l1ss_timeout_enable(pci_dev);
MHI_LOG("Exited\n");
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c
index c7002ec..67e12b5 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_qcom.c
@@ -609,10 +609,6 @@ static void mhi_status_cb(struct mhi_controller *mhi_cntrl,
pm_runtime_mark_last_busy(dev);
pm_request_autosuspend(dev);
break;
- case MHI_CB_BW_REQ:
- if (mhi_dev->bw_scale)
- mhi_dev->bw_scale(mhi_cntrl, mhi_dev);
- break;
case MHI_CB_EE_MISSION_MODE:
/*
* we need to force a suspend so device can switch to
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.h b/drivers/bus/mhi/controllers/mhi_qcom.h
index fdab799..6fbbac9 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.h
+++ b/drivers/bus/mhi/controllers/mhi_qcom.h
@@ -48,9 +48,6 @@ struct mhi_dev {
dma_addr_t iova_stop;
enum mhi_suspend_mode suspend_mode;
- /* if set, soc support dynamic bw scaling */
- void (*bw_scale)(struct mhi_controller *mhi_cntrl,
- struct mhi_dev *mhi_dev);
unsigned int lpm_disable_depth;
/* lock to toggle low power modes */
spinlock_t lpm_lock;
diff --git a/drivers/bus/mhi/core/mhi_dtr.c b/drivers/bus/mhi/core/mhi_dtr.c
index 73cf01e..db33e95 100644
--- a/drivers/bus/mhi/core/mhi_dtr.c
+++ b/drivers/bus/mhi/core/mhi_dtr.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#include <linux/debugfs.h>
#include <linux/device.h>
@@ -164,6 +164,9 @@ static void mhi_dtr_dl_xfer_cb(struct mhi_device *mhi_dev,
if (dtr_msg->msg & CTRL_MSG_RI)
mhi_dev->tiocm |= TIOCM_RI;
spin_unlock_irq(res_lock);
+
+ /* Notify the update */
+ mhi_notify(mhi_dev, MHI_CB_DTR_SIGNAL);
}
static void mhi_dtr_ul_xfer_cb(struct mhi_device *mhi_dev,
diff --git a/drivers/bus/mhi/core/mhi_init.c b/drivers/bus/mhi/core/mhi_init.c
index c76d3a4..16ac408 100644
--- a/drivers/bus/mhi/core/mhi_init.c
+++ b/drivers/bus/mhi/core/mhi_init.c
@@ -14,6 +14,14 @@
#include <linux/mhi.h>
#include "mhi_internal.h"
+const char * const mhi_log_level_str[MHI_MSG_LVL_MAX] = {
+ [MHI_MSG_LVL_VERBOSE] = "Verbose",
+ [MHI_MSG_LVL_INFO] = "Info",
+ [MHI_MSG_LVL_ERROR] = "Error",
+ [MHI_MSG_LVL_CRITICAL] = "Critical",
+ [MHI_MSG_LVL_MASK_ALL] = "Mask all",
+};
+
const char * const mhi_ee_str[MHI_EE_MAX] = {
[MHI_EE_PBL] = "PBL",
[MHI_EE_SBL] = "SBL",
@@ -58,6 +66,7 @@ static const char * const mhi_pm_state_str[] = {
[MHI_PM_BIT_SYS_ERR_PROCESS] = "SYS_ERR Process",
[MHI_PM_BIT_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
[MHI_PM_BIT_LD_ERR_FATAL_DETECT] = "LD or Error Fatal Detect",
+ [MHI_PM_BIT_SHUTDOWN_NO_ACCESS] = "SHUTDOWN No Access",
};
struct mhi_bus mhi_bus;
@@ -72,6 +81,38 @@ const char *to_mhi_pm_state_str(enum MHI_PM_STATE state)
return mhi_pm_state_str[index];
}
+static ssize_t log_level_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+
+ return snprintf(buf, PAGE_SIZE, "%s\n",
+ TO_MHI_LOG_LEVEL_STR(mhi_cntrl->log_lvl));
+}
+
+static ssize_t log_level_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct mhi_device *mhi_dev = to_mhi_device(dev);
+ struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
+	u32 log_level;
+
+ if (kstrtou32(buf, 0, &log_level) < 0)
+ return -EINVAL;
+
+ mhi_cntrl->log_lvl = log_level;
+
+ MHI_LOG("IPC log level changed to: %s\n",
+ TO_MHI_LOG_LEVEL_STR(log_level));
+
+ return count;
+}
+static DEVICE_ATTR_RW(log_level);
+
static ssize_t bus_vote_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -130,27 +171,28 @@ static ssize_t device_vote_store(struct device *dev,
}
static DEVICE_ATTR_RW(device_vote);
-static struct attribute *mhi_vote_attrs[] = {
+static struct attribute *mhi_sysfs_attrs[] = {
+ &dev_attr_log_level.attr,
&dev_attr_bus_vote.attr,
&dev_attr_device_vote.attr,
NULL,
};
-static const struct attribute_group mhi_vote_group = {
- .attrs = mhi_vote_attrs,
+static const struct attribute_group mhi_sysfs_group = {
+ .attrs = mhi_sysfs_attrs,
};
-int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl)
+int mhi_create_sysfs(struct mhi_controller *mhi_cntrl)
{
return sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj,
- &mhi_vote_group);
+ &mhi_sysfs_group);
}
-void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl)
+void mhi_destroy_sysfs(struct mhi_controller *mhi_cntrl)
{
struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
- sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_vote_group);
+ sysfs_remove_group(&mhi_dev->dev.kobj, &mhi_sysfs_group);
/* relinquish any pending votes for device */
while (atomic_read(&mhi_dev->dev_vote))
@@ -183,7 +225,7 @@ void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
- if (mhi_event->offload_ev)
+ if (!mhi_event->request_irq)
continue;
free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event);
@@ -207,7 +249,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
return ret;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
- if (mhi_event->offload_ev)
+ if (!mhi_event->request_irq)
continue;
ret = request_irq(mhi_cntrl->irq[mhi_event->msi],
@@ -224,7 +266,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
error_request:
for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
- if (mhi_event->offload_ev)
+ if (!mhi_event->request_irq)
continue;
free_irq(mhi_cntrl->irq[mhi_event->msi], mhi_event);
@@ -496,15 +538,18 @@ int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
return ret;
}
-static int mhi_get_tsync_er_cfg(struct mhi_controller *mhi_cntrl)
+/* to be used only if a single event ring with the type is present */
+static int mhi_get_er_index(struct mhi_controller *mhi_cntrl,
+ enum mhi_er_data_type type)
{
int i;
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
- /* find event ring with timesync support */
- for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++)
- if (mhi_event->data_type == MHI_ER_TSYNC_ELEMENT_TYPE)
+ /* find event ring for requested type */
+ for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
+ if (mhi_event->data_type == type)
return mhi_event->er_index;
+ }
return -ENOENT;
}
@@ -581,7 +626,7 @@ int mhi_init_timesync(struct mhi_controller *mhi_cntrl)
read_unlock_bh(&mhi_cntrl->pm_lock);
/* get time-sync event ring configuration */
- ret = mhi_get_tsync_er_cfg(mhi_cntrl);
+ ret = mhi_get_er_index(mhi_cntrl, MHI_ER_TSYNC_ELEMENT_TYPE);
if (ret < 0) {
MHI_LOG("Could not find timesync event ring\n");
return ret;
@@ -611,6 +656,36 @@ int mhi_init_timesync(struct mhi_controller *mhi_cntrl)
return ret;
}
+static int mhi_init_bw_scale(struct mhi_controller *mhi_cntrl)
+{
+ int ret, er_index;
+ u32 bw_cfg_offset;
+
+ /* controller doesn't support dynamic bw switch */
+ if (!mhi_cntrl->bw_scale)
+ return -ENODEV;
+
+ ret = mhi_get_capability_offset(mhi_cntrl, BW_SCALE_CAP_ID,
+ &bw_cfg_offset);
+ if (ret)
+ return ret;
+
+ /* No ER configured to support BW scale */
+ er_index = mhi_get_er_index(mhi_cntrl, MHI_ER_BW_SCALE_ELEMENT_TYPE);
+	if (er_index < 0)
+		return er_index;
+
+ bw_cfg_offset += BW_SCALE_CFG_OFFSET;
+
+ MHI_LOG("BW_CFG OFFSET:0x%x\n", bw_cfg_offset);
+
+ /* advertise host support */
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, bw_cfg_offset,
+ MHI_BW_SCALE_SETUP(er_index));
+
+ return 0;
+}
+
int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
u32 val;
@@ -707,6 +782,9 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
mhi_write_reg(mhi_cntrl, mhi_cntrl->wake_db, 0, 0);
mhi_cntrl->wake_set = false;
+ /* setup bw scale db */
+ mhi_cntrl->bw_scale_db = base + val + (8 * MHI_BW_SCALE_CHAN_DB);
+
/* setup channel db addresses */
mhi_chan = mhi_cntrl->mhi_chan;
for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
@@ -737,6 +815,9 @@ int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
reg_info[i].mask, reg_info[i].shift,
reg_info[i].val);
+ /* setup bandwidth scaling features */
+ mhi_init_bw_scale(mhi_cntrl);
+
return 0;
}
@@ -887,6 +968,8 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
if (!mhi_cntrl->mhi_event)
return -ENOMEM;
+ INIT_LIST_HEAD(&mhi_cntrl->lp_ev_rings);
+
/* populate ev ring */
mhi_event = mhi_cntrl->mhi_event;
i = 0;
@@ -952,6 +1035,9 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
case MHI_ER_TSYNC_ELEMENT_TYPE:
mhi_event->process_event = mhi_process_tsync_event_ring;
break;
+ case MHI_ER_BW_SCALE_ELEMENT_TYPE:
+ mhi_event->process_event = mhi_process_bw_scale_ev_ring;
+ break;
}
mhi_event->hw_ring = of_property_read_bool(child, "mhi,hw-ev");
@@ -963,6 +1049,19 @@ static int of_parse_ev_cfg(struct mhi_controller *mhi_cntrl,
"mhi,client-manage");
mhi_event->offload_ev = of_property_read_bool(child,
"mhi,offload");
+
+ /*
+ * low priority events are handled in a separate worker thread
+ * to allow for sleeping functions to be called.
+ */
+ if (!mhi_event->offload_ev) {
+ if (IS_MHI_ER_PRIORITY_LOW(mhi_event))
+ list_add_tail(&mhi_event->node,
+ &mhi_cntrl->lp_ev_rings);
+ else
+ mhi_event->request_irq = true;
+ }
+
mhi_event++;
}
@@ -1242,6 +1341,7 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
INIT_WORK(&mhi_cntrl->fw_worker, mhi_fw_load_worker);
INIT_WORK(&mhi_cntrl->syserr_worker, mhi_pm_sys_err_worker);
+ INIT_WORK(&mhi_cntrl->low_priority_worker, mhi_low_priority_worker);
init_waitqueue_head(&mhi_cntrl->state_event);
mhi_cmd = mhi_cntrl->mhi_cmd;
@@ -1255,6 +1355,10 @@ int of_register_mhi_controller(struct mhi_controller *mhi_cntrl)
mhi_event->mhi_cntrl = mhi_cntrl;
spin_lock_init(&mhi_event->lock);
+
+ if (IS_MHI_ER_PRIORITY_LOW(mhi_event))
+ continue;
+
if (mhi_event->data_type == MHI_ER_CTRL_ELEMENT_TYPE)
tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
(ulong)mhi_event);
diff --git a/drivers/bus/mhi/core/mhi_internal.h b/drivers/bus/mhi/core/mhi_internal.h
index e06e76e..735e4152 100644
--- a/drivers/bus/mhi/core/mhi_internal.h
+++ b/drivers/bus/mhi/core/mhi_internal.h
@@ -8,6 +8,7 @@ extern struct bus_type mhi_bus_type;
/* MHI mmio register mapping */
#define PCI_INVALID_READ(val) (val == U32_MAX)
+#define MHI_REG_SIZE (SZ_4K)
#define MHIREGLEN (0x0)
#define MHIREGLEN_MHIREGLEN_MASK (0xFFFFFFFF)
@@ -153,6 +154,17 @@ extern struct bus_type mhi_bus_type;
#define TIMESYNC_CAP_ID (2)
+/* MHI Bandwidth scaling offsets */
+#define BW_SCALE_CFG_OFFSET (0x04)
+#define BW_SCALE_CFG_CHAN_DB_ID_MASK (0xFE000000)
+#define BW_SCALE_CFG_CHAN_DB_ID_SHIFT (25)
+#define BW_SCALE_CFG_ENABLED_MASK (0x01000000)
+#define BW_SCALE_CFG_ENABLED_SHIFT (24)
+#define BW_SCALE_CFG_ER_ID_MASK (0x00F80000)
+#define BW_SCALE_CFG_ER_ID_SHIFT (19)
+
+#define BW_SCALE_CAP_ID (3)
+
/* MHI BHI offsets */
#define BHI_BHIVERSION_MINOR (0x00)
#define BHI_BHIVERSION_MAJOR (0x04)
@@ -329,12 +341,13 @@ enum mhi_cmd_type {
#define MHI_TRE_GET_EV_TYPE(tre) (((tre)->dword[1] >> 16) & 0xFF)
#define MHI_TRE_GET_EV_STATE(tre) (((tre)->dword[0] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_EXECENV(tre) (((tre)->dword[0] >> 24) & 0xFF)
-#define MHI_TRE_GET_EV_SEQ(tre) ((tre)->dword[0])
+#define MHI_TRE_GET_EV_TSYNC_SEQ(tre) ((tre)->dword[0])
#define MHI_TRE_GET_EV_TIME(tre) ((tre)->ptr)
#define MHI_TRE_GET_EV_COOKIE(tre) lower_32_bits((tre)->ptr)
#define MHI_TRE_GET_EV_VEID(tre) (((tre)->dword[0] >> 16) & 0xFF)
#define MHI_TRE_GET_EV_LINKSPEED(tre) (((tre)->dword[1] >> 24) & 0xFF)
#define MHI_TRE_GET_EV_LINKWIDTH(tre) ((tre)->dword[0] & 0xFF)
+#define MHI_TRE_GET_EV_BW_REQ_SEQ(tre) (((tre)->dword[0] >> 8) & 0xFF)
/* transfer descriptor macros */
#define MHI_TRE_DATA_PTR(ptr) (ptr)
@@ -428,6 +441,11 @@ extern const char * const mhi_state_str[MHI_STATE_MAX];
!mhi_state_str[state]) ? \
"INVALID_STATE" : mhi_state_str[state])
+extern const char * const mhi_log_level_str[MHI_MSG_LVL_MAX];
+#define TO_MHI_LOG_LEVEL_STR(level) ((level >= MHI_MSG_LVL_MAX || \
+ !mhi_log_level_str[level]) ? \
+ "Mask all" : mhi_log_level_str[level])
+
enum {
MHI_PM_BIT_DISABLE,
MHI_PM_BIT_POR,
@@ -441,6 +459,7 @@ enum {
MHI_PM_BIT_SYS_ERR_PROCESS,
MHI_PM_BIT_SHUTDOWN_PROCESS,
MHI_PM_BIT_LD_ERR_FATAL_DETECT,
+ MHI_PM_BIT_SHUTDOWN_NO_ACCESS,
MHI_PM_BIT_MAX
};
@@ -460,6 +479,7 @@ enum MHI_PM_STATE {
MHI_PM_SHUTDOWN_PROCESS = BIT(MHI_PM_BIT_SHUTDOWN_PROCESS),
/* link not accessible */
MHI_PM_LD_ERR_FATAL_DETECT = BIT(MHI_PM_BIT_LD_ERR_FATAL_DETECT),
+ MHI_PM_SHUTDOWN_NO_ACCESS = BIT(MHI_PM_BIT_SHUTDOWN_NO_ACCESS),
};
#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state & (MHI_PM_POR | MHI_PM_M0 | \
@@ -467,7 +487,7 @@ enum MHI_PM_STATE {
MHI_PM_SYS_ERR_DETECT | MHI_PM_SYS_ERR_PROCESS | \
MHI_PM_SHUTDOWN_PROCESS | MHI_PM_FW_DL_ERR)))
#define MHI_PM_IN_ERROR_STATE(pm_state) (pm_state >= MHI_PM_FW_DL_ERR)
-#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state == MHI_PM_LD_ERR_FATAL_DETECT)
+#define MHI_PM_IN_FATAL_STATE(pm_state) (pm_state >= MHI_PM_LD_ERR_FATAL_DETECT)
#define MHI_DB_ACCESS_VALID(mhi_cntrl) (mhi_cntrl->pm_state & \
mhi_cntrl->db_access)
#define MHI_WAKE_DB_CLEAR_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
@@ -492,19 +512,38 @@ enum MHI_XFER_TYPE {
#define NR_OF_CMD_RINGS (1)
#define CMD_EL_PER_RING (128)
#define PRIMARY_CMD_RING (0)
+#define MHI_BW_SCALE_CHAN_DB (126)
#define MHI_DEV_WAKE_DB (127)
#define MHI_MAX_MTU (0xffff)
+#define MHI_BW_SCALE_SETUP(er_index) (((MHI_BW_SCALE_CHAN_DB << \
+	BW_SCALE_CFG_CHAN_DB_ID_SHIFT) & BW_SCALE_CFG_CHAN_DB_ID_MASK) | \
+	((1 << BW_SCALE_CFG_ENABLED_SHIFT) & BW_SCALE_CFG_ENABLED_MASK) | \
+	(((er_index) << BW_SCALE_CFG_ER_ID_SHIFT) & BW_SCALE_CFG_ER_ID_MASK))
+
+#define MHI_BW_SCALE_RESULT(status, seq) ((((status) & 0xF) << 8) | \
+	((seq) & 0xFF))
+#define MHI_BW_SCALE_NACK 0xF
+
enum MHI_ER_TYPE {
MHI_ER_TYPE_INVALID = 0x0,
MHI_ER_TYPE_VALID = 0x1,
};
+enum mhi_er_priority {
+ MHI_ER_PRIORITY_HIGH,
+ MHI_ER_PRIORITY_MEDIUM,
+ MHI_ER_PRIORITY_LOW,
+};
+
+#define IS_MHI_ER_PRIORITY_LOW(ev) (ev->priority >= MHI_ER_PRIORITY_LOW)
+#define IS_MHI_ER_PRIORITY_HIGH(ev) (ev->priority == MHI_ER_PRIORITY_HIGH)
+
enum mhi_er_data_type {
MHI_ER_DATA_ELEMENT_TYPE,
MHI_ER_CTRL_ELEMENT_TYPE,
MHI_ER_TSYNC_ELEMENT_TYPE,
- MHI_ER_DATA_TYPE_MAX = MHI_ER_TSYNC_ELEMENT_TYPE,
+ MHI_ER_BW_SCALE_ELEMENT_TYPE,
+ MHI_ER_DATA_TYPE_MAX = MHI_ER_BW_SCALE_ELEMENT_TYPE,
};
enum mhi_ch_ee_mask {
@@ -587,17 +626,19 @@ struct mhi_buf_info {
};
struct mhi_event {
+ struct list_head node;
u32 er_index;
u32 intmod;
u32 msi;
int chan; /* this event ring is dedicated to a channel */
- u32 priority;
+ enum mhi_er_priority priority;
enum mhi_er_data_type data_type;
struct mhi_ring ring;
struct db_cfg db_cfg;
bool hw_ring;
bool cl_manage;
bool offload_ev; /* managed by a device driver */
+ bool request_irq; /* has dedicated interrupt handler */
spinlock_t lock;
struct mhi_chan *mhi_chan; /* dedicated to channel */
struct tasklet_struct task;
@@ -700,6 +741,7 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
void mhi_pm_st_worker(struct work_struct *work);
void mhi_fw_load_worker(struct work_struct *work);
void mhi_pm_sys_err_worker(struct work_struct *work);
+void mhi_low_priority_worker(struct work_struct *work);
int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl);
void mhi_ctrl_ev_task(unsigned long data);
int mhi_pm_m0_transition(struct mhi_controller *mhi_cntrl);
@@ -712,6 +754,8 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
struct mhi_event *mhi_event, u32 event_quota);
+int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event, u32 event_quota);
int mhi_send_cmd(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
enum MHI_CMD cmd);
int __mhi_device_get_sync(struct mhi_controller *mhi_cntrl);
@@ -760,11 +804,12 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
struct mhi_chan *mhi_chan);
int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl, u32 capability,
u32 *offset);
+void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr);
int mhi_init_timesync(struct mhi_controller *mhi_cntrl);
int mhi_create_timesync_sysfs(struct mhi_controller *mhi_cntrl);
void mhi_destroy_timesync(struct mhi_controller *mhi_cntrl);
-int mhi_create_vote_sysfs(struct mhi_controller *mhi_cntrl);
-void mhi_destroy_vote_sysfs(struct mhi_controller *mhi_cntrl);
+int mhi_create_sysfs(struct mhi_controller *mhi_cntrl);
+void mhi_destroy_sysfs(struct mhi_controller *mhi_cntrl);
int mhi_early_notify_device(struct device *dev, void *data);
/* timesync log support */
diff --git a/drivers/bus/mhi/core/mhi_main.c b/drivers/bus/mhi/core/mhi_main.c
index 7eb387d..8e1e2fd 100644
--- a/drivers/bus/mhi/core/mhi_main.c
+++ b/drivers/bus/mhi/core/mhi_main.c
@@ -81,7 +81,9 @@ int mhi_get_capability_offset(struct mhi_controller *mhi_cntrl,
if (ret)
return ret;
- *offset += next_offset;
+ *offset = next_offset;
+ if (*offset >= MHI_REG_SIZE)
+ return -ENXIO;
} while (next_offset);
return -ENXIO;
@@ -255,7 +257,7 @@ static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl,
return nr_el;
}
-static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
+void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
return (addr - ring->iommu_base) + ring->base;
}
@@ -1133,25 +1135,6 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
local_rp->ptr, local_rp->dword[0], local_rp->dword[1]);
switch (type) {
- case MHI_PKT_TYPE_BW_REQ_EVENT:
- {
- struct mhi_link_info *link_info;
-
- link_info = &mhi_cntrl->mhi_link_info;
- write_lock_irq(&mhi_cntrl->pm_lock);
- link_info->target_link_speed =
- MHI_TRE_GET_EV_LINKSPEED(local_rp);
- link_info->target_link_width =
- MHI_TRE_GET_EV_LINKWIDTH(local_rp);
- write_unlock_irq(&mhi_cntrl->pm_lock);
- MHI_VERB(
- "Received BW_REQ with link speed:0x%x width:0x%x\n",
- link_info->target_link_speed,
- link_info->target_link_width);
- mhi_cntrl->status_cb(mhi_cntrl, mhi_cntrl->priv_data,
- MHI_CB_BW_REQ);
- break;
- }
case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
{
enum mhi_dev_state new_state;
@@ -1241,7 +1224,7 @@ int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl,
break;
}
default:
- MHI_ASSERT(1, "Unsupported ev type");
+ MHI_ERR("Unhandled Event: 0x%x\n", type);
break;
}
@@ -1346,7 +1329,7 @@ int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
MHI_ASSERT(type != MHI_PKT_TYPE_TSYNC_EVENT, "!TSYNC event");
- sequence = MHI_TRE_GET_EV_SEQ(local_rp);
+ sequence = MHI_TRE_GET_EV_TSYNC_SEQ(local_rp);
remote_time = MHI_TRE_GET_EV_TIME(local_rp);
do {
@@ -1392,6 +1375,94 @@ int mhi_process_tsync_event_ring(struct mhi_controller *mhi_cntrl,
return count;
}
+int mhi_process_bw_scale_ev_ring(struct mhi_controller *mhi_cntrl,
+ struct mhi_event *mhi_event,
+ u32 event_quota)
+{
+ struct mhi_tre *dev_rp;
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_link_info link_info, *cur_info = &mhi_cntrl->mhi_link_info;
+ int result, ret = 0;
+
+ mutex_lock(&mhi_cntrl->pm_mutex);
+
+ if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) {
+ MHI_LOG("No EV access, PM_STATE:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state));
+ ret = -EIO;
+ goto exit_bw_process;
+ }
+
+ /*
+ * BW change is not processed during suspend since we're suspending the
+ * link; host will process it during resume
+ */
+ if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state)) {
+ ret = -EACCES;
+ goto exit_bw_process;
+ }
+
+ spin_lock_bh(&mhi_event->lock);
+ dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);
+
+ if (ev_ring->rp == dev_rp) {
+ spin_unlock_bh(&mhi_event->lock);
+ goto exit_bw_process;
+ }
+
+ /* if rp points to base, we need to wrap it around */
+ if (dev_rp == ev_ring->base)
+ dev_rp = ev_ring->base + ev_ring->len;
+ dev_rp--;
+
+ MHI_ASSERT(MHI_TRE_GET_EV_TYPE(dev_rp) != MHI_PKT_TYPE_BW_REQ_EVENT,
+ "!BW SCALE REQ event");
+
+ link_info.target_link_speed = MHI_TRE_GET_EV_LINKSPEED(dev_rp);
+ link_info.target_link_width = MHI_TRE_GET_EV_LINKWIDTH(dev_rp);
+ link_info.sequence_num = MHI_TRE_GET_EV_BW_REQ_SEQ(dev_rp);
+
+ MHI_VERB("Received BW_REQ with seq:%d link speed:0x%x width:0x%x\n",
+ link_info.sequence_num,
+ link_info.target_link_speed,
+ link_info.target_link_width);
+
+ /* fast forward to currently processed element and recycle er */
+ ev_ring->rp = dev_rp;
+ ev_ring->wp = dev_rp - 1;
+ if (ev_ring->wp < ev_ring->base)
+ ev_ring->wp = ev_ring->base + ev_ring->len - ev_ring->el_size;
+ mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring);
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_ring_er_db(mhi_event);
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+ spin_unlock_bh(&mhi_event->lock);
+
+ ret = mhi_cntrl->bw_scale(mhi_cntrl, &link_info);
+ if (!ret)
+ *cur_info = link_info;
+
+ result = ret ? MHI_BW_SCALE_NACK : 0;
+
+ read_lock_bh(&mhi_cntrl->pm_lock);
+ if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)))
+ mhi_write_reg(mhi_cntrl, mhi_cntrl->bw_scale_db, 0,
+ MHI_BW_SCALE_RESULT(result,
+ link_info.sequence_num));
+ read_unlock_bh(&mhi_cntrl->pm_lock);
+
+exit_bw_process:
+ MHI_VERB("exit er_index:%u\n", mhi_event->er_index);
+
+ mutex_unlock(&mhi_cntrl->pm_mutex);
+
+ return ret;
+}
+
void mhi_ev_task(unsigned long data)
{
struct mhi_event *mhi_event = (struct mhi_event *)data;
@@ -1471,7 +1542,13 @@ irqreturn_t mhi_msi_handlr(int irq_number, void *dev)
if (mhi_dev)
mhi_dev->status_cb(mhi_dev, MHI_CB_PENDING_DATA);
- } else
+
+ return IRQ_HANDLED;
+ }
+
+ if (IS_MHI_ER_PRIORITY_HIGH(mhi_event))
+ tasklet_hi_schedule(&mhi_event->task);
+ else
tasklet_schedule(&mhi_event->task);
return IRQ_HANDLED;
@@ -1541,6 +1618,8 @@ irqreturn_t mhi_intvec_handlr(int irq_number, void *dev)
wake_up_all(&mhi_cntrl->state_event);
MHI_VERB("Exit\n");
+ schedule_work(&mhi_cntrl->low_priority_worker);
+
return IRQ_WAKE_THREAD;
}
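
The mhi_msi_handlr() change above amounts to a two-way priority dispatch: client-managed rings get a callback instead of a tasklet, and high-priority rings (such as the new bandwidth-scale ring) are run via tasklet_hi_schedule(). A minimal sketch of that pattern follows; the demo_* names and fields are hypothetical stand-ins for the driver's real mhi_event members, not its actual code.

#include <linux/interrupt.h>

/* hypothetical reduced form of the driver's per-ring event struct */
struct demo_event {
	bool client_managed;	/* client drains this ring itself */
	bool high_priority;	/* e.g. a bandwidth-scale request ring */
	struct tasklet_struct task;
	void (*notify_client)(struct demo_event *ev);
};

static irqreturn_t demo_msi_handler(int irq, void *dev)
{
	struct demo_event *ev = dev;

	/* client-managed rings: notify the owner, no tasklet */
	if (ev->client_managed) {
		ev->notify_client(ev);
		return IRQ_HANDLED;
	}

	/* high-priority rings jump ahead in the softirq queue */
	if (ev->high_priority)
		tasklet_hi_schedule(&ev->task);
	else
		tasklet_schedule(&ev->task);

	return IRQ_HANDLED;
}
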
diff --git a/drivers/bus/mhi/core/mhi_pm.c b/drivers/bus/mhi/core/mhi_pm.c
index 29b5cad..21bd70c9 100644
--- a/drivers/bus/mhi/core/mhi_pm.c
+++ b/drivers/bus/mhi/core/mhi_pm.c
@@ -34,9 +34,11 @@
* M0 -> FW_DL_ERR
* M0 -> M3_ENTER -> M3 -> M3_EXIT --> M0
* L1: SYS_ERR_DETECT -> SYS_ERR_PROCESS --> POR
- * L2: SHUTDOWN_PROCESS -> DISABLE
+ * L2: SHUTDOWN_PROCESS -> LD_ERR_FATAL_DETECT
+ * SHUTDOWN_PROCESS -> DISABLE
* L3: LD_ERR_FATAL_DETECT <--> LD_ERR_FATAL_DETECT
- * LD_ERR_FATAL_DETECT -> SHUTDOWN_PROCESS
+ * LD_ERR_FATAL_DETECT -> SHUTDOWN_NO_ACCESS
+ * SHUTDOWN_NO_ACCESS -> DISABLE
*/
static struct mhi_pm_transitions const mhi_state_transitions[] = {
/* L0 States */
@@ -48,49 +50,52 @@ static struct mhi_pm_transitions const mhi_state_transitions[] = {
MHI_PM_POR,
MHI_PM_POR | MHI_PM_DISABLE | MHI_PM_M0 |
MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
- MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR |
+ MHI_PM_SHUTDOWN_NO_ACCESS
},
{
MHI_PM_M0,
MHI_PM_M0 | MHI_PM_M2 | MHI_PM_M3_ENTER |
MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
- MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_FW_DL_ERR |
+ MHI_PM_SHUTDOWN_NO_ACCESS
},
{
MHI_PM_M2,
MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
- MHI_PM_LD_ERR_FATAL_DETECT
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
},
{
MHI_PM_M3_ENTER,
MHI_PM_M3 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
- MHI_PM_LD_ERR_FATAL_DETECT
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
},
{
MHI_PM_M3,
MHI_PM_M3_EXIT | MHI_PM_SYS_ERR_DETECT |
- MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
},
{
MHI_PM_M3_EXIT,
MHI_PM_M0 | MHI_PM_SYS_ERR_DETECT | MHI_PM_SHUTDOWN_PROCESS |
- MHI_PM_LD_ERR_FATAL_DETECT
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
},
{
MHI_PM_FW_DL_ERR,
MHI_PM_FW_DL_ERR | MHI_PM_SYS_ERR_DETECT |
- MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT
+ MHI_PM_SHUTDOWN_PROCESS | MHI_PM_LD_ERR_FATAL_DETECT |
+ MHI_PM_SHUTDOWN_NO_ACCESS
},
/* L1 States */
{
MHI_PM_SYS_ERR_DETECT,
MHI_PM_SYS_ERR_PROCESS | MHI_PM_SHUTDOWN_PROCESS |
- MHI_PM_LD_ERR_FATAL_DETECT
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
},
{
MHI_PM_SYS_ERR_PROCESS,
MHI_PM_POR | MHI_PM_SHUTDOWN_PROCESS |
- MHI_PM_LD_ERR_FATAL_DETECT
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
},
/* L2 States */
{
@@ -100,7 +105,11 @@ static struct mhi_pm_transitions const mhi_state_transitions[] = {
/* L3 States */
{
MHI_PM_LD_ERR_FATAL_DETECT,
- MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_PROCESS
+ MHI_PM_LD_ERR_FATAL_DETECT | MHI_PM_SHUTDOWN_NO_ACCESS
+ },
+ {
+ MHI_PM_SHUTDOWN_NO_ACCESS,
+ MHI_PM_DISABLE
},
};
@@ -492,7 +501,7 @@ static int mhi_pm_mission_mode_transition(struct mhi_controller *mhi_cntrl)
mhi_create_devices(mhi_cntrl);
/* setup sysfs nodes for userspace votes */
- mhi_create_vote_sysfs(mhi_cntrl);
+ mhi_create_sysfs(mhi_cntrl);
read_lock_bh(&mhi_cntrl->pm_lock);
@@ -589,7 +598,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
MHI_LOG("Waiting for all pending event ring processing to complete\n");
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
- if (mhi_event->offload_ev)
+ if (!mhi_event->request_irq)
continue;
tasklet_kill(&mhi_event->task);
}
@@ -602,12 +611,13 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl,
MHI_LOG("Finish resetting channels\n");
/* remove support for userspace votes */
- mhi_destroy_vote_sysfs(mhi_cntrl);
+ mhi_destroy_sysfs(mhi_cntrl);
MHI_LOG("Waiting for all pending threads to complete\n");
wake_up_all(&mhi_cntrl->state_event);
flush_work(&mhi_cntrl->st_worker);
flush_work(&mhi_cntrl->fw_worker);
+ flush_work(&mhi_cntrl->low_priority_worker);
mutex_lock(&mhi_cntrl->pm_mutex);
@@ -720,6 +730,44 @@ int mhi_queue_state_transition(struct mhi_controller *mhi_cntrl,
return 0;
}
+static void mhi_low_priority_events_pending(struct mhi_controller *mhi_cntrl)
+{
+ struct mhi_event *mhi_event;
+
+ list_for_each_entry(mhi_event, &mhi_cntrl->lp_ev_rings, node) {
+ struct mhi_event_ctxt *er_ctxt =
+ &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
+ struct mhi_ring *ev_ring = &mhi_event->ring;
+
+ spin_lock_bh(&mhi_event->lock);
+ if (ev_ring->rp != mhi_to_virtual(ev_ring, er_ctxt->rp)) {
+ schedule_work(&mhi_cntrl->low_priority_worker);
+ spin_unlock_bh(&mhi_event->lock);
+ break;
+ }
+ spin_unlock_bh(&mhi_event->lock);
+ }
+}
+
+void mhi_low_priority_worker(struct work_struct *work)
+{
+ struct mhi_controller *mhi_cntrl = container_of(work,
+ struct mhi_controller,
+ low_priority_worker);
+ struct mhi_event *mhi_event;
+
+ MHI_VERB("Enter with pm_state:%s MHI_STATE:%s ee:%s\n",
+ to_mhi_pm_state_str(mhi_cntrl->pm_state),
+ TO_MHI_STATE_STR(mhi_cntrl->dev_state),
+ TO_MHI_EXEC_STR(mhi_cntrl->ee));
+
+ /* check low priority event rings and process events */
+ list_for_each_entry(mhi_event, &mhi_cntrl->lp_ev_rings, node) {
+ if (MHI_IN_MISSION_MODE(mhi_cntrl->ee))
+ mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX);
+ }
+}
+
void mhi_pm_sys_err_worker(struct work_struct *work)
{
struct mhi_controller *mhi_cntrl = container_of(work,
@@ -920,6 +968,7 @@ EXPORT_SYMBOL(mhi_control_error);
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
{
enum MHI_PM_STATE cur_state;
+ enum MHI_PM_STATE transition_state = MHI_PM_SHUTDOWN_PROCESS;
/* if it's not graceful shutdown, force MHI to a linkdown state */
if (!graceful) {
@@ -933,8 +982,10 @@ void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful)
MHI_ERR("Failed to move to state:%s from:%s\n",
to_mhi_pm_state_str(MHI_PM_LD_ERR_FATAL_DETECT),
to_mhi_pm_state_str(mhi_cntrl->pm_state));
+
+ transition_state = MHI_PM_SHUTDOWN_NO_ACCESS;
}
- mhi_pm_disable_transition(mhi_cntrl, MHI_PM_SHUTDOWN_PROCESS);
+ mhi_pm_disable_transition(mhi_cntrl, transition_state);
mhi_deinit_debugfs(mhi_cntrl);
@@ -1056,10 +1107,8 @@ int mhi_pm_suspend(struct mhi_controller *mhi_cntrl)
/* notify any clients we enter lpm */
list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
- mutex_lock(&itr->mutex);
if (itr->mhi_dev)
mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
- mutex_unlock(&itr->mutex);
}
return 0;
@@ -1162,10 +1211,8 @@ int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client)
/* notify any clients we enter lpm */
list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
- mutex_lock(&itr->mutex);
if (itr->mhi_dev)
mhi_notify(itr->mhi_dev, MHI_CB_LPM_ENTER);
- mutex_unlock(&itr->mutex);
}
return 0;
@@ -1201,10 +1248,8 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
/* notify any clients we exit lpm */
list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans, node) {
- mutex_lock(&itr->mutex);
if (itr->mhi_dev)
mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
- mutex_unlock(&itr->mutex);
}
write_lock_irq(&mhi_cntrl->pm_lock);
@@ -1244,6 +1289,14 @@ int mhi_pm_resume(struct mhi_controller *mhi_cntrl)
return -EIO;
}
+ /*
+ * If MHI on host is in suspending/suspended state, we do not process
+ * any low priority requests, for example, bandwidth scaling events
+ * from the device. Check for low priority event rings and handle the
+ * pending events upon resume.
+ */
+ mhi_low_priority_events_pending(mhi_cntrl);
+
return 0;
}
@@ -1269,10 +1322,8 @@ int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
if (notify_client) {
list_for_each_entry_safe(itr, tmp, &mhi_cntrl->lpm_chans,
node) {
- mutex_lock(&itr->mutex);
if (itr->mhi_dev)
mhi_notify(itr->mhi_dev, MHI_CB_LPM_EXIT);
- mutex_unlock(&itr->mutex);
}
}
@@ -1288,6 +1339,7 @@ int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
switch (mhi_cntrl->pm_state) {
case MHI_PM_M0:
mhi_pm_m0_transition(mhi_cntrl);
+ break;
case MHI_PM_M2:
read_lock_bh(&mhi_cntrl->pm_lock);
mhi_cntrl->wake_get(mhi_cntrl, true);
@@ -1306,12 +1358,15 @@ int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client)
*/
mhi_event = mhi_cntrl->mhi_event;
for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
- if (mhi_event->offload_ev)
+ if (!mhi_event->request_irq)
continue;
mhi_msi_handlr(0, mhi_event);
}
+ /* schedules worker if any low priority events need to be handled */
+ mhi_low_priority_events_pending(mhi_cntrl);
+
MHI_LOG("Exit with pm_state:%s dev_state:%s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state),
TO_MHI_STATE_STR(mhi_cntrl->dev_state));
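
Both resume paths above end with mhi_low_priority_events_pending(), whose job is simply to detect a non-empty low-priority ring and kick the worker once. A sketch of that check under hypothetical demo_* types (assumed names, not the driver's):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct demo_ring {
	spinlock_t lock;
	void *host_rp;			/* host read pointer */
	void *dev_rp;			/* device write pointer */
	struct list_head node;
};

struct demo_cntrl {
	struct list_head lp_rings;	/* low-priority event rings */
	struct work_struct lp_worker;
};

/* schedule the worker once if any low-priority ring has unread events */
static void demo_lp_events_pending(struct demo_cntrl *cntrl)
{
	struct demo_ring *ring;

	list_for_each_entry(ring, &cntrl->lp_rings, node) {
		spin_lock_bh(&ring->lock);
		if (ring->host_rp != ring->dev_rp) {
			schedule_work(&cntrl->lp_worker);
			spin_unlock_bh(&ring->lock);
			break;
		}
		spin_unlock_bh(&ring->lock);
	}
}
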
diff --git a/drivers/bus/mhi/devices/mhi_netdev.c b/drivers/bus/mhi/devices/mhi_netdev.c
index ce1e28f..2e6dcde 100644
--- a/drivers/bus/mhi/devices/mhi_netdev.c
+++ b/drivers/bus/mhi/devices/mhi_netdev.c
@@ -14,6 +14,7 @@
#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/rtnetlink.h>
+#include <linux/kthread.h>
#include <linux/mhi.h>
#define MHI_NETDEV_DRIVER_NAME "mhi_netdev"
@@ -80,6 +81,7 @@ struct mhi_netdev {
int alias;
struct mhi_device *mhi_dev;
struct mhi_netdev *rsc_dev; /* rsc linked node */
+ struct mhi_netdev *rsc_parent;
bool is_rsc_dev;
int wake;
@@ -89,16 +91,26 @@ struct mhi_netdev {
struct napi_struct *napi;
struct net_device *ndev;
- struct mhi_netbuf **netbuf_pool;
- int pool_size; /* must be power of 2 */
- int current_index;
+ struct list_head *recycle_pool;
+ int pool_size;
bool chain_skb;
struct mhi_net_chain *chain;
+ struct task_struct *alloc_task;
+ wait_queue_head_t alloc_event;
+ int bg_pool_limit; /* minimum pool size */
+ int bg_pool_size; /* current size of the pool */
+ struct list_head *bg_pool;
+ spinlock_t bg_lock; /* lock to access list */
+
struct dentry *dentry;
enum MHI_DEBUG_LEVEL msg_lvl;
enum MHI_DEBUG_LEVEL ipc_log_lvl;
void *ipc_log;
+
+ /* debug stats */
+ u32 abuffers, kbuffers, rbuffers;
};
struct mhi_netdev_priv {
@@ -111,6 +123,7 @@ struct mhi_netdev_priv {
*/
struct mhi_netbuf {
struct mhi_buf mhi_buf; /* this must be first element */
+ bool recycle;
void (*unmap)(struct device *dev, dma_addr_t addr, size_t size,
enum dma_data_direction dir);
};
@@ -147,7 +160,7 @@ static struct mhi_netbuf *mhi_netdev_alloc(struct device *dev,
struct mhi_buf *mhi_buf;
void *vaddr;
- page = __dev_alloc_pages(gfp, order);
+ page = __dev_alloc_pages(gfp | __GFP_NOMEMALLOC, order);
if (!page)
return NULL;
@@ -155,11 +168,15 @@ static struct mhi_netbuf *mhi_netdev_alloc(struct device *dev,
/* we going to use the end of page to store cached data */
netbuf = vaddr + (PAGE_SIZE << order) - sizeof(*netbuf);
-
+ netbuf->recycle = false;
mhi_buf = (struct mhi_buf *)netbuf;
mhi_buf->page = page;
mhi_buf->buf = vaddr;
mhi_buf->len = (void *)netbuf - vaddr;
+
+ if (!dev)
+ return netbuf;
+
mhi_buf->dma_addr = dma_map_page(dev, page, 0, mhi_buf->len,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, mhi_buf->dma_addr)) {
@@ -178,9 +195,10 @@ static void mhi_netdev_unmap_page(struct device *dev,
dma_unmap_page(dev, dma_addr, len, dir);
}
-static int mhi_netdev_tmp_alloc(struct mhi_netdev *mhi_netdev, int nr_tre)
+static int mhi_netdev_tmp_alloc(struct mhi_netdev *mhi_netdev,
+ struct mhi_device *mhi_dev,
+ int nr_tre)
{
- struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
struct device *dev = mhi_dev->dev.parent;
const u32 order = mhi_netdev->order;
int i, ret;
@@ -204,21 +222,73 @@ static int mhi_netdev_tmp_alloc(struct mhi_netdev *mhi_netdev, int nr_tre)
__free_pages(mhi_buf->page, order);
return ret;
}
+ mhi_netdev->abuffers++;
}
return 0;
}
-static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev)
+static int mhi_netdev_queue_bg_pool(struct mhi_netdev *mhi_netdev,
+ struct mhi_device *mhi_dev,
+ int nr_tre)
{
- struct mhi_device *mhi_dev = mhi_netdev->mhi_dev;
+ struct device *dev = mhi_dev->dev.parent;
+ int i, ret;
+ LIST_HEAD(head);
+
+ spin_lock_bh(&mhi_netdev->bg_lock);
+ list_splice_init(mhi_netdev->bg_pool, &head);
+ spin_unlock_bh(&mhi_netdev->bg_lock);
+
+ for (i = 0; i < nr_tre; i++) {
+ struct mhi_buf *mhi_buf =
+ list_first_entry_or_null(&head, struct mhi_buf, node);
+ struct mhi_netbuf *netbuf = (struct mhi_netbuf *)mhi_buf;
+
+ if (!mhi_buf)
+ break;
+
+ mhi_buf->dma_addr = dma_map_page(dev, mhi_buf->page, 0,
+ mhi_buf->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, mhi_buf->dma_addr))
+ break;
+
+ netbuf->unmap = mhi_netdev_unmap_page;
+ ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
+ mhi_buf->len, MHI_EOT);
+ if (unlikely(ret)) {
+ MSG_ERR("Failed to queue transfer, ret:%d\n", ret);
+ mhi_netdev_unmap_page(dev, mhi_buf->dma_addr,
+ mhi_buf->len, DMA_FROM_DEVICE);
+ break;
+ }
+ list_del(&mhi_buf->node);
+ mhi_netdev->kbuffers++;
+ }
+
+ /* add remaining buffers back to main pool */
+ spin_lock_bh(&mhi_netdev->bg_lock);
+ list_splice(&head, mhi_netdev->bg_pool);
+ mhi_netdev->bg_pool_size -= i;
+ spin_unlock_bh(&mhi_netdev->bg_lock);
+
+ /* wake up the bg thread to allocate more buffers */
+ wake_up_interruptible(&mhi_netdev->alloc_event);
+
+ return i;
+}
+
+static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev,
+ struct mhi_device *mhi_dev)
+{
struct device *dev = mhi_dev->dev.parent;
struct mhi_netbuf *netbuf;
struct mhi_buf *mhi_buf;
- struct mhi_netbuf **netbuf_pool = mhi_netdev->netbuf_pool;
+ struct list_head *pool = mhi_netdev->recycle_pool;
int nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
- int i, peak, cur_index, ret;
- const int pool_size = mhi_netdev->pool_size - 1, max_peak = 4;
+ int i, ret;
+ const int max_peek = 4;
MSG_VERB("Enter free_desc:%d\n", nr_tre);
@@ -227,23 +297,21 @@ static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev)
/* try going thru reclaim pool first */
for (i = 0; i < nr_tre; i++) {
- /* peak for the next buffer, we going to peak several times,
+ /* peek for the next buffer, we are going to peek several times,
* and we are going to give up if buffers are not yet free
*/
- cur_index = mhi_netdev->current_index;
+ int peek = 0;
+
netbuf = NULL;
- for (peak = 0; peak < max_peak; peak++) {
- struct mhi_netbuf *tmp = netbuf_pool[cur_index];
-
- mhi_buf = &tmp->mhi_buf;
-
- cur_index = (cur_index + 1) & pool_size;
-
+ list_for_each_entry(mhi_buf, pool, node) {
/* page == 1 idle, buffer is free to reclaim */
if (page_ref_count(mhi_buf->page) == 1) {
- netbuf = tmp;
+ netbuf = (struct mhi_netbuf *)mhi_buf;
break;
}
+
+ if (peek++ >= max_peek)
+ break;
}
/* could not find a free buffer */
@@ -254,6 +322,7 @@ static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev)
* with buffer, the buffer won't be freed
*/
page_ref_inc(mhi_buf->page);
+ list_del(&mhi_buf->node);
dma_sync_single_for_device(dev, mhi_buf->dma_addr, mhi_buf->len,
DMA_FROM_DEVICE);
ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, mhi_buf,
@@ -263,30 +332,36 @@ static void mhi_netdev_queue(struct mhi_netdev *mhi_netdev)
netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len,
DMA_FROM_DEVICE);
page_ref_dec(mhi_buf->page);
+ list_add(&mhi_buf->node, pool);
return;
}
- mhi_netdev->current_index = cur_index;
+ mhi_netdev->rbuffers++;
}
+ /* recycling did not work, buffers are still busy, use the bg pool */
+ if (i < nr_tre)
+ i += mhi_netdev_queue_bg_pool(mhi_netdev, mhi_dev, nr_tre - i);
+
/* recycling did not work, buffers are still busy, allocate temp pkts */
if (i < nr_tre)
- mhi_netdev_tmp_alloc(mhi_netdev, nr_tre - i);
+ mhi_netdev_tmp_alloc(mhi_netdev, mhi_dev, nr_tre - i);
}
/* allocating pool of memory */
static int mhi_netdev_alloc_pool(struct mhi_netdev *mhi_netdev)
{
int i;
- struct mhi_netbuf *netbuf, **netbuf_pool;
- struct mhi_buf *mhi_buf;
+ struct mhi_netbuf *netbuf;
+ struct mhi_buf *mhi_buf, *tmp;
const u32 order = mhi_netdev->order;
struct device *dev = mhi_netdev->mhi_dev->dev.parent;
+ struct list_head *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
- netbuf_pool = kmalloc_array(mhi_netdev->pool_size, sizeof(*netbuf_pool),
- GFP_KERNEL);
- if (!netbuf_pool)
+ if (!pool)
return -ENOMEM;
+ INIT_LIST_HEAD(pool);
+
for (i = 0; i < mhi_netdev->pool_size; i++) {
/* allocate paged data */
netbuf = mhi_netdev_alloc(dev, GFP_KERNEL, order);
@@ -294,44 +369,100 @@ static int mhi_netdev_alloc_pool(struct mhi_netdev *mhi_netdev)
goto error_alloc_page;
netbuf->unmap = dma_sync_single_for_cpu;
- netbuf_pool[i] = netbuf;
+ netbuf->recycle = true;
+ mhi_buf = (struct mhi_buf *)netbuf;
+ list_add(&mhi_buf->node, pool);
}
- mhi_netdev->netbuf_pool = netbuf_pool;
+ mhi_netdev->recycle_pool = pool;
return 0;
error_alloc_page:
- for (--i; i >= 0; i--) {
- netbuf = netbuf_pool[i];
- mhi_buf = &netbuf->mhi_buf;
+ list_for_each_entry_safe(mhi_buf, tmp, pool, node) {
+ list_del(&mhi_buf->node);
dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len,
DMA_FROM_DEVICE);
__free_pages(mhi_buf->page, order);
}
- kfree(netbuf_pool);
+ kfree(pool);
return -ENOMEM;
}
static void mhi_netdev_free_pool(struct mhi_netdev *mhi_netdev)
{
- int i;
- struct mhi_netbuf *netbuf, **netbuf_pool = mhi_netdev->netbuf_pool;
struct device *dev = mhi_netdev->mhi_dev->dev.parent;
- struct mhi_buf *mhi_buf;
+ struct mhi_buf *mhi_buf, *tmp;
- for (i = 0; i < mhi_netdev->pool_size; i++) {
- netbuf = netbuf_pool[i];
- mhi_buf = &netbuf->mhi_buf;
+ list_for_each_entry_safe(mhi_buf, tmp, mhi_netdev->recycle_pool, node) {
+ list_del(&mhi_buf->node);
dma_unmap_page(dev, mhi_buf->dma_addr, mhi_buf->len,
DMA_FROM_DEVICE);
__free_pages(mhi_buf->page, mhi_netdev->order);
}
- kfree(mhi_netdev->netbuf_pool);
- mhi_netdev->netbuf_pool = NULL;
+ kfree(mhi_netdev->recycle_pool);
+
+ /* free the bg pool */
+ list_for_each_entry_safe(mhi_buf, tmp, mhi_netdev->bg_pool, node) {
+ list_del(&mhi_buf->node);
+ __free_pages(mhi_buf->page, mhi_netdev->order);
+ mhi_netdev->bg_pool_size--;
+ }
+}
+
+static int mhi_netdev_alloc_thread(void *data)
+{
+ struct mhi_netdev *mhi_netdev = data;
+ struct mhi_netbuf *netbuf;
+ struct mhi_buf *mhi_buf, *tmp_buf;
+ const u32 order = mhi_netdev->order;
+ LIST_HEAD(head);
+
+ while (!kthread_should_stop()) {
+ while (mhi_netdev->bg_pool_size <= mhi_netdev->bg_pool_limit) {
+ int buffers = 0, i;
+
+ /* do a bulk allocation */
+ for (i = 0; i < NAPI_POLL_WEIGHT; i++) {
+ if (kthread_should_stop())
+ goto exit_alloc;
+
+ netbuf = mhi_netdev_alloc(NULL, GFP_KERNEL,
+ order);
+ if (!netbuf)
+ continue;
+
+ mhi_buf = (struct mhi_buf *)netbuf;
+ list_add(&mhi_buf->node, &head);
+ buffers++;
+ }
+
+ /* add the list to main pool */
+ spin_lock_bh(&mhi_netdev->bg_lock);
+ list_splice_init(&head, mhi_netdev->bg_pool);
+ mhi_netdev->bg_pool_size += buffers;
+ spin_unlock_bh(&mhi_netdev->bg_lock);
+ }
+
+ /* replenish the ring */
+ napi_schedule(mhi_netdev->napi);
+
+ /* wait for buffers to run low or thread to stop */
+ wait_event_interruptible(mhi_netdev->alloc_event,
+ kthread_should_stop() ||
+ mhi_netdev->bg_pool_size <= mhi_netdev->bg_pool_limit);
+ }
+
+exit_alloc:
+ list_for_each_entry_safe(mhi_buf, tmp_buf, &head, node) {
+ list_del(&mhi_buf->node);
+ __free_pages(mhi_buf->page, order);
+ }
+
+ return 0;
}
static int mhi_netdev_poll(struct napi_struct *napi, int budget)
@@ -361,10 +492,10 @@ static int mhi_netdev_poll(struct napi_struct *napi, int budget)
}
/* queue new buffers */
- mhi_netdev_queue(mhi_netdev);
+ mhi_netdev_queue(mhi_netdev, mhi_dev);
if (rsc_dev)
- mhi_netdev_queue(rsc_dev);
+ mhi_netdev_queue(mhi_netdev, rsc_dev->mhi_dev);
/* complete work if # of packet processed less than allocated budget */
if (rx_work < budget)
@@ -658,6 +789,8 @@ static void mhi_netdev_xfer_dl_cb(struct mhi_device *mhi_dev,
struct mhi_net_chain *chain = mhi_netdev->chain;
netbuf->unmap(dev, mhi_buf->dma_addr, mhi_buf->len, DMA_FROM_DEVICE);
+ if (likely(netbuf->recycle))
+ list_add_tail(&mhi_buf->node, mhi_netdev->recycle_pool);
/* modem is down, drop the buffer */
if (mhi_result->transaction_status == -ENOTCONN) {
@@ -708,6 +841,47 @@ static void mhi_netdev_status_cb(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb)
struct dentry *dentry;
+static int mhi_netdev_debugfs_stats_show(struct seq_file *m, void *d)
+{
+ struct mhi_netdev *mhi_netdev = m->private;
+
+ seq_printf(m,
+ "mru:%u order:%u pool_size:%d, bg_pool_size:%d bg_pool_limit:%d abuf:%u kbuf:%u rbuf:%u\n",
+ mhi_netdev->mru, mhi_netdev->order, mhi_netdev->pool_size,
+ mhi_netdev->bg_pool_size, mhi_netdev->bg_pool_limit,
+ mhi_netdev->abuffers, mhi_netdev->kbuffers,
+ mhi_netdev->rbuffers);
+
+ return 0;
+}
+
+static int mhi_netdev_debugfs_stats_open(struct inode *inode, struct file *fp)
+{
+ return single_open(fp, mhi_netdev_debugfs_stats_show, inode->i_private);
+}
+
+static const struct file_operations debugfs_stats = {
+ .open = mhi_netdev_debugfs_stats_open,
+ .release = single_release,
+ .read = seq_read,
+};
+
+static int mhi_netdev_debugfs_chain(void *data, u64 val)
+{
+ struct mhi_netdev *mhi_netdev = data;
+ struct mhi_netdev *rsc_dev = mhi_netdev->rsc_dev;
+
+ mhi_netdev->chain = NULL;
+
+ if (rsc_dev)
+ rsc_dev->chain = NULL;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(debugfs_chain, NULL,
+ mhi_netdev_debugfs_chain, "%llu\n");
+
static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
{
char node_name[32];
@@ -724,6 +898,11 @@ static void mhi_netdev_create_debugfs(struct mhi_netdev *mhi_netdev)
mhi_netdev->dentry = debugfs_create_dir(node_name, dentry);
if (IS_ERR_OR_NULL(mhi_netdev->dentry))
return;
+
+ debugfs_create_file_unsafe("stats", 0444, mhi_netdev->dentry,
+ mhi_netdev, &debugfs_stats);
+ debugfs_create_file_unsafe("chain", 0444, mhi_netdev->dentry,
+ mhi_netdev, &debugfs_chain);
}
static void mhi_netdev_create_debugfs_dir(void)
@@ -755,12 +934,12 @@ static void mhi_netdev_remove(struct mhi_device *mhi_dev)
return;
}
+ kthread_stop(mhi_netdev->alloc_task);
netif_stop_queue(mhi_netdev->ndev);
napi_disable(mhi_netdev->napi);
unregister_netdev(mhi_netdev->ndev);
netif_napi_del(mhi_netdev->napi);
free_netdev(mhi_netdev->ndev);
- mhi_netdev_free_pool(mhi_netdev);
if (!IS_ERR_OR_NULL(mhi_netdev->dentry))
debugfs_remove_recursive(mhi_netdev->dentry);
@@ -782,6 +961,9 @@ static void mhi_netdev_clone_dev(struct mhi_netdev *mhi_netdev,
mhi_netdev->ipc_log_lvl = parent->ipc_log_lvl;
mhi_netdev->is_rsc_dev = true;
mhi_netdev->chain = parent->chain;
+ mhi_netdev->rsc_parent = parent;
+ mhi_netdev->recycle_pool = parent->recycle_pool;
+ mhi_netdev->bg_pool = parent->bg_pool;
}
static int mhi_netdev_probe(struct mhi_device *mhi_dev,
@@ -803,6 +985,13 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
if (!mhi_netdev)
return -ENOMEM;
+ /* move mhi channels to start state */
+ ret = mhi_prepare_for_transfer(mhi_dev);
+ if (ret) {
+ MSG_ERR("Failed to start channels ret %d\n", ret);
+ return ret;
+ }
+
mhi_netdev->mhi_dev = mhi_dev;
mhi_device_set_devdata(mhi_dev, mhi_netdev);
@@ -850,6 +1039,38 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
if (ret)
return ret;
+ /* set up pool size at ~2x ring length */
+ nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+ mhi_netdev->pool_size = 1 << __ilog2_u32(nr_tre);
+ if (nr_tre > mhi_netdev->pool_size)
+ mhi_netdev->pool_size <<= 1;
+ mhi_netdev->pool_size <<= 1;
+
+ /* if we expect child device to share then double the pool */
+ if (of_parse_phandle(of_node, "mhi,rsc-child", 0))
+ mhi_netdev->pool_size <<= 1;
+
+ /* allocate memory pool */
+ ret = mhi_netdev_alloc_pool(mhi_netdev);
+ if (ret)
+ return -ENOMEM;
+
+ /* create a background task to allocate memory */
+ mhi_netdev->bg_pool = kmalloc(sizeof(*mhi_netdev->bg_pool),
+ GFP_KERNEL);
+ if (!mhi_netdev->bg_pool)
+ return -ENOMEM;
+
+ init_waitqueue_head(&mhi_netdev->alloc_event);
+ INIT_LIST_HEAD(mhi_netdev->bg_pool);
+ spin_lock_init(&mhi_netdev->bg_lock);
+ mhi_netdev->bg_pool_limit = mhi_netdev->pool_size / 4;
+ mhi_netdev->alloc_task = kthread_run(mhi_netdev_alloc_thread,
+ mhi_netdev,
+ mhi_netdev->ndev->name);
+ if (IS_ERR(mhi_netdev->alloc_task))
+ return PTR_ERR(mhi_netdev->alloc_task);
+
/* create ipc log buffer */
snprintf(node_name, sizeof(node_name),
"%s_%04x_%02u.%02u.%02u_%u",
@@ -863,25 +1084,6 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
mhi_netdev_create_debugfs(mhi_netdev);
}
- /* move mhi channels to start state */
- ret = mhi_prepare_for_transfer(mhi_dev);
- if (ret) {
- MSG_ERR("Failed to start channels ret %d\n", ret);
- goto error_start;
- }
-
- /* setup pool size ~2x ring length*/
- nr_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
- mhi_netdev->pool_size = 1 << __ilog2_u32(nr_tre);
- if (nr_tre > mhi_netdev->pool_size)
- mhi_netdev->pool_size <<= 1;
- mhi_netdev->pool_size <<= 1;
-
- /* allocate memory pool */
- ret = mhi_netdev_alloc_pool(mhi_netdev);
- if (ret)
- goto error_start;
-
/* link child node with parent node if it's a child dev */
if (p_netdev)
p_netdev->rsc_dev = mhi_netdev;
@@ -892,18 +1094,6 @@ static int mhi_netdev_probe(struct mhi_device *mhi_dev,
napi_schedule(mhi_netdev->napi);
return 0;
-
-error_start:
- if (phandle)
- return ret;
-
- netif_stop_queue(mhi_netdev->ndev);
- napi_disable(mhi_netdev->napi);
- unregister_netdev(mhi_netdev->ndev);
- netif_napi_del(mhi_netdev->napi);
- free_netdev(mhi_netdev->ndev);
-
- return ret;
}
static const struct mhi_device_id mhi_netdev_match_table[] = {
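
The netdev changes above replace the fixed array pool with two list-based pools, one of which is refilled by a kthread so the NAPI path rarely has to allocate under pressure. The producer side reduces to the shape below; the demo_* types, the kzalloc payload, and the low-water policy are illustrative assumptions, not the driver's code.

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct demo_buf {
	struct list_head node;
	/* payload omitted */
};

struct demo_pool {
	struct list_head bufs;
	int size, limit;		/* refill while size <= limit */
	spinlock_t lock;
	wait_queue_head_t event;	/* consumers wake us on drain */
};

static int demo_alloc_thread(void *data)
{
	struct demo_pool *pool = data;

	while (!kthread_should_stop()) {
		/* refill until the pool is above its low-water mark */
		while (pool->size <= pool->limit) {
			struct demo_buf *buf;

			if (kthread_should_stop())
				return 0;

			buf = kzalloc(sizeof(*buf), GFP_KERNEL);
			if (!buf)
				continue;

			spin_lock_bh(&pool->lock);
			list_add(&buf->node, &pool->bufs);
			pool->size++;
			spin_unlock_bh(&pool->lock);
		}

		/* sleep until consumers drain the pool or we are stopped */
		wait_event_interruptible(pool->event,
					 kthread_should_stop() ||
					 pool->size <= pool->limit);
	}

	return 0;
}

Consumers would splice buffers out under the lock, decrement size, and call wake_up_interruptible(&pool->event) when the pool runs low, mirroring mhi_netdev_queue_bg_pool() above.
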
diff --git a/drivers/bus/mhi/devices/mhi_satellite.c b/drivers/bus/mhi/devices/mhi_satellite.c
index 33338f4..9fff109 100644
--- a/drivers/bus/mhi/devices/mhi_satellite.c
+++ b/drivers/bus/mhi/devices/mhi_satellite.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019, The Linux Foundation. All rights reserved.*/
-#include <linux/debugfs.h>
+#include <linux/async.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
@@ -19,8 +19,6 @@
#define MHI_SAT_DRIVER_NAME "mhi_satellite"
-static bool mhi_sat_defer_init;
-
/* logging macros */
#define IPC_LOG_PAGES (10)
#define IPC_LOG_LVL (MHI_MSG_LVL_INFO)
@@ -147,17 +145,13 @@ enum mhi_ev_ccs {
/* satellite subsystem definitions */
enum subsys_id {
SUBSYS_ADSP,
- SUBSYS_CDSP,
SUBSYS_SLPI,
- SUBSYS_MODEM,
SUBSYS_MAX,
};
static const char * const subsys_names[SUBSYS_MAX] = {
[SUBSYS_ADSP] = "adsp",
- [SUBSYS_CDSP] = "cdsp",
[SUBSYS_SLPI] = "slpi",
- [SUBSYS_MODEM] = "modem",
};
struct mhi_sat_subsys {
@@ -235,6 +229,21 @@ struct mhi_sat_packet {
void *msg; /* incoming message */
};
+enum mhi_sat_state {
+ SAT_READY, /* initial state when device is presented to driver */
+ SAT_RUNNING, /* subsystem can communicate with the device */
+ SAT_DISCONNECTED, /* rpmsg link is down */
+ SAT_FATAL_DETECT, /* device is down as fatal error was detected early */
+ SAT_ERROR, /* device is down after error or graceful shutdown */
+ SAT_DISABLED, /* set if rpmsg link goes down after device is down */
+};
+
+#define MHI_SAT_ACTIVE(cntrl) (cntrl->state == SAT_RUNNING)
+#define MHI_SAT_FATAL_DETECT(cntrl) (cntrl->state == SAT_FATAL_DETECT)
+#define MHI_SAT_ALLOW_CONNECTION(cntrl) (cntrl->state == SAT_READY || \
+ cntrl->state == SAT_DISCONNECTED)
+#define MHI_SAT_IN_ERROR_STATE(cntrl) (cntrl->state >= SAT_FATAL_DETECT)
+
struct mhi_sat_cntrl {
struct list_head node;
@@ -250,6 +259,7 @@ struct mhi_sat_cntrl {
struct work_struct connect_work; /* subsystem connection worker */
struct work_struct process_work; /* incoming packets processor */
+ async_cookie_t error_cookie; /* synchronize device error handling */
/* mhi core/controller configurations */
u32 dev_id; /* unique device ID with BDF as per connection topology */
@@ -261,7 +271,8 @@ struct mhi_sat_cntrl {
int num_devices; /* mhi devices current count */
int max_devices; /* count of maximum devices for subsys/controller */
u16 seq; /* internal sequence number for all outgoing packets */
- bool active; /* flag set if hello packet/MHI_CFG event was sent */
+ enum mhi_sat_state state; /* controller state manager */
+ spinlock_t state_lock; /* lock to change controller state */
/* command completion variables */
u16 last_cmd_seq; /* sequence number of last sent command packet */
@@ -285,9 +296,6 @@ struct mhi_sat_driver {
struct mhi_sat_subsys *subsys; /* pointer to subsystem array */
unsigned int num_subsys;
-
- struct dentry *dentry; /* debugfs directory */
- bool deferred_init_done; /* flag for deferred init protection */
};
static struct mhi_sat_driver mhi_sat_driver;
@@ -566,6 +574,83 @@ static void mhi_sat_process_cmds(struct mhi_sat_cntrl *sat_cntrl,
}
}
+/* send sys_err command to subsystem if device asserts or is powered off */
+static void mhi_sat_send_sys_err(struct mhi_sat_cntrl *sat_cntrl)
+{
+ struct mhi_sat_subsys *subsys = sat_cntrl->subsys;
+ struct sat_tre *pkt;
+ void *msg;
+ int ret;
+
+ /* flush all pending work */
+ flush_work(&sat_cntrl->connect_work);
+ flush_work(&sat_cntrl->process_work);
+
+ msg = kmalloc(SAT_MSG_SIZE(1), GFP_KERNEL);
+
+ MHI_SAT_ASSERT(!msg, "Unable to malloc for SYS_ERR message!\n");
+ if (!msg)
+ return;
+
+ pkt = SAT_TRE_OFFSET(msg);
+ pkt->ptr = MHI_TRE_CMD_SYS_ERR_PTR;
+ pkt->dword[0] = MHI_TRE_CMD_SYS_ERR_D0;
+ pkt->dword[1] = MHI_TRE_CMD_SYS_ERR_D1;
+
+ mutex_lock(&sat_cntrl->cmd_wait_mutex);
+
+ ret = mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_CMD,
+ SAT_RESERVED_SEQ_NUM, msg,
+ SAT_MSG_SIZE(1));
+ kfree(msg);
+ if (ret) {
+ MHI_SAT_ERR("Failed to notify SYS_ERR cmd\n");
+ mutex_unlock(&sat_cntrl->cmd_wait_mutex);
+ return;
+ }
+
+ MHI_SAT_LOG("SYS_ERR command sent\n");
+
+ /* blocking call to wait for command completion event */
+ mhi_sat_wait_cmd_completion(sat_cntrl);
+
+ mutex_unlock(&sat_cntrl->cmd_wait_mutex);
+}
+
+static void mhi_sat_error_worker(void *data, async_cookie_t cookie)
+{
+ struct mhi_sat_cntrl *sat_cntrl = data;
+ struct mhi_sat_subsys *subsys = sat_cntrl->subsys;
+ struct sat_tre *pkt;
+ void *msg;
+ int ret;
+
+ MHI_SAT_LOG("Entered\n");
+
+ /* flush all pending work */
+ flush_work(&sat_cntrl->connect_work);
+ flush_work(&sat_cntrl->process_work);
+
+ msg = kmalloc(SAT_MSG_SIZE(1), GFP_KERNEL);
+
+ MHI_SAT_ASSERT(!msg, "Unable to malloc for SYS_ERR message!\n");
+ if (!msg)
+ return;
+
+ pkt = SAT_TRE_OFFSET(msg);
+ pkt->ptr = MHI_TRE_EVT_MHI_STATE_PTR;
+ pkt->dword[0] = MHI_TRE_EVT_MHI_STATE_D0(MHI_STATE_SYS_ERR);
+ pkt->dword[1] = MHI_TRE_EVT_MHI_STATE_D1;
+
+ ret = mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_EVT,
+ SAT_RESERVED_SEQ_NUM, msg,
+ SAT_MSG_SIZE(1));
+ kfree(msg);
+
+ MHI_SAT_LOG("SYS_ERROR state change event send %s!\n", ret ? "failure" :
+ "success");
+}
+
static void mhi_sat_process_worker(struct work_struct *work)
{
struct mhi_sat_cntrl *sat_cntrl = container_of(work,
@@ -588,6 +673,9 @@ static void mhi_sat_process_worker(struct work_struct *work)
list_del(&packet->node);
+ if (!MHI_SAT_ACTIVE(sat_cntrl))
+ goto process_next;
+
mhi_sat_process_cmds(sat_cntrl, hdr, pkt);
/* send response event(s) */
@@ -596,6 +684,7 @@ static void mhi_sat_process_worker(struct work_struct *work)
SAT_MSG_SIZE(SAT_TRE_NUM_PKTS(
hdr->payload_size)));
+process_next:
kfree(packet);
}
@@ -607,21 +696,26 @@ static void mhi_sat_connect_worker(struct work_struct *work)
struct mhi_sat_cntrl *sat_cntrl = container_of(work,
struct mhi_sat_cntrl, connect_work);
struct mhi_sat_subsys *subsys = sat_cntrl->subsys;
+ enum mhi_sat_state prev_state;
struct sat_tre *pkt;
void *msg;
int ret;
+ spin_lock_irq(&sat_cntrl->state_lock);
if (!subsys->rpdev || sat_cntrl->max_devices != sat_cntrl->num_devices
- || sat_cntrl->active)
+ || !(MHI_SAT_ALLOW_CONNECTION(sat_cntrl))) {
+ spin_unlock_irq(&sat_cntrl->state_lock);
return;
+ }
+ prev_state = sat_cntrl->state;
+ sat_cntrl->state = SAT_RUNNING;
+ spin_unlock_irq(&sat_cntrl->state_lock);
MHI_SAT_LOG("Entered\n");
msg = kmalloc(SAT_MSG_SIZE(3), GFP_ATOMIC);
if (!msg)
- return;
-
- sat_cntrl->active = true;
+ goto error_connect_work;
pkt = SAT_TRE_OFFSET(msg);
@@ -648,11 +742,18 @@ static void mhi_sat_connect_worker(struct work_struct *work)
kfree(msg);
if (ret) {
MHI_SAT_ERR("Failed to send hello packet:%d\n", ret);
- sat_cntrl->active = false;
- return;
+ goto error_connect_work;
}
MHI_SAT_LOG("Device 0x%x sent hello packet\n", sat_cntrl->dev_id);
+
+ return;
+
+error_connect_work:
+ spin_lock_irq(&sat_cntrl->state_lock);
+ if (MHI_SAT_ACTIVE(sat_cntrl))
+ sat_cntrl->state = prev_state;
+ spin_unlock_irq(&sat_cntrl->state_lock);
}
static void mhi_sat_process_events(struct mhi_sat_cntrl *sat_cntrl,
@@ -697,7 +798,7 @@ static int mhi_sat_rpmsg_cb(struct rpmsg_device *rpdev, void *data, int len,
}
/* Inactive controller cannot process incoming commands */
- if (unlikely(!sat_cntrl->active)) {
+ if (unlikely(!MHI_SAT_ACTIVE(sat_cntrl))) {
MHI_SAT_ERR("Message for inactive controller!\n");
return 0;
}
@@ -732,10 +833,21 @@ static void mhi_sat_rpmsg_remove(struct rpmsg_device *rpdev)
/* unprepare each controller/device from transfer */
mutex_lock(&subsys->cntrl_mutex);
list_for_each_entry(sat_cntrl, &subsys->cntrl_list, node) {
- if (!sat_cntrl->active)
- continue;
+ async_synchronize_cookie(sat_cntrl->error_cookie + 1);
- sat_cntrl->active = false;
+ spin_lock_irq(&sat_cntrl->state_lock);
+ /*
+ * move to the disabled state if an early fatal error is detected
+ * and the rpmsg link goes down before the device remove call from
+ * mhi is received
+ */
+ if (MHI_SAT_IN_ERROR_STATE(sat_cntrl)) {
+ sat_cntrl->state = SAT_DISABLED;
+ spin_unlock_irq(&sat_cntrl->state_lock);
+ continue;
+ }
+ sat_cntrl->state = SAT_DISCONNECTED;
+ spin_unlock_irq(&sat_cntrl->state_lock);
flush_work(&sat_cntrl->connect_work);
flush_work(&sat_cntrl->process_work);
@@ -781,6 +893,8 @@ static int mhi_sat_rpmsg_probe(struct rpmsg_device *rpdev)
if (!subsys)
return -EINVAL;
+ mutex_lock(&subsys->cntrl_mutex);
+
MHI_SUBSYS_LOG("Received RPMSG probe\n");
dev_set_drvdata(&rpdev->dev, subsys);
@@ -793,6 +907,8 @@ static int mhi_sat_rpmsg_probe(struct rpmsg_device *rpdev)
schedule_work(&sat_cntrl->connect_work);
spin_unlock_irq(&subsys->cntrl_lock);
+ mutex_unlock(&subsys->cntrl_mutex);
+
return 0;
}
@@ -814,6 +930,21 @@ static struct rpmsg_driver mhi_sat_rpmsg_driver = {
static void mhi_sat_dev_status_cb(struct mhi_device *mhi_dev,
enum MHI_CB mhi_cb)
{
+ struct mhi_sat_device *sat_dev = mhi_device_get_devdata(mhi_dev);
+ struct mhi_sat_cntrl *sat_cntrl = sat_dev->cntrl;
+ struct mhi_sat_subsys *subsys = sat_cntrl->subsys;
+ unsigned long flags;
+
+ if (mhi_cb != MHI_CB_FATAL_ERROR)
+ return;
+
+ MHI_SAT_LOG("Device fatal error detected\n");
+ spin_lock_irqsave(&sat_cntrl->state_lock, flags);
+ if (MHI_SAT_ACTIVE(sat_cntrl))
+ sat_cntrl->error_cookie = async_schedule(mhi_sat_error_worker,
+ sat_cntrl);
+ sat_cntrl->state = SAT_FATAL_DETECT;
+ spin_unlock_irqrestore(&sat_cntrl->state_lock, flags);
}
static void mhi_sat_dev_remove(struct mhi_device *mhi_dev)
@@ -822,9 +953,7 @@ static void mhi_sat_dev_remove(struct mhi_device *mhi_dev)
struct mhi_sat_cntrl *sat_cntrl = sat_dev->cntrl;
struct mhi_sat_subsys *subsys = sat_cntrl->subsys;
struct mhi_buf *buf, *tmp;
- struct sat_tre *pkt;
- void *msg;
- int ret;
+ bool send_sys_err = false;
/* remove device node from probed list */
mutex_lock(&sat_cntrl->list_mutex);
@@ -834,51 +963,32 @@ static void mhi_sat_dev_remove(struct mhi_device *mhi_dev)
sat_cntrl->num_devices--;
mutex_lock(&subsys->cntrl_mutex);
- /* prepare SYS_ERR command if first device is being removed */
- if (sat_cntrl->active) {
- sat_cntrl->active = false;
- /* flush all pending work */
- flush_work(&sat_cntrl->connect_work);
- flush_work(&sat_cntrl->process_work);
+ async_synchronize_cookie(sat_cntrl->error_cookie + 1);
- msg = kmalloc(SAT_MSG_SIZE(1), GFP_KERNEL);
+ /* send sys_err if first device is removed */
+ spin_lock_irq(&sat_cntrl->state_lock);
+ if (MHI_SAT_ACTIVE(sat_cntrl) || MHI_SAT_FATAL_DETECT(sat_cntrl))
+ send_sys_err = true;
+ sat_cntrl->state = SAT_ERROR;
+ spin_unlock_irq(&sat_cntrl->state_lock);
- MHI_SAT_ASSERT(!msg, "Unable to malloc for SYS_ERR message!\n");
+ if (send_sys_err)
+ mhi_sat_send_sys_err(sat_cntrl);
- pkt = SAT_TRE_OFFSET(msg);
- pkt->ptr = MHI_TRE_CMD_SYS_ERR_PTR;
- pkt->dword[0] = MHI_TRE_CMD_SYS_ERR_D0;
- pkt->dword[1] = MHI_TRE_CMD_SYS_ERR_D1;
-
- /* acquire cmd_wait_mutex before sending command */
- mutex_lock(&sat_cntrl->cmd_wait_mutex);
-
- ret = mhi_sat_send_msg(sat_cntrl, SAT_MSG_ID_CMD,
- SAT_RESERVED_SEQ_NUM, msg,
- SAT_MSG_SIZE(1));
- kfree(msg);
- if (ret) {
- MHI_SAT_ERR("Failed to notify SYS_ERR\n");
- mutex_unlock(&sat_cntrl->cmd_wait_mutex);
- goto exit_sys_err_send;
- }
-
- MHI_SAT_LOG("SYS_ERR command sent\n");
-
- /* blocking call to wait for command completion event */
- mhi_sat_wait_cmd_completion(sat_cntrl);
-
- mutex_unlock(&sat_cntrl->cmd_wait_mutex);
- }
-
-exit_sys_err_send:
/* exit if some devices are still present */
if (sat_cntrl->num_devices) {
mutex_unlock(&subsys->cntrl_mutex);
return;
}
+ /*
+ * cancel any pending work as it is possible that work gets queued
+ * when an rpmsg probe comes in before the controller is removed
+ */
+ cancel_work_sync(&sat_cntrl->connect_work);
+ cancel_work_sync(&sat_cntrl->process_work);
+
/* remove address mappings */
mutex_lock(&sat_cntrl->list_mutex);
list_for_each_entry_safe(buf, tmp, &sat_cntrl->addr_map_list, node) {
@@ -937,6 +1047,7 @@ static int mhi_sat_dev_probe(struct mhi_device *mhi_dev,
mutex_init(&sat_cntrl->list_mutex);
mutex_init(&sat_cntrl->cmd_wait_mutex);
spin_lock_init(&sat_cntrl->pkt_lock);
+ spin_lock_init(&sat_cntrl->state_lock);
INIT_WORK(&sat_cntrl->connect_work, mhi_sat_connect_worker);
INIT_WORK(&sat_cntrl->process_work, mhi_sat_process_worker);
INIT_LIST_HEAD(&sat_cntrl->dev_list);
@@ -1006,17 +1117,6 @@ static const struct mhi_device_id mhi_sat_dev_match_table[] = {
{ .chan = "ADSP_7", .driver_data = SUBSYS_ADSP },
{ .chan = "ADSP_8", .driver_data = SUBSYS_ADSP },
{ .chan = "ADSP_9", .driver_data = SUBSYS_ADSP },
- /* CDSP */
- { .chan = "CDSP_0", .driver_data = SUBSYS_CDSP },
- { .chan = "CDSP_1", .driver_data = SUBSYS_CDSP },
- { .chan = "CDSP_2", .driver_data = SUBSYS_CDSP },
- { .chan = "CDSP_3", .driver_data = SUBSYS_CDSP },
- { .chan = "CDSP_4", .driver_data = SUBSYS_CDSP },
- { .chan = "CDSP_5", .driver_data = SUBSYS_CDSP },
- { .chan = "CDSP_6", .driver_data = SUBSYS_CDSP },
- { .chan = "CDSP_7", .driver_data = SUBSYS_CDSP },
- { .chan = "CDSP_8", .driver_data = SUBSYS_CDSP },
- { .chan = "CDSP_9", .driver_data = SUBSYS_CDSP },
/* SLPI */
{ .chan = "SLPI_0", .driver_data = SUBSYS_SLPI },
{ .chan = "SLPI_1", .driver_data = SUBSYS_SLPI },
@@ -1028,17 +1128,6 @@ static const struct mhi_device_id mhi_sat_dev_match_table[] = {
{ .chan = "SLPI_7", .driver_data = SUBSYS_SLPI },
{ .chan = "SLPI_8", .driver_data = SUBSYS_SLPI },
{ .chan = "SLPI_9", .driver_data = SUBSYS_SLPI },
- /* MODEM */
- { .chan = "MODEM_0", .driver_data = SUBSYS_MODEM },
- { .chan = "MODEM_1", .driver_data = SUBSYS_MODEM },
- { .chan = "MODEM_2", .driver_data = SUBSYS_MODEM },
- { .chan = "MODEM_3", .driver_data = SUBSYS_MODEM },
- { .chan = "MODEM_4", .driver_data = SUBSYS_MODEM },
- { .chan = "MODEM_5", .driver_data = SUBSYS_MODEM },
- { .chan = "MODEM_6", .driver_data = SUBSYS_MODEM },
- { .chan = "MODEM_7", .driver_data = SUBSYS_MODEM },
- { .chan = "MODEM_8", .driver_data = SUBSYS_MODEM },
- { .chan = "MODEM_9", .driver_data = SUBSYS_MODEM },
{},
};
@@ -1053,44 +1142,6 @@ static struct mhi_driver mhi_sat_dev_driver = {
},
};
-int mhi_sat_trigger_init(void *data, u64 val)
-{
- struct mhi_sat_subsys *subsys;
- int i, ret;
-
- if (mhi_sat_driver.deferred_init_done)
- return -EIO;
-
- ret = register_rpmsg_driver(&mhi_sat_rpmsg_driver);
- if (ret)
- goto error_sat_trigger_init;
-
- ret = mhi_driver_register(&mhi_sat_dev_driver);
- if (ret)
- goto error_sat_trigger_register;
-
- mhi_sat_driver.deferred_init_done = true;
-
- return 0;
-
-error_sat_trigger_register:
- unregister_rpmsg_driver(&mhi_sat_rpmsg_driver);
-
-error_sat_trigger_init:
- subsys = mhi_sat_driver.subsys;
- for (i = 0; i < mhi_sat_driver.num_subsys; i++, subsys++) {
- ipc_log_context_destroy(subsys->ipc_log);
- mutex_destroy(&subsys->cntrl_mutex);
- }
- kfree(mhi_sat_driver.subsys);
- mhi_sat_driver.subsys = NULL;
-
- return ret;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(mhi_sat_debugfs_fops, NULL,
- mhi_sat_trigger_init, "%llu\n");
-
static int mhi_sat_init(void)
{
struct mhi_sat_subsys *subsys;
@@ -1116,20 +1167,6 @@ static int mhi_sat_init(void)
subsys->ipc_log = ipc_log_context_create(IPC_LOG_PAGES, log, 0);
}
- /* create debugfs entry if defer_init is enabled */
- if (mhi_sat_defer_init) {
- mhi_sat_driver.dentry = debugfs_create_dir("mhi_sat", NULL);
- if (IS_ERR_OR_NULL(mhi_sat_driver.dentry)) {
- ret = -ENODEV;
- goto error_sat_init;
- }
-
- debugfs_create_file("debug", 0444, mhi_sat_driver.dentry, NULL,
- &mhi_sat_debugfs_fops);
-
- return 0;
- }
-
ret = register_rpmsg_driver(&mhi_sat_rpmsg_driver);
if (ret)
goto error_sat_init;
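
The satellite driver now serializes its fatal-error handling against the remove paths with an async cookie rather than a boolean flag. The core of that idiom, under hypothetical demo_* names:

#include <linux/async.h>

struct demo_dev {
	async_cookie_t error_cookie;
};

/* slow error handling runs in the async domain, off the notifier path */
static void demo_error_worker(void *data, async_cookie_t cookie)
{
	/* send SYS_ERR state change, flush pending work, etc. */
}

static void demo_fatal_cb(struct demo_dev *dev)
{
	dev->error_cookie = async_schedule(demo_error_worker, dev);
}

static void demo_remove(struct demo_dev *dev)
{
	/*
	 * async_synchronize_cookie() waits for all entries scheduled
	 * before the given cookie, so +1 includes error_cookie itself
	 */
	async_synchronize_cookie(dev->error_cookie + 1);
}
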
diff --git a/drivers/bus/mhi/devices/mhi_uci.c b/drivers/bus/mhi/devices/mhi_uci.c
index 978c627..e31eaa4 100644
--- a/drivers/bus/mhi/devices/mhi_uci.c
+++ b/drivers/bus/mhi/devices/mhi_uci.c
@@ -12,6 +12,7 @@
#include <linux/of_device.h>
#include <linux/poll.h>
#include <linux/slab.h>
+#include <linux/termios.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
@@ -46,6 +47,7 @@ struct uci_dev {
size_t mtu;
int ref_count;
bool enabled;
+ u32 tiocm;
void *ipc_log;
};
@@ -145,11 +147,20 @@ static long mhi_uci_ioctl(struct file *file,
{
struct uci_dev *uci_dev = file->private_data;
struct mhi_device *mhi_dev = uci_dev->mhi_dev;
+ struct uci_chan *uci_chan = &uci_dev->dl_chan;
long ret = -ERESTARTSYS;
mutex_lock(&uci_dev->mutex);
- if (uci_dev->enabled)
+
+ if (cmd == TIOCMGET) {
+ spin_lock_bh(&uci_chan->lock);
+ ret = uci_dev->tiocm;
+ uci_dev->tiocm = 0;
+ spin_unlock_bh(&uci_chan->lock);
+ } else if (uci_dev->enabled) {
ret = mhi_ioctl(mhi_dev, cmd, arg);
+ }
+
mutex_unlock(&uci_dev->mutex);
return ret;
@@ -212,9 +223,16 @@ static unsigned int mhi_uci_poll(struct file *file, poll_table *wait)
spin_lock_bh(&uci_chan->lock);
if (!uci_dev->enabled) {
mask = POLLERR;
- } else if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) {
- MSG_VERB("Client can read from node\n");
- mask |= POLLIN | POLLRDNORM;
+ } else {
+ if (!list_empty(&uci_chan->pending) || uci_chan->cur_buf) {
+ MSG_VERB("Client can read from node\n");
+ mask |= POLLIN | POLLRDNORM;
+ }
+
+ if (uci_dev->tiocm) {
+ MSG_VERB("Line status changed\n");
+ mask |= POLLPRI;
+ }
}
spin_unlock_bh(&uci_chan->lock);
@@ -646,6 +664,20 @@ static void mhi_dl_xfer_cb(struct mhi_device *mhi_dev,
wake_up(&uci_chan->wq);
}
+static void mhi_status_cb(struct mhi_device *mhi_dev, enum MHI_CB reason)
+{
+ struct uci_dev *uci_dev = mhi_device_get_devdata(mhi_dev);
+ struct uci_chan *uci_chan = &uci_dev->dl_chan;
+ unsigned long flags;
+
+ if (reason == MHI_CB_DTR_SIGNAL) {
+ spin_lock_irqsave(&uci_chan->lock, flags);
+ uci_dev->tiocm = mhi_dev->tiocm;
+ spin_unlock_irqrestore(&uci_chan->lock, flags);
+ wake_up(&uci_chan->wq);
+ }
+}
+
/* .driver_data stores max mtu */
static const struct mhi_device_id mhi_uci_match_table[] = {
{ .chan = "LOOPBACK", .driver_data = 0x1000 },
@@ -664,6 +696,7 @@ static struct mhi_driver mhi_uci_driver = {
.probe = mhi_uci_probe,
.ul_xfer_cb = mhi_ul_xfer_cb,
.dl_xfer_cb = mhi_dl_xfer_cb,
+ .status_cb = mhi_status_cb,
.driver = {
.name = MHI_UCI_DRIVER_NAME,
.owner = THIS_MODULE,
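
The UCI additions cache the DTR line state delivered by the status callback and surface it through poll() as POLLPRI, alongside the existing POLLIN data path. A reduced sketch (demo_* names are assumptions):

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

struct demo_chan {
	spinlock_t lock;
	wait_queue_head_t wq;
	bool have_data;		/* pending downlink payload */
	u32 tiocm;		/* cached line state, 0 = unchanged */
};

static unsigned int demo_poll(struct file *file, poll_table *wait)
{
	struct demo_chan *chan = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &chan->wq, wait);

	spin_lock_bh(&chan->lock);
	if (chan->have_data)
		mask |= POLLIN | POLLRDNORM;	/* payload readable */
	if (chan->tiocm)
		mask |= POLLPRI;		/* line status changed */
	spin_unlock_bh(&chan->lock);

	return mask;
}
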
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 2819868..f8c47e9 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -551,13 +551,21 @@
subsystems crash.
config MSM_ADSPRPC
- tristate "QTI ADSP RPC driver"
- depends on QCOM_GLINK
- help
- Provides a communication mechanism that allows clients to
- make remote method invocations across processor boundary to
- applications/compute DSP processor.
- Say M if you want to enable this module.
+ tristate "QTI FastRPC driver"
+ depends on QCOM_GLINK
+ help
+ Provides a communication mechanism that allows clients to
+ make remote method invocations across processor boundary to
+ applications/compute DSP processor.
+ Say M if you want to enable this module.
+
+config ADSPRPC_DEBUG
+ bool "Debug logs in FastRPC driver"
+ help
+ Enable debug logs in the FastRPC driver. The flag is
+ disabled by default to maximize RPC performance, as debug
+ logging adds RPC overhead.
+ Say Y here if you want to enable the logs.
config MSM_RDBG
tristate "QTI Remote debug driver"
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index a457a86..90bb9dd 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -17,6 +17,7 @@
#include <linux/msm_ion.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/rpmsg.h>
+#include <linux/ipc_logging.h>
#include <soc/qcom/subsystem_notif.h>
#include <soc/qcom/subsystem_restart.h>
#include <soc/qcom/service-notifier.h>
@@ -47,6 +48,7 @@
#define ADSP_MMAP_HEAP_ADDR 4
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
#define ADSP_MMAP_ADD_PAGES 0x1000
+#define ADSP_MMAP_ADD_PAGES_LLC 0x3000
#define FASTRPC_DMAHANDLE_NOMAP (16)
#define FASTRPC_ENOSUCH 39
@@ -97,7 +99,7 @@
#define RH_CID ADSP_DOMAIN_ID
#define PERF_KEYS \
- "count:flush:map:copy:rpmsg:getargs:putargs:invalidate:invoke:tid:ptr"
+ "count:flush:map:copy:rpmsg:getargs:putargs:invalidate:invoke"
#define FASTRPC_STATIC_HANDLE_PROCESS_GROUP (1)
#define FASTRPC_STATIC_HANDLE_DSP_UTILITIES (2)
#define FASTRPC_STATIC_HANDLE_LISTENER (3)
@@ -129,6 +131,15 @@
(int64_t *)(perf_ptr + offset)\
: (int64_t *)NULL) : (int64_t *)NULL)
+#define FASTRPC_GLINK_LOG_PAGES 8
+#define LOG_FASTRPC_GLINK_MSG(ctx, x, ...) \
+ do { \
+ if (ctx) \
+ ipc_log_string(ctx, "%s (%d, %d): "x, \
+ current->comm, current->tgid, current->pid, \
+ ##__VA_ARGS__); \
+ } while (0)
+
static int fastrpc_pdr_notifier_cb(struct notifier_block *nb,
unsigned long code,
void *data);
@@ -295,6 +306,7 @@ struct fastrpc_channel_ctx {
/* Indicates, if channel is restricted to secure node only */
int secure;
struct fastrpc_dsp_capabilities dsp_cap_kernel;
+ void *ipc_log_ctx;
};
struct fastrpc_apps {
@@ -796,7 +808,7 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
struct fastrpc_session_ctx *sess;
struct fastrpc_apps *apps = fl->apps;
int cid = fl->cid;
- struct fastrpc_channel_ctx *chan = &apps->channel[cid];
+ struct fastrpc_channel_ctx *chan = NULL;
struct fastrpc_mmap *map = NULL;
dma_addr_t region_phys = 0;
void *region_vaddr = NULL;
@@ -804,6 +816,11 @@ static int fastrpc_mmap_create(struct fastrpc_file *fl, int fd,
int err = 0, vmid, sgl_index = 0;
struct scatterlist *sgl = NULL;
+ VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
+ if (err)
+ goto bail;
+ chan = &apps->channel[cid];
+
if (!fastrpc_mmap_find(fl, fd, va, len, mflags, 1, ppmap))
return 0;
map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -1423,7 +1440,7 @@ static void fastrpc_file_list_dtor(struct fastrpc_apps *me)
static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
{
- remote_arg64_t *rpra, *lrpra;
+ remote_arg64_t *rpra;
remote_arg_t *lpra = ctx->lpra;
struct smq_invoke_buf *list;
struct smq_phy_page *pages, *ipage;
@@ -1438,7 +1455,11 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
int mflags = 0;
uint64_t *fdlist;
uint32_t *crclist;
- int64_t *perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
+ uint32_t earlyHint;
+ int64_t *perf_counter = NULL;
+
+ if (ctx->fl->profile)
+ perf_counter = getperfcounter(ctx->fl, PERF_COUNT);
/* calculate size of the metadata */
rpra = NULL;
@@ -1477,8 +1498,10 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
ipage += 1;
}
mutex_unlock(&ctx->fl->map_mutex);
+
+ /* metalen includes metadata, fds, crc and early wakeup hint */
metalen = copylen = (size_t)&ipage[0] + (sizeof(uint64_t) * M_FDLIST) +
- (sizeof(uint32_t) * M_CRCLIST);
+ (sizeof(uint32_t) * M_CRCLIST) + sizeof(earlyHint);
/* allocate new local rpra buffer */
lrpralen = (size_t)&list[0];
@@ -1487,11 +1510,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
if (err)
goto bail;
}
- if (ctx->lbuf->virt)
- memset(ctx->lbuf->virt, 0, lrpralen);
-
- lrpra = ctx->lbuf->virt;
- ctx->lrpra = lrpra;
+ ctx->lrpra = ctx->lbuf->virt;
/* calculate len required for copying */
for (oix = 0; oix < inbufs + outbufs; ++oix) {
@@ -1541,13 +1560,13 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
/* map ion buffers */
PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_MAP),
- for (i = 0; rpra && lrpra && i < inbufs + outbufs; ++i) {
+ for (i = 0; rpra && i < inbufs + outbufs; ++i) {
struct fastrpc_mmap *map = ctx->maps[i];
uint64_t buf = ptr_to_uint64(lpra[i].buf.pv);
size_t len = lpra[i].buf.len;
- rpra[i].buf.pv = lrpra[i].buf.pv = 0;
- rpra[i].buf.len = lrpra[i].buf.len = len;
+ rpra[i].buf.pv = 0;
+ rpra[i].buf.len = len;
if (!len)
continue;
if (map) {
@@ -1575,7 +1594,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
pages[idx].addr = map->phys + offset;
pages[idx].size = num << PAGE_SHIFT;
}
- rpra[i].buf.pv = lrpra[i].buf.pv = buf;
+ rpra[i].buf.pv = buf;
}
PERF_END);
for (i = bufs; i < bufs + handles; ++i) {
@@ -1585,15 +1604,16 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
pages[i].size = map->size;
}
fdlist = (uint64_t *)&pages[bufs + handles];
- for (i = 0; i < M_FDLIST; i++)
- fdlist[i] = 0;
crclist = (uint32_t *)&fdlist[M_FDLIST];
- memset(crclist, 0, sizeof(uint32_t)*M_CRCLIST);
+ /* reset fds, crc and early wakeup hint memory */
+ /* remote process updates these values before responding */
+ memset(fdlist, 0, sizeof(uint64_t)*M_FDLIST +
+ sizeof(uint32_t)*M_CRCLIST + sizeof(earlyHint));
/* copy non ion buffers */
PERF(ctx->fl->profile, GET_COUNTER(perf_counter, PERF_COPY),
rlen = copylen - metalen;
- for (oix = 0; rpra && lrpra && oix < inbufs + outbufs; ++oix) {
+ for (oix = 0; rpra && oix < inbufs + outbufs; ++oix) {
int i = ctx->overps[oix]->raix;
struct fastrpc_mmap *map = ctx->maps[i];
size_t mlen;
@@ -1612,7 +1632,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
VERIFY(err, rlen >= mlen);
if (err)
goto bail;
- rpra[i].buf.pv = lrpra[i].buf.pv =
+ rpra[i].buf.pv =
(args - ctx->overps[oix]->offset);
pages[list[i].pgidx].addr = ctx->buf->phys -
ctx->overps[oix]->offset +
@@ -1645,7 +1665,7 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
if (map && (map->attr & FASTRPC_ATTR_COHERENT))
continue;
- if (rpra && lrpra && rpra[i].buf.len &&
+ if (rpra && rpra[i].buf.len &&
ctx->overps[oix]->mstart) {
if (map && map->buf) {
dma_buf_begin_cpu_access(map->buf,
@@ -1659,13 +1679,15 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
}
}
PERF_END);
- for (i = bufs; rpra && lrpra && i < bufs + handles; i++) {
- rpra[i].dma.fd = lrpra[i].dma.fd = ctx->fds[i];
- rpra[i].dma.len = lrpra[i].dma.len = (uint32_t)lpra[i].buf.len;
- rpra[i].dma.offset = lrpra[i].dma.offset =
- (uint32_t)(uintptr_t)lpra[i].buf.pv;
+ for (i = bufs; rpra && i < bufs + handles; i++) {
+ rpra[i].dma.fd = ctx->fds[i];
+ rpra[i].dma.len = (uint32_t)lpra[i].buf.len;
+ rpra[i].dma.offset = (uint32_t)(uintptr_t)lpra[i].buf.pv;
}
+ /* Copy rpra to local buffer */
+ if (ctx->lrpra && rpra && lrpralen > 0)
+ memcpy(ctx->lrpra, rpra, lrpralen);
bail:
return err;
}
@@ -1755,13 +1777,14 @@ static void inv_args_pre(struct smq_invoke_ctx *ctx)
uint64_to_ptr(rpra[i].buf.pv))) {
if (map && map->buf) {
dma_buf_begin_cpu_access(map->buf,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
dma_buf_end_cpu_access(map->buf,
- DMA_TO_DEVICE);
- } else
+ DMA_BIDIRECTIONAL);
+ } else {
dmac_flush_range(
uint64_to_ptr(rpra[i].buf.pv), (char *)
uint64_to_ptr(rpra[i].buf.pv + 1));
+ }
}
end = (uintptr_t)uint64_to_ptr(rpra[i].buf.pv +
@@ -1769,12 +1792,13 @@ static void inv_args_pre(struct smq_invoke_ctx *ctx)
if (!IS_CACHE_ALIGNED(end)) {
if (map && map->buf) {
dma_buf_begin_cpu_access(map->buf,
- DMA_TO_DEVICE);
+ DMA_BIDIRECTIONAL);
dma_buf_end_cpu_access(map->buf,
- DMA_TO_DEVICE);
- } else
+ DMA_BIDIRECTIONAL);
+ } else {
dmac_flush_range((char *)end,
(char *)end + 1);
+ }
}
}
}
@@ -1853,6 +1877,10 @@ static int fastrpc_invoke_send(struct smq_invoke_ctx *ctx,
goto bail;
}
err = rpmsg_send(channel_ctx->rpdev->ept, (void *)msg, sizeof(*msg));
+ LOG_FASTRPC_GLINK_MSG(channel_ctx->ipc_log_ctx,
+ "sent pkt %pK (sz %d): ctx 0x%llx, handle 0x%x, sc 0x%x (rpmsg err %d)",
+ (void *)msg, sizeof(*msg),
+ msg->invoke.header.ctx, handle, ctx->sc, err);
mutex_unlock(&channel_ctx->rpmsg_mutex);
bail:
return err;
@@ -1942,11 +1970,13 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
goto bail;
}
- if (!fl->sctx->smmu.coherent) {
- PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
- inv_args_pre(ctx);
- PERF_END);
- }
+ PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
+ inv_args_pre(ctx);
+ PERF_END);
+
+ PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
+ inv_args(ctx);
+ PERF_END);
PERF(fl->profile, GET_COUNTER(perf_counter, PERF_LINK),
VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
@@ -1965,8 +1995,7 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
}
PERF(fl->profile, GET_COUNTER(perf_counter, PERF_INVARGS),
- if (!fl->sctx->smmu.coherent)
- inv_args(ctx);
+ inv_args(ctx);
PERF_END);
VERIFY(err, 0 == (err = ctx->retval));
@@ -2761,7 +2790,8 @@ static int fastrpc_internal_munmap(struct fastrpc_file *fl,
spin_lock(&fl->hlock);
hlist_for_each_entry_safe(rbuf, n, &fl->remote_bufs, hn_rem) {
- if (rbuf->raddr && (rbuf->flags == ADSP_MMAP_ADD_PAGES)) {
+ if (rbuf->raddr && ((rbuf->flags == ADSP_MMAP_ADD_PAGES) ||
+ (rbuf->flags == ADSP_MMAP_ADD_PAGES_LLC))) {
if ((rbuf->raddr == ud->vaddrout) &&
(rbuf->size == ud->size)) {
free = rbuf;
@@ -2853,7 +2883,8 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
goto bail;
}
mutex_lock(&fl->internal_map_mutex);
- if (ud->flags == ADSP_MMAP_ADD_PAGES) {
+ if ((ud->flags == ADSP_MMAP_ADD_PAGES) ||
+ (ud->flags == ADSP_MMAP_ADD_PAGES_LLC)) {
if (ud->vaddrin) {
err = -EINVAL;
pr_err("adsprpc: %s: %s: ERROR: adding user allocated pages is not supported\n",
@@ -2864,6 +2895,8 @@ static int fastrpc_internal_mmap(struct fastrpc_file *fl,
DMA_ATTR_DELAYED_UNMAP |
DMA_ATTR_NO_KERNEL_MAPPING |
DMA_ATTR_FORCE_NON_COHERENT;
+ if (ud->flags == ADSP_MMAP_ADD_PAGES_LLC)
+ dma_attr |= DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
err = fastrpc_buf_alloc(fl, ud->size, dma_attr, ud->flags,
1, &rbuf);
if (err)
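(ADSP_MMAP_ADD_PAGES_LLC follows the same remote-buffer path as ADSP_MMAP_ADD_PAGES; the only behavioral difference is the extra DMA_ATTR_IOMMU_USE_UPSTREAM_HINT attribute, which on these downstream msm kernels lets the SMMU mapping honor the upstream cacheability hint so the allocation can be served through the system last-level cache.)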
@@ -2945,10 +2978,9 @@ static int fastrpc_session_alloc_locked(struct fastrpc_channel_ctx *chan,
return err;
}
-static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
+static inline int get_cid_from_rpdev(struct rpmsg_device *rpdev)
{
- int err = 0;
- int cid = -1;
+ int err = 0, cid = -1;
VERIFY(err, !IS_ERR_OR_NULL(rpdev));
if (err)
@@ -2963,6 +2995,19 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
else if (!strcmp(rpdev->dev.parent->of_node->name, "mdsp"))
cid = MDSP_DOMAIN_ID;
+ return cid;
+}
+
+static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
+{
+ int err = 0;
+ int cid = -1;
+
+ VERIFY(err, !IS_ERR_OR_NULL(rpdev));
+ if (err)
+ return -EINVAL;
+
+ cid = get_cid_from_rpdev(rpdev);
VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
if (err)
goto bail;
@@ -2971,6 +3016,19 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
mutex_unlock(&gcinfo[cid].rpmsg_mutex);
pr_info("adsprpc: %s: opened rpmsg channel for %s\n",
__func__, gcinfo[cid].subsys);
+
+#if IS_ENABLED(CONFIG_ADSPRPC_DEBUG)
+ if (!gcinfo[cid].ipc_log_ctx)
+ gcinfo[cid].ipc_log_ctx =
+ ipc_log_context_create(FASTRPC_GLINK_LOG_PAGES,
+ gcinfo[cid].name, 0);
+ if (!gcinfo[cid].ipc_log_ctx)
+ pr_warn("adsprpc: %s: failed to create IPC log context for %s\n",
+ __func__, gcinfo[cid].subsys);
+ else
+ pr_info("adsprpc: %s: enabled IPC logging for %s\n",
+ __func__, gcinfo[cid].subsys);
+#endif
bail:
if (err)
pr_err("adsprpc: rpmsg probe of %s cid %d failed\n",
@@ -2988,15 +3046,7 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev)
if (err)
return;
- if (!strcmp(rpdev->dev.parent->of_node->name, "cdsp"))
- cid = CDSP_DOMAIN_ID;
- else if (!strcmp(rpdev->dev.parent->of_node->name, "adsp"))
- cid = ADSP_DOMAIN_ID;
- else if (!strcmp(rpdev->dev.parent->of_node->name, "dsps"))
- cid = SDSP_DOMAIN_ID;
- else if (!strcmp(rpdev->dev.parent->of_node->name, "mdsp"))
- cid = MDSP_DOMAIN_ID;
-
+ cid = get_cid_from_rpdev(rpdev);
VERIFY(err, cid >= 0 && cid < NUM_CHANNELS);
if (err)
goto bail;
@@ -3024,6 +3074,17 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
if (err)
goto bail;
+#if IS_ENABLED(CONFIG_ADSPRPC_DEBUG)
+	{
+		int cid = get_cid_from_rpdev(rpdev);
+
+		if (cid >= 0 && cid < NUM_CHANNELS)
+			LOG_FASTRPC_GLINK_MSG(gcinfo[cid].ipc_log_ctx,
+				"recvd pkt %pK (sz %d): ctx 0x%llx, retVal %d",
+				data, len, rsp->ctx, rsp->retval);
+	}
+#endif
+
index = (uint32_t)((rsp->ctx & FASTRPC_CTXID_MASK) >> 4);
VERIFY(err, index < FASTRPC_CTX_MAX);
if (err)
@@ -3041,7 +3102,8 @@ static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data,
context_notify_user(me->ctxtable[index], rsp->retval);
bail:
if (err)
- pr_err("adsprpc: invalid response or context (err %d)\n", err);
+ pr_err("adsprpc: ERROR: %s: invalid response (data %pK, len %d) from remote subsystem (err %d)\n",
+ __func__, data, len, err);
return err;
}
@@ -3631,7 +3693,7 @@ static int fastrpc_getperf(struct fastrpc_ioctl_perf *ioctl_perf,
param, sizeof(*ioctl_perf));
if (err)
goto bail;
- ioctl_perf->numkeys = sizeof(struct fastrpc_perf)/sizeof(int64_t);
+ ioctl_perf->numkeys = PERF_KEY_MAX;
if (ioctl_perf->keys) {
char *keys = PERF_KEYS;
@@ -4500,6 +4562,8 @@ static void __exit fastrpc_device_exit(void)
for (i = 0; i < NUM_CHANNELS; i++) {
if (!gcinfo[i].name)
continue;
+ if (me->channel[i].ipc_log_ctx)
+ ipc_log_context_destroy(me->channel[i].ipc_log_ctx);
subsys_notif_unregister_notifier(me->channel[i].handle,
&me->channel[i].nb);
}
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 5ba540c..2ae87eb 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -2091,6 +2091,11 @@ static int diag_process_dci_pkt_rsp(unsigned char *buf, int len)
if ((ret == DIAG_DCI_NO_ERROR && !common_cmd) || ret < 0)
return ret;
+ reg_entry.cmd_code = 0;
+ reg_entry.subsys_id = 0;
+ reg_entry.cmd_code_hi = 0;
+ reg_entry.cmd_code_lo = 0;
+
if (header_len >= (sizeof(uint8_t)))
reg_entry.cmd_code = header->cmd_code;
if (header_len >= (2 * sizeof(uint8_t)))
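The zero-initialization added here matters because reg_entry is a stack variable that the header_len ladder fills only partially for short packets; any field the ladder does not reach previously held stale stack bytes, making the subsequent registration lookup nondeterministic. A minimal sketch of the pattern (types and the lookup helper are hypothetical):

	#include <linux/types.h>

	struct cmd_key {
		u8  cmd_code;
		u8  subsys_id;
		u16 cmd_code_hi;
		u16 cmd_code_lo;
	};

	static bool key_matches(const struct cmd_key *key);	/* hypothetical */

	static bool parse_and_match(const u8 *hdr, size_t header_len)
	{
		struct cmd_key key = { 0 };	/* the fix: known starting state */

		if (header_len >= 1)
			key.cmd_code = hdr[0];
		if (header_len >= 2)
			key.subsys_id = hdr[1];
		/* without the zeroing, fields not reached above kept whatever
		 * bytes were on the stack, so key_matches() could succeed or
		 * fail at random for the same input
		 */
		return key_matches(&key);
	}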
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index c45de725..6d84722 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -780,7 +780,7 @@ struct diagchar_dev {
int dci_tag;
int dci_client_id[MAX_DCI_CLIENTS];
struct mutex dci_mutex;
- spinlock_t rpmsginfo_lock[NUM_PERIPHERALS];
+ struct mutex rpmsginfo_mutex[NUM_PERIPHERALS];
int num_dci_client;
unsigned char *apps_dci_buf;
int dci_state;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index e3e6d75..9d5417d 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -4360,7 +4360,7 @@ static int __init diagchar_init(void)
mutex_init(&driver->hdlc_recovery_mutex);
for (i = 0; i < NUM_PERIPHERALS; i++) {
mutex_init(&driver->diagfwd_channel_mutex[i]);
- spin_lock_init(&driver->rpmsginfo_lock[i]);
+ mutex_init(&driver->rpmsginfo_mutex[i]);
driver->diag_id_sent[i] = 0;
}
init_waitqueue_head(&driver->wait_q);
diff --git a/drivers/char/diag/diagfwd_rpmsg.c b/drivers/char/diag/diagfwd_rpmsg.c
index 6dda72a..c1262c1 100644
--- a/drivers/char/diag/diagfwd_rpmsg.c
+++ b/drivers/char/diag/diagfwd_rpmsg.c
@@ -391,17 +391,12 @@ static void diag_state_open_rpmsg(void *ctxt)
static void diag_rpmsg_queue_read(void *ctxt)
{
struct diag_rpmsg_info *rpmsg_info = NULL;
- unsigned long flags;
if (!ctxt)
return;
rpmsg_info = (struct diag_rpmsg_info *)ctxt;
- spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
- if (rpmsg_info->hdl && rpmsg_info->wq &&
- atomic_read(&rpmsg_info->opened))
- queue_work(rpmsg_info->wq, &(rpmsg_info->read_work));
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ queue_work(rpmsg_info->wq, &(rpmsg_info->read_work));
}
static void diag_state_close_rpmsg(void *ctxt)
@@ -435,7 +430,6 @@ static int diag_rpmsg_read(void *ctxt, unsigned char *buf, int buf_len)
struct diag_rpmsg_info *rpmsg_info = NULL;
struct diagfwd_info *fwd_info = NULL;
int ret_val = 0;
- unsigned long flags;
if (!ctxt || !buf || buf_len <= 0)
return -EIO;
@@ -446,16 +440,15 @@ static int diag_rpmsg_read(void *ctxt, unsigned char *buf, int buf_len)
return -EIO;
}
- spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
if (!atomic_read(&rpmsg_info->opened) ||
!rpmsg_info->hdl || !rpmsg_info->inited) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"diag:RPMSG channel not opened");
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
- flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
return -EIO;
}
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
fwd_info = rpmsg_info->fwd_ctxt;
@@ -479,25 +472,22 @@ static void diag_rpmsg_read_work_fn(struct work_struct *work)
struct diag_rpmsg_info *rpmsg_info = container_of(work,
struct diag_rpmsg_info,
read_work);
- unsigned long flags;
if (!rpmsg_info)
return;
- spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
if (!atomic_read(&rpmsg_info->opened)) {
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
- flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
return;
}
if (!rpmsg_info->inited) {
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
- flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
diag_ws_release();
return;
}
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
diagfwd_channel_read(rpmsg_info->fwd_ctxt);
}
@@ -507,7 +497,6 @@ static int diag_rpmsg_write(void *ctxt, unsigned char *buf, int len)
struct diag_rpmsg_info *rpmsg_info = NULL;
int err = 0;
struct rpmsg_device *rpdev = NULL;
- unsigned long flags;
if (!ctxt || !buf)
return -EIO;
@@ -519,16 +508,14 @@ static int diag_rpmsg_write(void *ctxt, unsigned char *buf, int len)
return -EINVAL;
}
- spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
if (!rpmsg_info->inited || !rpmsg_info->hdl ||
!atomic_read(&rpmsg_info->opened)) {
pr_err_ratelimited("diag: In %s, rpmsg not inited, rpmsg_info: %pK, buf: %pK, len: %d\n",
__func__, rpmsg_info, buf, len);
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
- flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
return -ENODEV;
}
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
rpdev = (struct rpmsg_device *)rpmsg_info->hdl;
err = rpmsg_send(rpdev->ept, buf, len);
@@ -538,6 +525,7 @@ static int diag_rpmsg_write(void *ctxt, unsigned char *buf, int len)
} else
err = -ENOMEM;
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
return err;
}
@@ -547,18 +535,16 @@ static void diag_rpmsg_late_init_work_fn(struct work_struct *work)
struct diag_rpmsg_info *rpmsg_info = container_of(work,
struct diag_rpmsg_info,
late_init_work);
- unsigned long flags;
if (!rpmsg_info)
return;
- spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
if (!rpmsg_info->hdl) {
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
- flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
return;
}
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
diagfwd_channel_open(rpmsg_info->fwd_ctxt);
DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "rpmsg late init p: %d t: %d\n",
@@ -571,18 +557,16 @@ static void diag_rpmsg_open_work_fn(struct work_struct *work)
struct diag_rpmsg_info *rpmsg_info = container_of(work,
struct diag_rpmsg_info,
open_work);
- unsigned long flags;
if (!rpmsg_info)
return;
- spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
if (!rpmsg_info->inited) {
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
- flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
return;
}
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
if (rpmsg_info->type != TYPE_CNTL) {
diagfwd_channel_open(rpmsg_info->fwd_ctxt);
@@ -597,19 +581,17 @@ static void diag_rpmsg_close_work_fn(struct work_struct *work)
struct diag_rpmsg_info *rpmsg_info = container_of(work,
struct diag_rpmsg_info,
close_work);
- unsigned long flags;
if (!rpmsg_info)
return;
- spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
if (!rpmsg_info->inited || !rpmsg_info->hdl) {
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
- flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
return;
}
rpmsg_info->hdl = NULL;
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
diagfwd_channel_close(rpmsg_info->fwd_ctxt);
}
@@ -722,20 +704,18 @@ static void rpmsg_late_init(struct diag_rpmsg_info *rpmsg_info)
int diag_rpmsg_init_peripheral(uint8_t peripheral)
{
- unsigned long flags;
-
if (peripheral >= NUM_PERIPHERALS) {
pr_err("diag: In %s, invalid peripheral %d\n", __func__,
peripheral);
return -EINVAL;
}
- spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
+ mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
rpmsg_late_init(&rpmsg_data[peripheral]);
rpmsg_late_init(&rpmsg_dci[peripheral]);
rpmsg_late_init(&rpmsg_cmd[peripheral]);
rpmsg_late_init(&rpmsg_dci_cmd[peripheral]);
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral], flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
return 0;
}
@@ -743,7 +723,6 @@ int diag_rpmsg_init_peripheral(uint8_t peripheral)
static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info)
{
char wq_name[DIAG_RPMSG_NAME_SZ + 12];
- unsigned long flags;
if (!rpmsg_info)
return;
@@ -763,7 +742,7 @@ static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info)
INIT_WORK(&(rpmsg_info->close_work), diag_rpmsg_close_work_fn);
INIT_WORK(&(rpmsg_info->read_work), diag_rpmsg_read_work_fn);
INIT_WORK(&(rpmsg_info->late_init_work), diag_rpmsg_late_init_work_fn);
- spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
rpmsg_info->hdl = NULL;
rpmsg_info->fwd_ctxt = NULL;
atomic_set(&rpmsg_info->opened, 0);
@@ -772,7 +751,7 @@ static void __diag_rpmsg_init(struct diag_rpmsg_info *rpmsg_info)
"%s initialized fwd_ctxt: %pK hdl: %pK\n",
rpmsg_info->name, rpmsg_info->fwd_ctxt,
rpmsg_info->hdl);
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
}
void diag_rpmsg_invalidate(void *ctxt, struct diagfwd_info *fwd_ctxt)
@@ -790,7 +769,6 @@ int diag_rpmsg_init(void)
{
uint8_t peripheral;
struct diag_rpmsg_info *rpmsg_info = NULL;
- unsigned long flags;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
if (peripheral != PERIPHERAL_WDSP)
@@ -800,10 +778,9 @@ int diag_rpmsg_init(void)
diagfwd_cntl_register(TRANSPORT_RPMSG, rpmsg_info->peripheral,
(void *)rpmsg_info, &rpmsg_ops,
&(rpmsg_info->fwd_ctxt));
- spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
+ mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
rpmsg_info->inited = 1;
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral],
- flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
diagfwd_channel_open(rpmsg_info->fwd_ctxt);
diagfwd_late_open(rpmsg_info->fwd_ctxt);
__diag_rpmsg_init(&rpmsg_data[peripheral]);
@@ -836,31 +813,27 @@ static void __diag_rpmsg_exit(struct diag_rpmsg_info *rpmsg_info)
void diag_rpmsg_early_exit(void)
{
int peripheral = 0;
- unsigned long flags;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
if (peripheral != PERIPHERAL_WDSP)
continue;
- spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
+ mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
__diag_rpmsg_exit(&rpmsg_cntl[peripheral]);
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral],
- flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
}
}
void diag_rpmsg_exit(void)
{
int peripheral = 0;
- unsigned long flags;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
- spin_lock_irqsave(&driver->rpmsginfo_lock[peripheral], flags);
+ mutex_lock(&driver->rpmsginfo_mutex[peripheral]);
__diag_rpmsg_exit(&rpmsg_data[peripheral]);
__diag_rpmsg_exit(&rpmsg_cmd[peripheral]);
__diag_rpmsg_exit(&rpmsg_dci[peripheral]);
__diag_rpmsg_exit(&rpmsg_dci_cmd[peripheral]);
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[peripheral],
- flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[peripheral]);
}
}
@@ -886,7 +859,6 @@ static struct diag_rpmsg_info *diag_get_rpmsg_ptr(char *name)
static int diag_rpmsg_probe(struct rpmsg_device *rpdev)
{
struct diag_rpmsg_info *rpmsg_info = NULL;
- unsigned long flags;
if (!rpdev)
return 0;
@@ -896,11 +868,10 @@ static int diag_rpmsg_probe(struct rpmsg_device *rpdev)
rpmsg_info = diag_get_rpmsg_ptr(rpdev->id.name);
if (rpmsg_info) {
- spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
rpmsg_info->hdl = rpdev;
atomic_set(&rpmsg_info->opened, 1);
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
- flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
dev_set_drvdata(&rpdev->dev, rpmsg_info);
diagfwd_channel_read(rpmsg_info->fwd_ctxt);
@@ -913,17 +884,15 @@ static int diag_rpmsg_probe(struct rpmsg_device *rpdev)
static void diag_rpmsg_remove(struct rpmsg_device *rpdev)
{
struct diag_rpmsg_info *rpmsg_info = NULL;
- unsigned long flags;
if (!rpdev)
return;
rpmsg_info = diag_get_rpmsg_ptr(rpdev->id.name);
if (rpmsg_info) {
- spin_lock_irqsave(&driver->rpmsginfo_lock[PERI_RPMSG], flags);
+ mutex_lock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
atomic_set(&rpmsg_info->opened, 0);
- spin_unlock_irqrestore(&driver->rpmsginfo_lock[PERI_RPMSG],
- flags);
+ mutex_unlock(&driver->rpmsginfo_mutex[PERI_RPMSG]);
queue_work(rpmsg_info->wq, &rpmsg_info->close_work);
}
}
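The spinlock-to-mutex conversion throughout this file is what makes the relocated unlock in diag_rpmsg_write() legal: rpmsg_send() may sleep, which is forbidden inside a spin_lock_irqsave() section but fine under a mutex, so the channel state can now stay protected across the send instead of being checked and then raced. A condensed before/after sketch of that function's locking:

	/* before: the spinlock had to be dropped before sleeping */
	spin_lock_irqsave(&lock, flags);
	if (!opened) {
		spin_unlock_irqrestore(&lock, flags);
		return -ENODEV;
	}
	spin_unlock_irqrestore(&lock, flags);	/* forced early unlock... */
	err = rpmsg_send(ept, buf, len);	/* ...racing channel close */

	/* after: a mutex may be held across the sleeping call */
	mutex_lock(&mtx);
	if (!opened) {
		mutex_unlock(&mtx);
		return -ENODEV;
	}
	err = rpmsg_send(ept, buf, len);
	mutex_unlock(&mtx);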
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index 27c1f64..23e8e3d 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -652,13 +652,10 @@ static void handle_ctrl_pkt(struct diag_socket_info *info, void *buf, int len)
info->name);
mutex_lock(&driver->diag_notifier_mutex);
- if (bootup_req[info->peripheral] == PERIPHERAL_SSR_UP) {
+ if (bootup_req[info->peripheral] == PERIPHERAL_SSR_UP)
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
- "diag: %s is up, stopping cleanup: bootup_req = %d\n",
+ "diag: %s is up, bootup_req = %d\n",
info->name, (int)bootup_req[info->peripheral]);
- mutex_unlock(&driver->diag_notifier_mutex);
- break;
- }
mutex_unlock(&driver->diag_notifier_mutex);
socket_close_channel(info);
}
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 9bffcd3..c0732f0 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -570,8 +570,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
unsigned long long m;
m = hpets->hp_tick_freq + (dis >> 1);
- do_div(m, dis);
- return (unsigned long)m;
+ return div64_ul(m, dis);
}
static int
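The hpet change replaces the two-step do_div() idiom with div64_ul(), which returns the 64-bit-by-unsigned-long quotient directly, while do_div() divides its first argument in place and returns the remainder. The two forms are equivalent here:

	unsigned long long m = hpets->hp_tick_freq + (dis >> 1);

	/* old idiom: m becomes the quotient, the remainder is discarded */
	do_div(m, dis);
	return (unsigned long)m;

	/* new idiom: one expression, no in-place mutation */
	return div64_ul(hpets->hp_tick_freq + (dis >> 1), dis);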
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index 3348136..1131524 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -153,6 +153,8 @@ static int clk_generated_determine_rate(struct clk_hw *hw,
continue;
div = DIV_ROUND_CLOSEST(parent_rate, req->rate);
+ if (div > GENERATED_MAX_DIV + 1)
+ div = GENERATED_MAX_DIV + 1;
clk_generated_best_diff(req, parent, parent_rate, div,
&best_diff, &best_rate);
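Without this clamp, a requested rate far below parent_rate makes DIV_ROUND_CLOSEST() produce a divider the hardware field cannot hold, and clk_generated_best_diff() would then evaluate an unprogrammable setting; the cap is GENERATED_MAX_DIV + 1 because the GCKDIV register field encodes divider - 1. For example, parent_rate = 200 MHz with req->rate = 32 kHz yields a closest divider of 6250, far beyond the field's maximum of 256.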
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index a72c058..bd73664 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -407,3 +407,29 @@
Support for the debug clock controller on Qualcomm Technologies, Inc
LITO devices.
Say Y if you want to support the clock measurement functionality.
+
+config SM_GCC_BENGAL
+ tristate "BENGAL Global Clock Controller"
+ select QCOM_GDSC
+ depends on COMMON_CLK_QCOM
+ help
+	  Support for the global clock controller on Qualcomm Technologies, Inc.
+	  BENGAL devices. Say Y if you want to use peripheral devices such as
+	  UART, SPI, I2C, USB, UFS, SDCC, PCIe, camera, and video.
+
+config SM_GPUCC_BENGAL
+ tristate "BENGAL Graphics Clock Controller"
+ select SM_GCC_BENGAL
+ help
+	  Support for the graphics clock controller on Qualcomm Technologies, Inc.
+ BENGAL devices.
+ Say Y if you want to support graphics controller devices.
+
+config SM_DISPCC_BENGAL
+ tristate "BENGAL Display Clock Controller"
+ select SM_GCC_BENGAL
+ help
+ Support for the display clock controller on Qualcomm Technologies, Inc.
+ BENGAL devices.
+	  Say Y if you want to support display devices and functionality such
+	  as the splash screen.
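To exercise the new controllers one would enable the three symbols in the target defconfig, for example (illustrative fragment; SM_GCC_BENGAL is also pulled in automatically via the select relations above):

	CONFIG_SM_GCC_BENGAL=y
	CONFIG_SM_DISPCC_BENGAL=y
	CONFIG_SM_GPUCC_BENGAL=y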
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 40dca85..3e58b7b 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -54,9 +54,12 @@
obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o
obj-$(CONFIG_SM_CAMCC_LITO) += camcc-lito.o
obj-$(CONFIG_SM_DEBUGCC_LITO) += debugcc-lito.o
+obj-$(CONFIG_SM_DISPCC_BENGAL) += dispcc-bengal.o
obj-$(CONFIG_SM_DISPCC_LITO) += dispcc-lito.o
+obj-$(CONFIG_SM_GCC_BENGAL) += gcc-bengal.o
obj-$(CONFIG_SM_GCC_LITO) += gcc-lito.o
+obj-$(CONFIG_SM_GPUCC_BENGAL) += gpucc-bengal.o
+obj-$(CONFIG_SM_GPUCC_LITO) += gpucc-lito.o
obj-$(CONFIG_SM_NPUCC_LITO) += npucc-lito.o
obj-$(CONFIG_SM_VIDEOCC_LITO) += videocc-lito.o
-obj-$(CONFIG_SM_GPUCC_LITO) += gpucc-lito.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
diff --git a/drivers/clk/qcom/camcc-kona.c b/drivers/clk/qcom/camcc-kona.c
index 95637b2..a208f54 100644
--- a/drivers/clk/qcom/camcc-kona.c
+++ b/drivers/clk/qcom/camcc-kona.c
@@ -328,7 +328,6 @@ static const struct alpha_pll_config cam_cc_pll2_config_sm8250_v2 = {
.config_ctl_val = 0x08200920,
.config_ctl_hi_val = 0x05008011,
.config_ctl_hi1_val = 0x00000000,
- .test_ctl_val = 0x00010000,
.user_ctl_val = 0x00000100,
.user_ctl_hi_val = 0x00000000,
.user_ctl_hi1_val = 0x00000000,
@@ -378,9 +377,9 @@ static struct clk_alpha_pll_postdiv cam_cc_pll2_out_main = {
};
static const struct alpha_pll_config cam_cc_pll3_config = {
- .l = 0xF,
+ .l = 0x24,
.cal_l = 0x44,
- .alpha = 0xA000,
+ .alpha = 0x7555,
.config_ctl_val = 0x20485699,
.config_ctl_hi_val = 0x00002261,
.config_ctl_hi1_val = 0x029A699C,
@@ -433,9 +432,9 @@ static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
};
static const struct alpha_pll_config cam_cc_pll4_config = {
- .l = 0xF,
+ .l = 0x24,
.cal_l = 0x44,
- .alpha = 0xA000,
+ .alpha = 0x7555,
.config_ctl_val = 0x20485699,
.config_ctl_hi_val = 0x00002261,
.config_ctl_hi1_val = 0x029A699C,
@@ -877,16 +876,19 @@ static struct clk_rcg2 cam_cc_icp_clk_src = {
};
static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
- F(150000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
- F(200000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
- F(250000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
- F(300000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
F(350000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
- F(425000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
F(475000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
- F(525000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
F(576000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
- F(630000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(720000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src_kona_v2[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(350000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(475000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(576000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
F(680000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
{ }
};
@@ -944,16 +946,19 @@ static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
};
static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
- F(150000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
- F(200000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
- F(250000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
- F(300000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
F(350000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
- F(425000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
F(475000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
- F(525000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
F(576000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
- F(630000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(720000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src_kona_v2[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(350000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(475000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(576000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
F(680000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
{ }
};
@@ -2715,7 +2720,9 @@ static void cam_cc_kona_fixup_konav2(struct regmap *regmap)
cam_cc_bps_clk_src.freq_tbl = ftbl_cam_cc_bps_clk_src_kona_v2;
cam_cc_fd_core_clk_src.freq_tbl = ftbl_cam_cc_fd_core_clk_src_kona_v2;
cam_cc_icp_clk_src.freq_tbl = ftbl_cam_cc_fd_core_clk_src_kona_v2;
+ cam_cc_ife_0_clk_src.freq_tbl = ftbl_cam_cc_ife_0_clk_src_kona_v2;
cam_cc_ife_0_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 680000000;
+ cam_cc_ife_1_clk_src.freq_tbl = ftbl_cam_cc_ife_1_clk_src_kona_v2;
cam_cc_ife_1_clk_src.clkr.hw.init->rate_max[VDD_NOMINAL] = 680000000;
cam_cc_ife_lite_clk_src.freq_tbl = ftbl_cam_cc_ife_lite_clk_src_kona_v2;
cam_cc_jpeg_clk_src.freq_tbl = ftbl_cam_cc_bps_clk_src_kona_v2;
diff --git a/drivers/clk/qcom/camcc-lito.c b/drivers/clk/qcom/camcc-lito.c
index 125e082..20c3295 100644
--- a/drivers/clk/qcom/camcc-lito.c
+++ b/drivers/clk/qcom/camcc-lito.c
@@ -24,7 +24,7 @@
#include "clk-rcg.h"
#include "clk-regmap.h"
#include "common.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
diff --git a/drivers/clk/qcom/dispcc-bengal.c b/drivers/clk/qcom/dispcc-bengal.c
new file mode 100644
index 0000000..4f48c64
--- /dev/null
+++ b/drivers/clk/qcom/dispcc-bengal.c
@@ -0,0 +1,722 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,dispcc-bengal.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "vdd-level-bengal.h"
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+
+enum {
+ P_BI_TCXO,
+ P_CHIP_SLEEP_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_DISP_CC_PLL0_OUT_MAIN,
+ P_DSI0_PHY_PLL_OUT_BYTECLK,
+ P_DSI0_PHY_PLL_OUT_DSICLK,
+ P_DSI1_PHY_PLL_OUT_DSICLK,
+ P_GPLL0_OUT_MAIN,
+};
+
+static const struct parent_map disp_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_BYTECLK, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_0[] = {
+ "bi_tcxo",
+ "dsi0_phy_pll_out_byteclk",
+ "core_bi_pll_test_se",
+};
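(Each parent_map entry pairs a logical parent, the P_* enum value, with the hardware mux select value written to the RCG CFG register; the companion *_parent_names array must stay index-aligned with its map, because the clock framework resolves parents by position.)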
+
+static const struct parent_map disp_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_DISP_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPLL0_OUT_MAIN, 4 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_1[] = {
+ "bi_tcxo",
+ "disp_cc_pll0_out_main",
+ "gpll0_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_2[] = {
+ "bi_tcxo",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 4 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_3[] = {
+ "bi_tcxo",
+ "gpll0_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
+ { P_DSI1_PHY_PLL_OUT_DSICLK, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_4[] = {
+ "bi_tcxo",
+ "dsi0_phy_pll_out_dsiclk",
+ "dsi1_phy_pll_out_dsiclk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map disp_cc_parent_map_5[] = {
+ { P_CHIP_SLEEP_CLK, 0 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const disp_cc_parent_names_5[] = {
+ "chip_sleep_clk",
+ "core_bi_pll_test_se",
+};
+
+static struct pll_vco spark_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+/* 768MHz configuration */
+static const struct alpha_pll_config disp_cc_pll0_config = {
+ .l = 0x28,
+ .alpha = 0x0,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x40008529,
+};
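(Sanity check on the "768MHz configuration" comment: an alpha PLL runs at parent * (L + alpha / 2^alpha_width); with the 19.2 MHz TCXO as parent, L = 0x28 = 40 and a zero alpha give 19.2 MHz * 40 = 768 MHz.)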
+
+static struct clk_alpha_pll disp_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = spark_vco,
+ .num_vco = ARRAY_SIZE(spark_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_pll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 1000000000,
+ [VDD_NOMINAL] = 2000000000},
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_disp_cc_pll0_out_main[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv disp_cc_pll0_out_main = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_disp_cc_pll0_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_disp_cc_pll0_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_pll0_out_main",
+ .parent_names = (const char *[]){ "disp_cc_pll0" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
+ .reg = 0x20d4,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "disp_cc_mdss_byte0_div_clk_src",
+ .parent_names =
+ (const char *[]){ "disp_cc_mdss_byte0_clk_src" },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_MAIN, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
+ .cmd_rcgr = 0x2154,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_3,
+ .freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk_src",
+ .parent_names = disp_cc_parent_names_3,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 19200000,
+ [VDD_LOW] = 37500000,
+ [VDD_NOMINAL] = 75000000},
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
+ .cmd_rcgr = 0x20bc,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk_src",
+ .parent_names = disp_cc_parent_names_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_byte2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 19200000,
+ [VDD_LOWER] = 164000000,
+ [VDD_LOW] = 187500000},
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_esc0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
+ .cmd_rcgr = 0x20d8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_0,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk_src",
+ .parent_names = disp_cc_parent_names_0,
+ .num_parents = 3,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 19200000},
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(307200000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ F(384000000, P_DISP_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
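The fractional 2.5 divider above is intentional: in the qcom clock drivers the F() frequency-table macro stores the pre-divider as 2 * h - 1, so half-integer dividers encode exactly. A sketch of the definition as it appears in the upstream qcom clk drivers (shown for reference, not part of this patch):

	#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }

	/* 768 MHz / 2.5 = 307.2 MHz; h = 2.5 encodes as 2 * 2.5 - 1 = 4 */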
+
+static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
+ .cmd_rcgr = 0x2074,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk_src",
+ .parent_names = disp_cc_parent_names_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 19200000,
+ [VDD_LOWER] = 192000000,
+ [VDD_LOW] = 256000000,
+ [VDD_LOW_L1] = 307200000,
+ [VDD_NOMINAL] = 384000000},
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
+ .cmd_rcgr = 0x205c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_4,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk_src",
+ .parent_names = disp_cc_parent_names_4,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_pixel_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 19200000,
+ [VDD_LOWER] = 183310056,
+ [VDD_LOW] = 250000000},
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_mdss_rot_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(192000000, P_DISP_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(256000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(307200000, P_DISP_CC_PLL0_OUT_MAIN, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_mdss_rot_clk_src = {
+ .cmd_rcgr = 0x208c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_1,
+ .freq_tbl = ftbl_disp_cc_mdss_rot_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk_src",
+ .parent_names = disp_cc_parent_names_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 19200000,
+ [VDD_LOWER] = 192000000,
+ [VDD_LOW] = 256000000,
+ [VDD_LOW_L1] = 307200000},
+ },
+};
+
+static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
+ .cmd_rcgr = 0x20a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk_src",
+ .parent_names = disp_cc_parent_names_2,
+ .num_parents = 2,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 19200000},
+ },
+};
+
+static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
+ F(32764, P_CHIP_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 disp_cc_sleep_clk_src = {
+ .cmd_rcgr = 0x6050,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_5,
+ .freq_tbl = ftbl_disp_cc_sleep_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_sleep_clk_src",
+ .parent_names = disp_cc_parent_names_5,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 32000},
+ },
+};
+
+static struct clk_rcg2 disp_cc_xo_clk_src = {
+ .cmd_rcgr = 0x6034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = disp_cc_parent_map_2,
+ .freq_tbl = ftbl_disp_cc_mdss_esc0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "disp_cc_xo_clk_src",
+ .parent_names = disp_cc_parent_names_2,
+ .num_parents = 2,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 19200000},
+ },
+};
+
+static struct clk_branch disp_cc_mdss_ahb_clk = {
+ .halt_reg = 0x2044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_ahb_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_clk = {
+ .halt_reg = 0x2024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
+ .halt_reg = 0x2028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_byte0_intf_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_byte0_div_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_esc0_clk = {
+ .halt_reg = 0x202c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x202c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_esc0_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_esc0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_clk = {
+ .halt_reg = 0x2008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_mdp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
+ .halt_reg = 0x2018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x2018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_mdp_lut_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_mdp_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
+ .halt_reg = 0x4004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x4004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_non_gdsc_ahb_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_pclk0_clk = {
+ .halt_reg = 0x2004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_pclk0_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_pclk0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_rot_clk = {
+ .halt_reg = 0x2010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_rot_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_rot_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_mdss_vsync_clk = {
+ .halt_reg = 0x2020,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_mdss_vsync_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_mdss_vsync_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_sleep_clk = {
+ .halt_reg = 0x6068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_sleep_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_sleep_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch disp_cc_xo_clk = {
+ .halt_reg = 0x604c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x604c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "disp_cc_xo_clk",
+ .parent_names = (const char *[]){
+ "disp_cc_xo_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *disp_cc_bengal_clocks[] = {
+ [DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
+ [DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
+ [DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
+ [DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
+ [DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
+ [DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
+ [DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
+ [DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
+ [DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
+ [DISP_CC_MDSS_ROT_CLK] = &disp_cc_mdss_rot_clk.clkr,
+ [DISP_CC_MDSS_ROT_CLK_SRC] = &disp_cc_mdss_rot_clk_src.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
+ [DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
+ [DISP_CC_PLL0] = &disp_cc_pll0.clkr,
+ [DISP_CC_PLL0_OUT_MAIN] = &disp_cc_pll0_out_main.clkr,
+ [DISP_CC_SLEEP_CLK] = &disp_cc_sleep_clk.clkr,
+ [DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
+ [DISP_CC_XO_CLK] = &disp_cc_xo_clk.clkr,
+ [DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
+};
+
+static const struct regmap_config disp_cc_bengal_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x10000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc disp_cc_bengal_desc = {
+ .config = &disp_cc_bengal_regmap_config,
+ .clks = disp_cc_bengal_clocks,
+ .num_clks = ARRAY_SIZE(disp_cc_bengal_clocks),
+};
+
+static const struct of_device_id dispcc_bengal_match_table[] = {
+ { .compatible = "qcom,bengal-dispcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, dispcc_bengal_match_table);
+
+static int dispcc_bengal_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ struct clk *clk;
+ int ret;
+
+ vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+ if (IS_ERR(vdd_cx.regulator[0])) {
+ if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_cx regulator\n");
+ return PTR_ERR(vdd_cx.regulator[0]);
+ }
+
+ regmap = qcom_cc_map(pdev, &disp_cc_bengal_desc);
+ if (IS_ERR(regmap)) {
+ pr_err("Failed to map the disp_cc registers\n");
+ return PTR_ERR(regmap);
+ }
+
+ clk = clk_get(&pdev->dev, "cfg_ahb_clk");
+ if (IS_ERR(clk)) {
+ if (PTR_ERR(clk) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to get ahb clock handle\n");
+ return PTR_ERR(clk);
+ }
+ clk_put(clk);
+
+ clk_alpha_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
+
+ ret = qcom_cc_really_probe(pdev, &disp_cc_bengal_desc, regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register Display CC clocks\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "Registered Display CC clocks\n");
+ return 0;
+}
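(The clk_get()/clk_put() pair in the probe is a deliberate ordering check rather than a real consumer: acquiring cfg_ahb_clk and immediately releasing it simply defers the probe with -EPROBE_DEFER until the AHB clock provider has registered.)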
+
+static struct platform_driver dispcc_bengal_driver = {
+ .probe = dispcc_bengal_probe,
+ .driver = {
+ .name = "bengal-dispcc",
+ .of_match_table = dispcc_bengal_match_table,
+ },
+};
+
+static int __init disp_cc_bengal_init(void)
+{
+ return platform_driver_register(&dispcc_bengal_driver);
+}
+subsys_initcall(disp_cc_bengal_init);
+
+static void __exit disp_cc_bengal_exit(void)
+{
+ platform_driver_unregister(&dispcc_bengal_driver);
+}
+module_exit(disp_cc_bengal_exit);
+
+MODULE_DESCRIPTION("QTI DISPCC BENGAL Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/dispcc-lito.c b/drivers/clk/qcom/dispcc-lito.c
index 35a7d34..0d4f4b7 100644
--- a/drivers/clk/qcom/dispcc-lito.c
+++ b/drivers/clk/qcom/dispcc-lito.c
@@ -25,7 +25,7 @@
#include "common.h"
#include "gdsc.h"
#include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
diff --git a/drivers/clk/qcom/gcc-bengal.c b/drivers/clk/qcom/gcc-bengal.c
new file mode 100644
index 0000000..b2cad01
--- /dev/null
+++ b/drivers/clk/qcom/gcc-bengal.c
@@ -0,0 +1,3897 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-bengal.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "reset.h"
+#include "vdd-level-bengal.h"
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_cx_ao, VDD_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_AUX2,
+ P_GPLL0_OUT_EARLY,
+ P_GPLL10_OUT_MAIN,
+ P_GPLL11_OUT_AUX,
+ P_GPLL11_OUT_AUX2,
+ P_GPLL11_OUT_MAIN,
+ P_GPLL3_OUT_EARLY,
+ P_GPLL3_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL6_OUT_EARLY,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL7_OUT_MAIN,
+ P_GPLL8_OUT_EARLY,
+ P_GPLL8_OUT_MAIN,
+ P_GPLL9_OUT_EARLY,
+ P_GPLL9_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_0[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_aux2",
+ "core_bi_pll_test_se",
+};
+static const char * const gcc_parent_names_0_ao[] = {
+ "bi_tcxo_ao",
+ "gpll0",
+ "gpll0_out_aux2",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL6_OUT_MAIN, 4 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_1[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_aux2",
+ "gpll6_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_2[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_aux2",
+ "sleep_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL9_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_3[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll9",
+ "gpll10_out_main",
+ "gpll9_out_main",
+ "gpll3_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_4[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_aux2",
+ "gpll4_out_main",
+ "gpll3_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL8_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_5[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll8",
+ "gpll10_out_main",
+ "gpll8_out_main",
+ "gpll9_out_main",
+ "gpll3_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL6_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_6[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll8",
+ "gpll10_out_main",
+ "gpll6_out_main",
+ "gpll9_out_main",
+ "gpll3",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_7[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_aux2",
+ "gpll10_out_main",
+ "gpll4_out_main",
+ "gpll3",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL8_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_8[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll8",
+ "gpll10_out_main",
+ "gpll8_out_main",
+ "gpll9_out_main",
+ "gpll3",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL8_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_9[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_aux2",
+ "gpll10_out_main",
+ "gpll8_out_main",
+ "gpll9_out_main",
+ "gpll3",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL6_OUT_EARLY, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_10[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll8",
+ "gpll10_out_main",
+ "gpll6",
+ "gpll9_out_main",
+ "gpll3_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_11[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_11[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll0_out_aux2",
+ "gpll7_out_main",
+ "gpll4_out_main",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_12[] = {
+ "bi_tcxo",
+ "sleep_clk",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL11_OUT_MAIN, 1 },
+ { P_GPLL11_OUT_AUX, 2 },
+ { P_GPLL11_OUT_AUX2, 3 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_13[] = {
+ "bi_tcxo",
+ "gpll11_out_main",
+ "gpll11_out_aux",
+ "gpll11_out_aux2",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_14[] = {
+ "bi_tcxo",
+ "gpll0",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gcc_parent_map_15[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL6_OUT_MAIN, 4 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gcc_parent_names_15[] = {
+ "bi_tcxo",
+ "gpll0",
+ "gpll6_out_main",
+ "core_bi_pll_test_se",
+};
+
+static struct pll_vco brammo_vco[] = {
+ { 500000000, 1250000000, 0 },
+};
+
+static struct pll_vco default_vco[] = {
+ { 1000000000, 2000000000, 0 },
+ { 750000000, 1500000000, 1 },
+ { 500000000, 1000000000, 2 },
+ { 250000000, 500000000, 3 },
+};
+
+static const u8 clk_alpha_pll_regs_offset[][PLL_OFF_MAX_REGS] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_TEST_CTL] = 0x10,
+ [PLL_OFF_TEST_CTL_U] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_USER_CTL_U] = 0x1C,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_TEST_CTL] = 0x10,
+ [PLL_OFF_TEST_CTL_U] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x1C,
+ [PLL_OFF_STATUS] = 0x20,
+ },
+};
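(The driver carries its own offset table instead of the shared clk_alpha_pll_regs[] because the Brammo-type PLL packs its registers differently: it has no USER_CTL_U, so CONFIG_CTL drops to 0x1c and STATUS to 0x20, while the default layout matches the usual alpha PLL register map.)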
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 1000000000,
+ [VDD_NOMINAL] = 2000000000},
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_aux2[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_aux2 = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll0_out_aux2,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_aux2),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_aux2",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_main[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_main = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll0_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_main",
+ .parent_names = (const char *[]){ "gpll0" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+/* 1152MHz configuration */
+static const struct alpha_pll_config gpll10_config = {
+ .l = 0x3c,
+ .vco_val = 0x1 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x40008529,
+};
+
+static struct clk_alpha_pll gpll10 = {
+ .offset = 0xa000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll10",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ .vdd_class = &vdd_mx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 1000000000,
+ [VDD_NOMINAL] = 2000000000},
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll10_out_main[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll10_out_main = {
+ .offset = 0xa000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll10_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll10_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll10_out_main",
+ .parent_names = (const char *[]){ "gpll10" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+/* 600MHz configuration */
+static const struct alpha_pll_config gpll11_config = {
+ .l = 0x1F,
+ .alpha = 0x0,
+ .alpha_hi = 0x40,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .config_ctl_val = 0x40008529,
+};
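(Sanity check on the "600MHz configuration" comment: L = 0x1f = 31, and with the usual 40-bit alpha the alpha_hi byte 0x40 contributes 0x40 / 0x100 = 0.25, so 19.2 MHz * 31.25 = 600 MHz.)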
+
+static struct clk_alpha_pll gpll11 = {
+ .offset = 0xb000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll11",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 1000000000,
+ [VDD_NOMINAL] = 2000000000},
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll11_out_main[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll11_out_main = {
+ .offset = 0xb000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll11_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll11_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll11_out_main",
+ .parent_names = (const char *[]){ "gpll11" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll3 = {
+ .offset = 0x3000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 1000000000,
+ [VDD_NOMINAL] = 2000000000},
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x4000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 1000000000,
+ [VDD_NOMINAL] = 2000000000},
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll4_out_main[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll4_out_main = {
+ .offset = 0x4000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll4_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll4_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll4_out_main",
+ .parent_names = (const char *[]){ "gpll4" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x6000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 1000000000,
+ [VDD_NOMINAL] = 2000000000},
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll6_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll6_out_main = {
+ .offset = 0x6000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll6_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6_out_main",
+ .parent_names = (const char *[]){ "gpll6" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x7000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll7",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 1000000000,
+ [VDD_NOMINAL] = 2000000000},
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll7_out_main[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll7_out_main = {
+ .offset = 0x7000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll7_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll7_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll7_out_main",
+ .parent_names = (const char *[]){ "gpll7" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+/* 800MHz configuration */
+static const struct alpha_pll_config gpll8_config = {
+ .l = 0x29,
+ .alpha = 0xAA000000,
+ .alpha_hi = 0xAA,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .early_output_mask = BIT(3),
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = GENMASK(11, 8),
+ .config_ctl_val = 0x40008529,
+};
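+
+/*
+ * L = 0x29 (41) with an alpha of ~2/3 (0xaa/0xaa000000 across the
+ * 40-bit field) gives ~41.667 * 19.2 MHz = 800 MHz. post_div_val
+ * programs the /2 main post-divider, so gpll8_out_main below starts
+ * out at 400 MHz.
+ */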
+
+static struct clk_alpha_pll gpll8 = {
+ .offset = 0x8000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll8",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 1000000000,
+ [VDD_NOMINAL] = 2000000000},
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll8_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll8_out_main = {
+ .offset = 0x8000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll8_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll8_out_main",
+ .parent_names = (const char *[]){ "gpll8" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+/* 1152MHz configuration */
+static const struct alpha_pll_config gpll9_config = {
+ .l = 0x3C,
+ .alpha = 0x0,
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = GENMASK(9, 8),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x000040C9,
+};
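+
+/*
+ * L = 0x3c (60) gives 60 * 19.2 MHz = 1152 MHz. gpll9 uses the
+ * BRAMMO register layout, whose main post-divider field is only two
+ * bits wide (GENMASK(9, 8) here, and .width = 2 in gpll9_out_main).
+ */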
+
+static struct clk_alpha_pll gpll9 = {
+ .offset = 0x9000,
+ .vco_table = brammo_vco,
+ .num_vco = ARRAY_SIZE(brammo_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll9",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ .vdd_class = &vdd_mx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 1250000000,
+ [VDD_LOW] = 1250000000,
+ [VDD_NOMINAL] = 1250000000},
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll9_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll9_out_main = {
+ .offset = 0x9000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll9_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll9_out_main),
+ .width = 2,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll9_out_main",
+ .parent_names = (const char *[]){ "gpll9" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
+ .reg = 0x1a04c,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ .parent_names =
+ (const char *[]){ "gcc_usb30_prim_mock_utmi_clk_src" },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_AUX2, 1, 0, 0),
+ { }
+};
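+
+/*
+ * A note on the freq_tbl entries: assuming the usual qcom
+ * F(freq, src, pre_div, m, n) helper, the pre-divider is stored as
+ * (2 * pre_div - 1), which is how half-integer dividers such as the
+ * 1.5 above are expressed. With gpll0_out_aux2 at 300 MHz:
+ * 300 MHz / 1.5 = 200 MHz and 300 MHz / 1 = 300 MHz.
+ */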
+
+static struct clk_rcg2 gcc_camss_axi_clk_src = {
+ .cmd_rcgr = 0x5802c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_camss_axi_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_axi_clk_src",
+ .parent_names = gcc_parent_names_7,
+ .num_parents = 7,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 150000000,
+ [VDD_LOW_L1] = 200000000,
+ [VDD_NOMINAL] = 300000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_cci_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_AUX2, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_cci_clk_src = {
+ .cmd_rcgr = 0x56000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_camss_cci_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_clk_src",
+ .parent_names = gcc_parent_names_9,
+ .num_parents = 8,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 37500000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+ F(268800000, P_GPLL4_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x59000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 200000000,
+ [VDD_NOMINAL] = 268800000},
+ },
+};
+
+static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x5901c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 200000000,
+ [VDD_NOMINAL] = 268800000},
+ },
+};
+
+static struct clk_rcg2 gcc_camss_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0x59038,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi2phytimer_clk_src",
+ .parent_names = gcc_parent_names_4,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 200000000,
+ [VDD_NOMINAL] = 268800000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_mclk0_clk_src[] = {
+ F(24000000, P_GPLL9_OUT_MAIN, 1, 1, 24),
+ F(64000000, P_GPLL9_OUT_MAIN, 1, 1, 9),
+ { }
+};
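+
+/*
+ * Both MCLK rates are integer fractions of gpll9_out_main (1152 / 2
+ * = 576 MHz): 576 MHz * 1/24 = 24 MHz and 576 MHz * 1/9 = 64 MHz,
+ * using the 8-bit M/N counters (mnd_width = 8) of the RCGs below.
+ */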
+
+static struct clk_rcg2 gcc_camss_mclk0_clk_src = {
+ .cmd_rcgr = 0x51000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 7,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 64000000},
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk1_clk_src = {
+ .cmd_rcgr = 0x5101c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 7,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 64000000},
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk2_clk_src = {
+ .cmd_rcgr = 0x51038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk2_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 7,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 64000000},
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk3_clk_src = {
+ .cmd_rcgr = 0x51054,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk3_clk_src",
+ .parent_names = gcc_parent_names_3,
+ .num_parents = 7,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 64000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ope_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(171428571, P_GPLL0_OUT_EARLY, 3.5, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = {
+ .cmd_rcgr = 0x55024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_camss_ope_ahb_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_ahb_clk_src",
+ .parent_names = gcc_parent_names_8,
+ .num_parents = 8,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 171428571,
+ [VDD_NOMINAL] = 240000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ope_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL8_OUT_MAIN, 2, 0, 0),
+ F(266600000, P_GPLL8_OUT_MAIN, 1, 0, 0),
+ F(465000000, P_GPLL8_OUT_MAIN, 1, 0, 0),
+ F(580000000, P_GPLL8_OUT_EARLY, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_ope_clk_src = {
+ .cmd_rcgr = 0x55004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_camss_ope_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_clk_src",
+ .parent_names = gcc_parent_names_8,
+ .num_parents = 8,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 200000000,
+ [VDD_LOW_L1] = 266600000,
+ [VDD_NOMINAL] = 465000000,
+ [VDD_HIGH] = 580000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(256000000, P_GPLL10_OUT_MAIN, 4.5, 0, 0),
+ F(460800000, P_GPLL10_OUT_MAIN, 2.5, 0, 0),
+ F(576000000, P_GPLL10_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_0_clk_src = {
+ .cmd_rcgr = 0x52004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_clk_src",
+ .parent_names = gcc_parent_names_5,
+ .num_parents = 8,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 256000000,
+ [VDD_LOW_L1] = 460800000,
+ [VDD_NOMINAL] = 576000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ F(426400000, P_GPLL3_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = {
+ .cmd_rcgr = 0x52094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_csid_clk_src",
+ .parent_names = gcc_parent_names_6,
+ .num_parents = 8,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 240000000,
+ [VDD_LOW_L1] = 384000000,
+ [VDD_HIGH] = 426400000},
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_1_clk_src = {
+ .cmd_rcgr = 0x52024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_clk_src",
+ .parent_names = gcc_parent_names_5,
+ .num_parents = 8,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 256000000,
+ [VDD_LOW_L1] = 460800000,
+ [VDD_NOMINAL] = 576000000},
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = {
+ .cmd_rcgr = 0x520b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_csid_clk_src",
+ .parent_names = gcc_parent_names_6,
+ .num_parents = 8,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 240000000,
+ [VDD_LOW_L1] = 384000000,
+ [VDD_HIGH] = 426400000},
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_2_clk_src = {
+ .cmd_rcgr = 0x52044,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_clk_src",
+ .parent_names = gcc_parent_names_5,
+ .num_parents = 8,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 256000000,
+ [VDD_LOW_L1] = 460800000,
+ [VDD_NOMINAL] = 576000000},
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_2_csid_clk_src = {
+ .cmd_rcgr = 0x520d4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_csid_clk_src",
+ .parent_names = gcc_parent_names_6,
+ .num_parents = 8,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 240000000,
+ [VDD_LOW_L1] = 384000000,
+ [VDD_HIGH] = 426400000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ F(341333333, P_GPLL6_OUT_EARLY, 1, 4, 9),
+ F(384000000, P_GPLL6_OUT_EARLY, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x52064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_camss_tfe_cphy_rx_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_cphy_rx_clk_src",
+ .parent_names = gcc_parent_names_10,
+ .num_parents = 8,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 240000000,
+ [VDD_LOW_L1] = 341333333,
+ [VDD_HIGH] = 384000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_top_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(40000000, P_GPLL0_OUT_AUX2, 7.5, 0, 0),
+ F(80000000, P_GPLL0_OUT_EARLY, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_top_ahb_clk_src = {
+ .cmd_rcgr = 0x58010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_camss_top_ahb_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk_src",
+ .parent_names = gcc_parent_names_7,
+ .num_parents = 7,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 80000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x4d004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 50000000,
+ [VDD_LOW] = 100000000,
+ [VDD_NOMINAL] = 200000000},
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x4e004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 50000000,
+ [VDD_LOW] = 100000000,
+ [VDD_NOMINAL] = 200000000},
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x4f004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_names = gcc_parent_names_2,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 50000000,
+ [VDD_LOW] = 100000000,
+ [VDD_NOMINAL] = 200000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_AUX2, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x20010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_LOW] = 60000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_AUX2, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_AUX2, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_AUX2, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_AUX2, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_AUX2, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_AUX2, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_AUX2, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_AUX2, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_AUX2, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_AUX2, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_AUX2, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_AUX2, 2.5, 0, 0),
+ F(128000000, P_GPLL6_OUT_MAIN, 3, 0, 0),
+ { }
+};
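+
+/*
+ * The fractional QUP rates are UART oversampling clocks derived from
+ * the 300 MHz gpll0_out_aux2 via M/N, e.g.
+ * 300 MHz * 384/15625 = 7.3728 MHz (16 * 460800 baud).
+ */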
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 75000000,
+ [VDD_LOW] = 100000000,
+ [VDD_NOMINAL] = 128000000},
+};
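+
+/*
+ * Unlike the RCGs above, the QUP clock sources keep their
+ * clk_init_data in separate named structs, presumably so the probe
+ * path can reference or patch them before registration.
+ */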
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x1f148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 75000000,
+ [VDD_LOW] = 100000000,
+ [VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x1f278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 75000000,
+ [VDD_LOW] = 100000000,
+ [VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x1f3a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 75000000,
+ [VDD_LOW] = 100000000,
+ [VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x1f4d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 75000000,
+ [VDD_LOW] = 100000000,
+ [VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x1f608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 75000000,
+ [VDD_LOW] = 100000000,
+ [VDD_NOMINAL] = 128000000},
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x1f738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_AUX2, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_AUX2, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
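+
+/*
+ * The two sub-MHz entries are SD/eMMC card-identification rates
+ * derived purely from the XO: 19.2 MHz / 16 * 3/25 = 144 kHz and
+ * 19.2 MHz / 12 * 1/4 = 400 kHz.
+ */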
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x38028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_names = gcc_parent_names_1,
+ .num_parents = 5,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 100000000,
+ [VDD_LOW_L1] = 384000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_EARLY, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_AUX2, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x38010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 100000000,
+ [VDD_LOW] = 150000000,
+ [VDD_LOW_L1] = 300000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(202000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1e00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_11,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_names = gcc_parent_names_11,
+ .num_parents = 6,
+ .ops = &clk_rcg2_ops,
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 100000000,
+ [VDD_LOW_L1] = 202000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_EARLY, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = {
+ .cmd_rcgr = 0x45020,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 50000000,
+ [VDD_LOW] = 100000000,
+ [VDD_NOMINAL] = 200000000,
+ [VDD_HIGH] = 240000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_ice_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_AUX2, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+ F(300000000, P_GPLL0_OUT_AUX2, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = {
+ .cmd_rcgr = 0x45048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_ice_core_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 75000000,
+ [VDD_LOW] = 150000000,
+ [VDD_NOMINAL] = 300000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_phy_aux_clk_src[] = {
+ F(9600000, P_BI_TCXO, 2, 0, 0),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = {
+ .cmd_rcgr = 0x4507c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_phy_aux_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_phy_unipro_core_clk_src[] = {
+ F(37500000, P_GPLL0_OUT_AUX2, 8, 0, 0),
+ F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+ F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = {
+ .cmd_rcgr = 0x45060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_ufs_phy_unipro_core_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 37500000,
+ [VDD_LOW] = 75000000,
+ [VDD_NOMINAL] = 150000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GPLL0_OUT_AUX2, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_EARLY, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_EARLY, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x1a01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 66666667,
+ [VDD_LOW] = 133333333,
+ [VDD_NOMINAL] = 200000000,
+ [VDD_HIGH] = 240000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1a034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_names = gcc_parent_names_0,
+ .num_parents = 4,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000},
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1a060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_12,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_names = gcc_parent_names_12,
+ .num_parents = 3,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000},
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_video_venus_clk_src[] = {
+ F(133000000, P_GPLL11_OUT_MAIN, 4.5, 0, 0),
+ F(240000000, P_GPLL11_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_GPLL11_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL11_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_video_venus_clk_src = {
+ .cmd_rcgr = 0x58060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_13,
+ .freq_tbl = ftbl_gcc_video_venus_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_video_venus_clk_src",
+ .parent_names = gcc_parent_names_13,
+ .num_parents = 5,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 133000000,
+ [VDD_LOW] = 240000000,
+ [VDD_LOW_L1] = 300000000,
+ [VDD_NOMINAL] = 384000000},
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_csi_clk = {
+ .halt_reg = 0x1d004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1d004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb2phy_csi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
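+
+/*
+ * For branches such as this one, hwcg_reg/hwcg_bit point at the
+ * hardware clock-gating enable in the same CBCR, letting the block
+ * gate itself when idle independently of the software enable bit.
+ */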
+
+static struct clk_branch gcc_ahb2phy_usb_clk = {
+ .halt_reg = 0x1d008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1d008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb2phy_usb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x71154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
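+
+/*
+ * BRANCH_HALT_VOTED branches like gcc_boot_rom_ahb_clk are enabled
+ * through a shared vote register (0x79004 / 0x7900c here) rather
+ * than their own CBCR; halt status is still polled at halt_reg.
+ */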
+
+static struct clk_branch gcc_cam_throttle_nrt_clk = {
+ .halt_reg = 0x17070,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17070,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cam_throttle_nrt_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cam_throttle_rt_clk = {
+ .halt_reg = 0x1706c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cam_throttle_rt_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+ .halt_reg = 0x17028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_xo_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
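+
+/*
+ * The camera and display AHB/XO branches are marked CLK_IS_CRITICAL,
+ * presumably because the multimedia subsystems expect their register
+ * interface and always-on XO ticks never to be gated by the clock
+ * framework.
+ */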
+
+static struct clk_branch gcc_camss_axi_clk = {
+ .halt_reg = 0x58044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_camnoc_atb_clk = {
+ .halt_reg = 0x5804c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x5804c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_camnoc_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_camnoc_nts_xo_clk = {
+ .halt_reg = 0x58050,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x58050,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x58050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_camnoc_nts_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_0_clk = {
+ .halt_reg = 0x56018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x56018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_0_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_cci_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_0_clk = {
+ .halt_reg = 0x52088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_0_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_tfe_cphy_rx_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_1_clk = {
+ .halt_reg = 0x5208c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5208c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_1_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_tfe_cphy_rx_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_2_clk = {
+ .halt_reg = 0x52090,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_2_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_tfe_cphy_rx_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x59018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_csi0phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phytimer_clk = {
+ .halt_reg = 0x59034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_csi1phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi2phytimer_clk = {
+ .halt_reg = 0x59050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x59050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi2phytimer_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_csi2phytimer_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x51018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_mclk0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x51034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_mclk1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk2_clk = {
+ .halt_reg = 0x51050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk2_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_mclk2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk3_clk = {
+ .halt_reg = 0x5106c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5106c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk3_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_mclk3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_nrt_axi_clk = {
+ .halt_reg = 0x58054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_nrt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ope_ahb_clk = {
+ .halt_reg = 0x5503c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5503c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_ahb_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_ope_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ope_clk = {
+ .halt_reg = 0x5501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_ope_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_rt_axi_clk = {
+ .halt_reg = 0x5805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_rt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_clk = {
+ .halt_reg = 0x5201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_tfe_0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_cphy_rx_clk = {
+ .halt_reg = 0x5207c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5207c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_cphy_rx_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_tfe_cphy_rx_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_csid_clk = {
+ .halt_reg = 0x520ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_csid_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_tfe_0_csid_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_clk = {
+ .halt_reg = 0x5203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_tfe_1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_cphy_rx_clk = {
+ .halt_reg = 0x52080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_cphy_rx_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_tfe_cphy_rx_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_csid_clk = {
+ .halt_reg = 0x520cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_csid_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_tfe_1_csid_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_2_clk = {
+ .halt_reg = 0x5205c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5205c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_tfe_2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_2_cphy_rx_clk = {
+ .halt_reg = 0x52084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_cphy_rx_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_tfe_cphy_rx_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_2_csid_clk = {
+ .halt_reg = 0x520ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_2_csid_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_tfe_2_csid_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x58028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_names = (const char *[]){
+ "gcc_camss_top_ahb_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1a084,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1a084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_ahb_clk = {
+ .halt_reg = 0x2b000,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b000,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(21),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_gnoc_clk = {
+ .halt_reg = 0x2b004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(22),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_gnoc_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_throttle_core_clk = {
+ .halt_reg = 0x2b180,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b180,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(30),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cpuss_throttle_xo_clk = {
+ .halt_reg = 0x2b17c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2b17c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cpuss_throttle_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_throttle_core_clk = {
+ .halt_reg = 0x17064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0x1702c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_xo_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x4d000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x4e000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x4f000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_names = (const char *[]){
+ "gcc_gp3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x36004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_names = (const char *[]){
+ "gpll0_out_aux2",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+ .halt_reg = 0x36100,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x36100,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x36018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_throttle_core_clk = {
+ .halt_reg = 0x36048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36048,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(31),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_throttle_xo_clk = {
+ .halt_reg = 0x36044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_throttle_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x2000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_names = (const char *[]){
+ "gcc_pdm2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x20004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x20008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x21004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x21004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x17060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17060,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_cpuss_cfg_ahb_clk = {
+ .halt_reg = 0x2b178,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b178,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(18),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_cpuss_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x17018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x36040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_gpu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x17010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x1f014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x1f00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x1f144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s0_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1f274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s1_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x1f3a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s2_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x1f4d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s3_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x1f604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s4_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x1f734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_names = (const char *[]){
+ "gcc_qupv3_wrap0_s5_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x1f004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1f004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x1f008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1f008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x38008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_sdcc1_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT | CLK_ENABLE_HAND_OFF,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x3800c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3800c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_sdcc1_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_names = (const char *[]){
+ "gcc_sdcc2_apps_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x2b06c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b06c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_ufs_phy_axi_clk = {
+ .halt_reg = 0x45098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x45098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_ufs_phy_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1a080,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1a080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_usb3_prim_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ahb_clk = {
+ .halt_reg = 0x45014,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x45014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_axi_clk = {
+ .halt_reg = 0x45010,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x45010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_axi_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_axi_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_ice_core_clk = {
+ .halt_reg = 0x45044,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x45044,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_ice_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_ice_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_phy_aux_clk = {
+ .halt_reg = 0x45078,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x45078,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_phy_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = {
+ .halt_reg = 0x4501c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x4501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_rx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = {
+ .halt_reg = 0x45018,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x45018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_tx_symbol_0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ufs_phy_unipro_core_clk = {
+ .halt_reg = 0x45040,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x45040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x45040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ufs_phy_unipro_core_clk",
+ .parent_names = (const char *[]){
+ "gcc_ufs_phy_unipro_core_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x1a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_master_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x1a018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb30_prim_mock_utmi_postdiv_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x1a014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x9f000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x1a054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_names = (const char *[]){
+ "gcc_usb3_prim_phy_aux_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x1a058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1a058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vcodec0_axi_clk = {
+ .halt_reg = 0x6e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vcodec0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_ahb_clk = {
+ .halt_reg = 0x6e010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_ctl_axi_clk = {
+ .halt_reg = 0x6e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x1701c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1701c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_throttle_core_clk = {
+ .halt_reg = 0x17068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_vcodec0_sys_clk = {
+ .halt_reg = 0x580a4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x580a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x580a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_vcodec0_sys_clk",
+ .parent_names = (const char *[]){
+ "gcc_video_venus_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_venus_ctl_clk = {
+ .halt_reg = 0x5808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_venus_ctl_clk",
+ .parent_names = (const char *[]){
+ "gcc_video_venus_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0x17024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
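+/* Lookup table mapping qcom,gcc-bengal DT binding indices to clk_regmap pointers. */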
+static struct clk_regmap *gcc_bengal_clocks[] = {
+ [GCC_AHB2PHY_CSI_CLK] = &gcc_ahb2phy_csi_clk.clkr,
+ [GCC_AHB2PHY_USB_CLK] = &gcc_ahb2phy_usb_clk.clkr,
+ [GCC_BIMC_GPU_AXI_CLK] = &gcc_bimc_gpu_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAM_THROTTLE_NRT_CLK] = &gcc_cam_throttle_nrt_clk.clkr,
+ [GCC_CAM_THROTTLE_RT_CLK] = &gcc_cam_throttle_rt_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CAMSS_AXI_CLK] = &gcc_camss_axi_clk.clkr,
+ [GCC_CAMSS_AXI_CLK_SRC] = &gcc_camss_axi_clk_src.clkr,
+ [GCC_CAMSS_CAMNOC_ATB_CLK] = &gcc_camss_camnoc_atb_clk.clkr,
+ [GCC_CAMSS_CAMNOC_NTS_XO_CLK] = &gcc_camss_camnoc_nts_xo_clk.clkr,
+ [GCC_CAMSS_CCI_0_CLK] = &gcc_camss_cci_0_clk.clkr,
+ [GCC_CAMSS_CCI_CLK_SRC] = &gcc_camss_cci_clk_src.clkr,
+ [GCC_CAMSS_CPHY_0_CLK] = &gcc_camss_cphy_0_clk.clkr,
+ [GCC_CAMSS_CPHY_1_CLK] = &gcc_camss_cphy_1_clk.clkr,
+ [GCC_CAMSS_CPHY_2_CLK] = &gcc_camss_cphy_2_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK_SRC] = &gcc_camss_csi0phytimer_clk_src.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK_SRC] = &gcc_camss_csi1phytimer_clk_src.clkr,
+ [GCC_CAMSS_CSI2PHYTIMER_CLK] = &gcc_camss_csi2phytimer_clk.clkr,
+ [GCC_CAMSS_CSI2PHYTIMER_CLK_SRC] = &gcc_camss_csi2phytimer_clk_src.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK_SRC] = &gcc_camss_mclk0_clk_src.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK_SRC] = &gcc_camss_mclk1_clk_src.clkr,
+ [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+ [GCC_CAMSS_MCLK2_CLK_SRC] = &gcc_camss_mclk2_clk_src.clkr,
+ [GCC_CAMSS_MCLK3_CLK] = &gcc_camss_mclk3_clk.clkr,
+ [GCC_CAMSS_MCLK3_CLK_SRC] = &gcc_camss_mclk3_clk_src.clkr,
+ [GCC_CAMSS_NRT_AXI_CLK] = &gcc_camss_nrt_axi_clk.clkr,
+ [GCC_CAMSS_OPE_AHB_CLK] = &gcc_camss_ope_ahb_clk.clkr,
+ [GCC_CAMSS_OPE_AHB_CLK_SRC] = &gcc_camss_ope_ahb_clk_src.clkr,
+ [GCC_CAMSS_OPE_CLK] = &gcc_camss_ope_clk.clkr,
+ [GCC_CAMSS_OPE_CLK_SRC] = &gcc_camss_ope_clk_src.clkr,
+ [GCC_CAMSS_RT_AXI_CLK] = &gcc_camss_rt_axi_clk.clkr,
+ [GCC_CAMSS_TFE_0_CLK] = &gcc_camss_tfe_0_clk.clkr,
+ [GCC_CAMSS_TFE_0_CLK_SRC] = &gcc_camss_tfe_0_clk_src.clkr,
+ [GCC_CAMSS_TFE_0_CPHY_RX_CLK] = &gcc_camss_tfe_0_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_0_CSID_CLK] = &gcc_camss_tfe_0_csid_clk.clkr,
+ [GCC_CAMSS_TFE_0_CSID_CLK_SRC] = &gcc_camss_tfe_0_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_1_CLK] = &gcc_camss_tfe_1_clk.clkr,
+ [GCC_CAMSS_TFE_1_CLK_SRC] = &gcc_camss_tfe_1_clk_src.clkr,
+ [GCC_CAMSS_TFE_1_CPHY_RX_CLK] = &gcc_camss_tfe_1_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_1_CSID_CLK] = &gcc_camss_tfe_1_csid_clk.clkr,
+ [GCC_CAMSS_TFE_1_CSID_CLK_SRC] = &gcc_camss_tfe_1_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_2_CLK] = &gcc_camss_tfe_2_clk.clkr,
+ [GCC_CAMSS_TFE_2_CLK_SRC] = &gcc_camss_tfe_2_clk_src.clkr,
+ [GCC_CAMSS_TFE_2_CPHY_RX_CLK] = &gcc_camss_tfe_2_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_2_CSID_CLK] = &gcc_camss_tfe_2_csid_clk.clkr,
+ [GCC_CAMSS_TFE_2_CSID_CLK_SRC] = &gcc_camss_tfe_2_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_CPHY_RX_CLK_SRC] = &gcc_camss_tfe_cphy_rx_clk_src.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK_SRC] = &gcc_camss_top_ahb_clk_src.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
+ [GCC_CPUSS_GNOC_CLK] = &gcc_cpuss_gnoc_clk.clkr,
+ [GCC_CPUSS_THROTTLE_CORE_CLK] = &gcc_cpuss_throttle_core_clk.clkr,
+ [GCC_CPUSS_THROTTLE_XO_CLK] = &gcc_cpuss_throttle_xo_clk.clkr,
+ [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_THROTTLE_CORE_CLK] = &gcc_disp_throttle_core_clk.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_THROTTLE_CORE_CLK] = &gcc_gpu_throttle_core_clk.clkr,
+ [GCC_GPU_THROTTLE_XO_CLK] = &gcc_gpu_throttle_xo_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_CPUSS_CFG_AHB_CLK] = &gcc_qmip_cpuss_cfg_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_GPU_CFG_AHB_CLK] = &gcc_qmip_gpu_cfg_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_SYS_NOC_UFS_PHY_AXI_CLK] = &gcc_sys_noc_ufs_phy_axi_clk.clkr,
+ [GCC_SYS_NOC_USB3_PRIM_AXI_CLK] = &gcc_sys_noc_usb3_prim_axi_clk.clkr,
+ [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr,
+ [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr,
+ [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr,
+ [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr,
+ [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr,
+ [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] =
+ &gcc_ufs_phy_unipro_core_clk_src.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_VCODEC0_AXI_CLK] = &gcc_vcodec0_axi_clk.clkr,
+ [GCC_VENUS_AHB_CLK] = &gcc_venus_ahb_clk.clkr,
+ [GCC_VENUS_CTL_AXI_CLK] = &gcc_venus_ctl_axi_clk.clkr,
+ [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_THROTTLE_CORE_CLK] = &gcc_video_throttle_core_clk.clkr,
+ [GCC_VIDEO_VCODEC0_SYS_CLK] = &gcc_video_vcodec0_sys_clk.clkr,
+ [GCC_VIDEO_VENUS_CLK_SRC] = &gcc_video_venus_clk_src.clkr,
+ [GCC_VIDEO_VENUS_CTL_CLK] = &gcc_video_venus_ctl_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_AUX2] = &gpll0_out_aux2.clkr,
+ [GPLL0_OUT_MAIN] = &gpll0_out_main.clkr,
+ [GPLL10] = &gpll10.clkr,
+ [GPLL10_OUT_MAIN] = &gpll10_out_main.clkr,
+ [GPLL11] = &gpll11.clkr,
+ [GPLL11_OUT_MAIN] = &gpll11_out_main.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL4_OUT_MAIN] = &gpll4_out_main.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL6_OUT_MAIN] = &gpll6_out_main.clkr,
+ [GPLL7] = &gpll7.clkr,
+ [GPLL7_OUT_MAIN] = &gpll7_out_main.clkr,
+ [GPLL8] = &gpll8.clkr,
+ [GPLL8_OUT_MAIN] = &gpll8_out_main.clkr,
+ [GPLL9] = &gpll9.clkr,
+ [GPLL9_OUT_MAIN] = &gpll9_out_main.clkr,
+};
+
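+/* Block Control Register (BCR) offsets exposed through the GCC reset controller. */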
+static const struct qcom_reset_map gcc_bengal_resets[] = {
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x1c000 },
+ [GCC_QUSB2PHY_SEC_BCR] = { 0x1c004 },
+ [GCC_UFS_PHY_BCR] = { 0x45000 },
+ [GCC_USB30_PRIM_BCR] = { 0x1a000 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x1d000 },
+ [GCC_VCODEC0_BCR] = { 0x58094 },
+ [GCC_VENUS_BCR] = { 0x58078 },
+ [GCC_VIDEO_INTERFACE_BCR] = { 0x6e000 },
+};
+
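+/* QUP serial-engine clock sources registered for hardware DFS (Dynamic Frequency Switching). */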
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+};
+
+static const struct regmap_config gcc_bengal_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xc7000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_bengal_desc = {
+ .config = &gcc_bengal_regmap_config,
+ .clks = gcc_bengal_clocks,
+ .num_clks = ARRAY_SIZE(gcc_bengal_clocks),
+ .resets = gcc_bengal_resets,
+ .num_resets = ARRAY_SIZE(gcc_bengal_resets),
+};
+
+static const struct of_device_id gcc_bengal_match_table[] = {
+ { .compatible = "qcom,bengal-gcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_bengal_match_table);
+
+static int gcc_bengal_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_bengal_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
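+ /* CX/MX rails back clock voltage voting; probe defers until they are available. */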
+ vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+ if (IS_ERR(vdd_cx.regulator[0])) {
+ if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_cx regulator\n");
+ return PTR_ERR(vdd_cx.regulator[0]);
+ }
+
+ vdd_cx_ao.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx_ao");
+ if (IS_ERR(vdd_cx_ao.regulator[0])) {
+ if (PTR_ERR(vdd_cx_ao.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_cx_ao regulator\n");
+ return PTR_ERR(vdd_cx_ao.regulator[0]);
+ }
+
+ vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx");
+ if (IS_ERR(vdd_mx.regulator[0])) {
+ if (PTR_ERR(vdd_mx.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_mx regulator\n");
+ return PTR_ERR(vdd_mx.regulator[0]);
+ }
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ clk_alpha_pll_configure(&gpll8, regmap, &gpll8_config);
+ clk_alpha_pll_configure(&gpll9, regmap, &gpll9_config);
+ clk_alpha_pll_configure(&gpll10, regmap, &gpll10_config);
+ clk_alpha_pll_configure(&gpll11, regmap, &gpll11_config);
+
+ ret = qcom_cc_really_probe(pdev, &gcc_bengal_desc, regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register GCC clocks\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "Registered GCC clocks\n");
+ return ret;
+}
+
+static struct platform_driver gcc_bengal_driver = {
+ .probe = gcc_bengal_probe,
+ .driver = {
+ .name = "gcc-bengal",
+ .of_match_table = gcc_bengal_match_table,
+ },
+};
+
+static int __init gcc_bengal_init(void)
+{
+ return platform_driver_register(&gcc_bengal_driver);
+}
+subsys_initcall(gcc_bengal_init);
+
+static void __exit gcc_bengal_exit(void)
+{
+ platform_driver_unregister(&gcc_bengal_driver);
+}
+module_exit(gcc_bengal_exit);
+
+MODULE_DESCRIPTION("QTI GCC BENGAL Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gcc-lito.c b/drivers/clk/qcom/gcc-lito.c
index f5ec81f..926a8da 100644
--- a/drivers/clk/qcom/gcc-lito.c
+++ b/drivers/clk/qcom/gcc-lito.c
@@ -26,7 +26,7 @@
#include "clk-regmap.h"
#include "common.h"
#include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
#define GCC_NPU_MISC 0x4d110
#define GCC_GPU_MISC 0x71028
diff --git a/drivers/clk/qcom/gpucc-bengal.c b/drivers/clk/qcom/gpucc-bengal.c
new file mode 100644
index 0000000..91f62b2
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-bengal.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gpucc-bengal.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "vdd-level-bengal.h"
+
+#define CX_GMU_CBCR_SLEEP_MASK 0xf
+#define CX_GMU_CBCR_SLEEP_SHIFT 4
+#define CX_GMU_CBCR_WAKE_MASK 0xf
+#define CX_GMU_CBCR_WAKE_SHIFT 8
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_2X_DIV_CLK_SRC,
+ P_GPU_CC_PLL0_OUT_AUX2,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_AUX,
+ P_GPU_CC_PLL1_OUT_AUX2,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_0[] = {
+ "bi_tcxo",
+ "gpu_cc_pll0_out_main",
+ "gpu_cc_pll1_out_main",
+ "gpll0_out_main",
+ "gpll0_out_main_div",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_2X_DIV_CLK_SRC, 1 },
+ { P_GPU_CC_PLL0_OUT_AUX2, 2 },
+ { P_GPU_CC_PLL1_OUT_AUX, 3 },
+ { P_GPU_CC_PLL1_OUT_AUX2, 4 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_1[] = {
+ "bi_tcxo",
+ "gpu_cc_pll0_out_aux",
+ "gpu_cc_pll0_out_aux2",
+ "gpu_cc_pll1_out_aux",
+ "gpu_cc_pll1_out_aux2",
+ "gpll0_out_main",
+ "core_bi_pll_test_se",
+};
+
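+/* Column order follows struct pll_vco: { min rate (Hz), max rate (Hz), VCO select value }. */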
+static struct pll_vco default_vco[] = {
+ { 1000000000, 2000000000, 0 },
+ { 750000000, 1500000000, 1 },
+ { 500000000, 1000000000, 2 },
+ { 250000000, 500000000, 3 },
+};
+
+/* 532MHz configuration */
+static const struct alpha_pll_config gpu_cc_pll0_config = {
+ .l = 0x1B,
+ .alpha = 0x55000000,
+ .alpha_hi = 0xB5,
+ .alpha_en_mask = BIT(24),
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+ .aux2_output_mask = BIT(2),
+ .config_ctl_val = 0x40008529,
+};
+
+static struct clk_alpha_pll gpu_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll0",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 1000000000,
+ [VDD_NOMINAL] = 2000000000},
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpu_cc_pll0_out_aux2[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpu_cc_pll0_out_aux2 = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpu_cc_pll0_out_aux2,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpu_cc_pll0_out_aux2),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll0_out_aux2",
+ .parent_names = (const char *[]){ "gpu_cc_pll0" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+/* 640MHz configuration */
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x21,
+ .alpha = 0x55555555,
+ .alpha_hi = 0x55,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .aux_output_mask = BIT(1),
+ .config_ctl_val = 0x40008529,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 1000000000,
+ [VDD_NOMINAL] = 2000000000},
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpu_cc_pll1_out_aux[] = {
+ { 0x0, 1 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpu_cc_pll1_out_aux = {
+ .offset = 0x100,
+ .post_div_shift = 15,
+ .post_div_table = post_div_table_gpu_cc_pll1_out_aux,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpu_cc_pll1_out_aux),
+ .width = 3,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1_out_aux",
+ .parent_names = (const char *[]){ "gpu_cc_pll1" },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(200000000, P_GPLL0_OUT_MAIN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .enable_safe_config = true,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_names = gpu_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 200000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gx_gfx3d_clk_src[] = {
+ F(320000000, P_GPU_CC_PLL1_OUT_AUX, 2, 0, 0),
+ F(465000000, P_GPU_CC_PLL1_OUT_AUX, 2, 0, 0),
+ F(600000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ F(745000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ F(820000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ F(900000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ F(950000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ F(980000000, P_GPU_CC_PLL0_OUT_AUX2, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gx_gfx3d_clk_src = {
+ .cmd_rcgr = 0x101c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gfx3d_clk_src",
+ .parent_names = gpu_cc_parent_names_1,
+ .num_parents = 7,
+ .flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 320000000,
+ [VDD_LOW] = 465000000,
+ [VDD_LOW_L1] = 600000000,
+ [VDD_NOMINAL] = 745000000,
+ [VDD_NOMINAL_L1] = 820000000,
+ [VDD_HIGH] = 900000000,
+ [VDD_HIGH_L1] = 980000000},
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x1078,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gfx3d_clk = {
+ .halt_reg = 0x10a4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x10a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gfx3d_clk",
+ .parent_names = (const char *[]){
+ "gpu_cc_gx_gfx3d_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_names = (const char *[]){
+ "gpu_cc_gmu_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_cxo_clk = {
+ .halt_reg = 0x1060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gfx3d_clk = {
+ .halt_reg = 0x1054,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gfx3d_clk",
+ .parent_names = (const char *[]){
+ "gpu_cc_gx_gfx3d_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = {
+ .halt_reg = 0x5000,
+ .halt_check = BRANCH_VOTED,
+ .clkr = {
+ .enable_reg = 0x5000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_hlos1_vote_gpu_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap *gpu_cc_bengal_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_GFX3D_CLK] = &gpu_cc_cx_gfx3d_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_CXO_CLK] = &gpu_cc_gx_cxo_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK] = &gpu_cc_gx_gfx3d_clk.clkr,
+ [GPU_CC_GX_GFX3D_CLK_SRC] = &gpu_cc_gx_gfx3d_clk_src.clkr,
+ [GPU_CC_PLL0] = &gpu_cc_pll0.clkr,
+ [GPU_CC_PLL0_OUT_AUX2] = &gpu_cc_pll0_out_aux2.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_PLL1_OUT_AUX] = &gpu_cc_pll1_out_aux.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+ [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr,
+};
+
+static const struct regmap_config gpu_cc_bengal_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x7008,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_bengal_desc = {
+ .config = &gpu_cc_bengal_regmap_config,
+ .clks = gpu_cc_bengal_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_bengal_clocks),
+};
+
+static const struct of_device_id gpucc_bengal_match_table[] = {
+ { .compatible = "qcom,bengal-gpucc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpucc_bengal_match_table);
+
+static int gpucc_bengal_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ unsigned int value, mask;
+ int ret;
+
+ vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+ if (IS_ERR(vdd_cx.regulator[0])) {
+ if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_cx regulator\n");
+ return PTR_ERR(vdd_cx.regulator[0]);
+ }
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_bengal_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_alpha_pll_configure(&gpu_cc_pll0, regmap, &gpu_cc_pll0_config);
+ clk_alpha_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+ mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT;
+ mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT;
+ value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
+ regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg,
+ mask, value);
+
+ ret = qcom_cc_really_probe(pdev, &gpu_cc_bengal_desc, regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register GPUCC clocks\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "Registered GPUCC clocks\n");
+ return ret;
+}
+
+static struct platform_driver gpucc_bengal_driver = {
+ .probe = gpucc_bengal_probe,
+ .driver = {
+ .name = "bengal-gpucc",
+ .of_match_table = gpucc_bengal_match_table,
+ },
+};
+
+static int __init gpu_cc_bengal_init(void)
+{
+ return platform_driver_register(&gpucc_bengal_driver);
+}
+subsys_initcall(gpu_cc_bengal_init);
+
+static void __exit gpu_cc_bengal_exit(void)
+{
+ platform_driver_unregister(&gpucc_bengal_driver);
+}
+module_exit(gpu_cc_bengal_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC BENGAL Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/gpucc-lito.c b/drivers/clk/qcom/gpucc-lito.c
index 310d4bb..156fe4f 100644
--- a/drivers/clk/qcom/gpucc-lito.c
+++ b/drivers/clk/qcom/gpucc-lito.c
@@ -26,7 +26,7 @@
#include "clk-regmap.h"
#include "common.h"
#include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
diff --git a/drivers/clk/qcom/npucc-lito.c b/drivers/clk/qcom/npucc-lito.c
index e2604f5..5a56ca9 100644
--- a/drivers/clk/qcom/npucc-lito.c
+++ b/drivers/clk/qcom/npucc-lito.c
@@ -26,7 +26,7 @@
#include "clk-regmap.h"
#include "common.h"
#include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
@@ -283,10 +283,11 @@ static struct clk_fixed_factor npu_cc_crc_div = {
static const struct freq_tbl ftbl_npu_cc_cal_hm0_clk_src[] = {
F(200000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
- F(300000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
- F(518400000, P_NPU_CC_CRC_DIV, 1, 0, 0),
- F(633600000, P_NPU_CC_CRC_DIV, 1, 0, 0),
- F(825600000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+ F(230000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+ F(422000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+ F(557000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+ F(729000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
+ F(844000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
F(1000000000, P_NPU_CC_CRC_DIV, 1, 0, 0),
{ }
};
@@ -308,11 +309,12 @@ static struct clk_rcg2 npu_cc_cal_hm0_clk_src = {
.num_rate_max = VDD_NUM,
.rate_max = (unsigned long[VDD_NUM]) {
[VDD_MIN] = 200000000,
- [VDD_LOWER] = 300000000,
- [VDD_LOW] = 518400000,
- [VDD_LOW_L1] = 633600000,
- [VDD_NOMINAL] = 825600000,
- [VDD_HIGH] = 1000000000},
+ [VDD_LOWER] = 230000000,
+ [VDD_LOW] = 422000000,
+ [VDD_LOW_L1] = 557000000,
+ [VDD_NOMINAL] = 729000000,
+ [VDD_HIGH] = 844000000,
+ [VDD_HIGH_L1] = 1000000000},
},
};
diff --git a/drivers/clk/qcom/vdd-level-bengal.h b/drivers/clk/qcom/vdd-level-bengal.h
new file mode 100644
index 0000000..619977f
--- /dev/null
+++ b/drivers/clk/qcom/vdd-level-bengal.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_CLK_QCOM_VDD_LEVEL_BENGAL_H
+#define __DRIVERS_CLK_QCOM_VDD_LEVEL_BENGAL_H
+
+#include <linux/regulator/consumer.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+
+enum vdd_levels {
+ VDD_NONE,
+ VDD_MIN, /* MIN SVS */
+ VDD_LOWER, /* SVS2 */
+ VDD_LOW, /* SVS */
+ VDD_LOW_L1, /* SVSL1 */
+ VDD_NOMINAL, /* NOM */
+ VDD_NOMINAL_L1, /* NOM L1 */
+ VDD_HIGH, /* TURBO */
+ VDD_HIGH_L1, /* TURBO_L1 */
+ VDD_NUM,
+};
+
+static int vdd_corner[] = {
+ [VDD_NONE] = 0,
+ [VDD_MIN] = RPMH_REGULATOR_LEVEL_MIN_SVS,
+ [VDD_LOWER] = RPMH_REGULATOR_LEVEL_LOW_SVS,
+ [VDD_LOW] = RPMH_REGULATOR_LEVEL_SVS,
+ [VDD_LOW_L1] = RPMH_REGULATOR_LEVEL_SVS_L1,
+ [VDD_NOMINAL] = RPMH_REGULATOR_LEVEL_NOM,
+ [VDD_NOMINAL_L1] = RPMH_REGULATOR_LEVEL_NOM_L1,
+ [VDD_HIGH] = RPMH_REGULATOR_LEVEL_TURBO,
+ [VDD_HIGH_L1] = RPMH_REGULATOR_LEVEL_TURBO_L1,
+};
+
+#endif
diff --git a/drivers/clk/qcom/vdd-level-lito.h b/drivers/clk/qcom/vdd-level-lito.h
new file mode 100644
index 0000000..b968de1
--- /dev/null
+++ b/drivers/clk/qcom/vdd-level-lito.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DRIVERS_CLK_QCOM_VDD_LEVEL_LITO_H
+#define __DRIVERS_CLK_QCOM_VDD_LEVEL_LITO_H
+
+#include <linux/regulator/consumer.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+
+enum vdd_levels {
+ VDD_NONE,
+ VDD_MIN, /* MIN SVS */
+ VDD_LOWER, /* SVS2 */
+ VDD_LOW, /* SVS */
+ VDD_LOW_L1, /* SVSL1 */
+ VDD_NOMINAL, /* NOM */
+ VDD_HIGH, /* TURBO */
+ VDD_HIGH_L1, /* TURBO_L1 */
+ VDD_NUM,
+};
+
+static int vdd_corner[] = {
+ [VDD_NONE] = 0,
+ [VDD_MIN] = RPMH_REGULATOR_LEVEL_MIN_SVS,
+ [VDD_LOWER] = RPMH_REGULATOR_LEVEL_LOW_SVS,
+ [VDD_LOW] = RPMH_REGULATOR_LEVEL_SVS,
+ [VDD_LOW_L1] = RPMH_REGULATOR_LEVEL_SVS_L1,
+ [VDD_NOMINAL] = RPMH_REGULATOR_LEVEL_NOM,
+ [VDD_HIGH] = RPMH_REGULATOR_LEVEL_TURBO,
+ [VDD_HIGH_L1] = RPMH_REGULATOR_LEVEL_TURBO_L1,
+};
+
+#endif
diff --git a/drivers/clk/qcom/videocc-lito.c b/drivers/clk/qcom/videocc-lito.c
index 6619bb4..03b63f8 100644
--- a/drivers/clk/qcom/videocc-lito.c
+++ b/drivers/clk/qcom/videocc-lito.c
@@ -28,7 +28,7 @@
#include "common.h"
#include "gdsc.h"
#include "reset.h"
-#include "vdd-level.h"
+#include "vdd-level-lito.h"
static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c
index f4b013e..24485be 100644
--- a/drivers/clk/renesas/renesas-cpg-mssr.c
+++ b/drivers/clk/renesas/renesas-cpg-mssr.c
@@ -535,17 +535,11 @@ static int cpg_mssr_reset(struct reset_controller_dev *rcdev,
unsigned int reg = id / 32;
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
- unsigned long flags;
- u32 value;
dev_dbg(priv->dev, "reset %u%02u\n", reg, bit);
/* Reset module */
- spin_lock_irqsave(&priv->rmw_lock, flags);
- value = readl(priv->base + SRCR(reg));
- value |= bitmask;
- writel(value, priv->base + SRCR(reg));
- spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ writel(bitmask, priv->base + SRCR(reg));
/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
udelay(35);
@@ -562,16 +556,10 @@ static int cpg_mssr_assert(struct reset_controller_dev *rcdev, unsigned long id)
unsigned int reg = id / 32;
unsigned int bit = id % 32;
u32 bitmask = BIT(bit);
- unsigned long flags;
- u32 value;
dev_dbg(priv->dev, "assert %u%02u\n", reg, bit);
- spin_lock_irqsave(&priv->rmw_lock, flags);
- value = readl(priv->base + SRCR(reg));
- value |= bitmask;
- writel(value, priv->base + SRCR(reg));
- spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ writel(bitmask, priv->base + SRCR(reg));
return 0;
}
diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
index 568f59b..e7c877d 100644
--- a/drivers/clk/socfpga/clk-periph-s10.c
+++ b/drivers/clk/socfpga/clk-periph-s10.c
@@ -37,7 +37,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
if (socfpgaclk->fixed_div) {
div = socfpgaclk->fixed_div;
} else {
- if (!socfpgaclk->bypass_reg)
+ if (socfpgaclk->hw.reg)
div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
}
diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig
index 8789247..bad8099 100644
--- a/drivers/clk/sprd/Kconfig
+++ b/drivers/clk/sprd/Kconfig
@@ -2,6 +2,7 @@
tristate "Clock support for Spreadtrum SoCs"
depends on ARCH_SPRD || COMPILE_TEST
default ARCH_SPRD
+ select REGMAP_MMIO
if SPRD_COMMON_CLK
diff --git a/drivers/clk/sprd/sc9860-clk.c b/drivers/clk/sprd/sc9860-clk.c
index 9980ab5..f76305b 100644
--- a/drivers/clk/sprd/sc9860-clk.c
+++ b/drivers/clk/sprd/sc9860-clk.c
@@ -2023,6 +2023,7 @@ static int sc9860_clk_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
const struct sprd_clk_desc *desc;
+ int ret;
match = of_match_node(sprd_sc9860_clk_ids, pdev->dev.of_node);
if (!match) {
@@ -2031,7 +2032,9 @@ static int sc9860_clk_probe(struct platform_device *pdev)
}
desc = match->data;
- sprd_clk_regmap_init(pdev, desc);
+ ret = sprd_clk_regmap_init(pdev, desc);
+ if (ret)
+ return ret;
return sprd_clk_probe(&pdev->dev, desc->hw_clks);
}
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 9eb1cb1..4e1bc23 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -2214,9 +2214,9 @@ static struct div_nmp pllu_nmp = {
};
static struct tegra_clk_pll_freq_table pll_u_freq_table[] = {
- { 12000000, 480000000, 40, 1, 0, 0 },
- { 13000000, 480000000, 36, 1, 0, 0 }, /* actual: 468.0 MHz */
- { 38400000, 480000000, 25, 2, 0, 0 },
+ { 12000000, 480000000, 40, 1, 1, 0 },
+ { 13000000, 480000000, 36, 1, 1, 0 }, /* actual: 468.0 MHz */
+ { 38400000, 480000000, 25, 2, 1, 0 },
{ 0, 0, 0, 0, 0, 0 },
};
@@ -3343,6 +3343,7 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
{ TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
{ TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
+ { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
{ TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
{ TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
{ TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },
@@ -3367,7 +3368,6 @@ static struct tegra_clk_init_table init_table[] __initdata = {
{ TEGRA210_CLK_PLL_DP, TEGRA210_CLK_CLK_MAX, 270000000, 0 },
{ TEGRA210_CLK_SOC_THERM, TEGRA210_CLK_PLL_P, 51000000, 0 },
{ TEGRA210_CLK_CCLK_G, TEGRA210_CLK_CLK_MAX, 0, 1 },
- { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
{ TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
/* This MUST be the last entry. */
{ TEGRA210_CLK_CLK_MAX, TEGRA210_CLK_CLK_MAX, 0, 0 },
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 995685f..ecaf191 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1900,8 +1900,10 @@ unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
target_freq = clamp_val(target_freq, policy->min, policy->max);
ret = cpufreq_driver->fast_switch(policy, target_freq);
- if (ret)
+ if (ret) {
cpufreq_times_record_transition(policy, ret);
+ cpufreq_stats_record_transition(policy, ret);
+ }
return ret;
}
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1572129..21b919b 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -30,11 +30,12 @@ struct cpufreq_stats {
static void cpufreq_stats_update(struct cpufreq_stats *stats)
{
unsigned long long cur_time = get_jiffies_64();
+ unsigned long flags;
- spin_lock(&cpufreq_stats_lock);
+ spin_lock_irqsave(&cpufreq_stats_lock, flags);
stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
stats->last_time = cur_time;
- spin_unlock(&cpufreq_stats_lock);
+ spin_unlock_irqrestore(&cpufreq_stats_lock, flags);
}
static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
@@ -58,9 +59,6 @@ static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
ssize_t len = 0;
int i;
- if (policy->fast_switch_enabled)
- return 0;
-
cpufreq_stats_update(stats);
for (i = 0; i < stats->state_num; i++) {
len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
@@ -84,9 +82,6 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
ssize_t len = 0;
int i, j;
- if (policy->fast_switch_enabled)
- return 0;
-
len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
len += snprintf(buf + len, PAGE_SIZE - len, " : ");
for (i = 0; i < stats->state_num; i++) {
diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
index c7710c1..a0620c9 100644
--- a/drivers/cpufreq/pasemi-cpufreq.c
+++ b/drivers/cpufreq/pasemi-cpufreq.c
@@ -145,11 +145,19 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
int err = -ENODEV;
cpu = of_get_cpu_node(policy->cpu, NULL);
-
- of_node_put(cpu);
if (!cpu)
goto out;
+ max_freqp = of_get_property(cpu, "clock-frequency", NULL);
+ of_node_put(cpu);
+ if (!max_freqp) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* we need the freq in kHz */
+ max_freq = *max_freqp / 1000;
+
dn = of_find_compatible_node(NULL, NULL, "1682m-sdc");
if (!dn)
dn = of_find_compatible_node(NULL, NULL,
@@ -185,16 +193,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
}
pr_debug("init cpufreq on CPU %d\n", policy->cpu);
-
- max_freqp = of_get_property(cpu, "clock-frequency", NULL);
- if (!max_freqp) {
- err = -EINVAL;
- goto out_unmap_sdcpwr;
- }
-
- /* we need the freq in kHz */
- max_freq = *max_freqp / 1000;
-
pr_debug("max clock-frequency is at %u kHz\n", max_freq);
pr_debug("initializing frequency table\n");
@@ -212,9 +210,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency());
-out_unmap_sdcpwr:
- iounmap(sdcpwr_mapbase);
-
out_unmap_sdcasr:
iounmap(sdcasr_mapbase);
out:
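
The pasemi fix is an ordering bug: the old code dropped the cpu node reference with of_node_put() right after looking it up, then dereferenced the node much later via of_get_property(). The fix reads the property first and releases the reference afterwards, which also lets the now-pointless out_unmap_sdcpwr error path go away. A toy refcount sketch of the safe ordering (all names hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted object standing in for struct device_node. */
struct node {
	int refcount;
	unsigned int clock_frequency;
};

static void node_put(struct node *n)
{
	if (--n->refcount == 0) {
		printf("node freed\n");
		free(n);
	}
}

int main(void)
{
	struct node *cpu = malloc(sizeof(*cpu));
	unsigned int max_freq;

	if (!cpu)
		return 1;
	cpu->refcount = 1; /* as returned by a lookup like of_get_cpu_node() */
	cpu->clock_frequency = 1500000000u;

	/* Safe ordering, as in the fixed driver: read everything you need
	 * while the reference is held, then drop it. */
	max_freq = cpu->clock_frequency / 1000; /* we need the freq in kHz */
	node_put(cpu); /* after this, cpu must not be dereferenced again */

	printf("max clock-frequency is at %u kHz\n", max_freq);
	return 0;
}
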
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-galois.c b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
index ca1f0d7..e5dcb29 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-galois.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-galois.c
@@ -61,6 +61,19 @@ static int ccp_aes_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
static int ccp_aes_gcm_setauthsize(struct crypto_aead *tfm,
unsigned int authsize)
{
+ switch (authsize) {
+ case 16:
+ case 15:
+ case 14:
+ case 13:
+ case 12:
+ case 8:
+ case 4:
+ break;
+ default:
+ return -EINVAL;
+ }
+
return 0;
}
@@ -107,6 +120,7 @@ static int ccp_aes_gcm_crypt(struct aead_request *req, bool encrypt)
memset(&rctx->cmd, 0, sizeof(rctx->cmd));
INIT_LIST_HEAD(&rctx->cmd.entry);
rctx->cmd.engine = CCP_ENGINE_AES;
+ rctx->cmd.u.aes.authsize = crypto_aead_authsize(tfm);
rctx->cmd.u.aes.type = ctx->u.aes.type;
rctx->cmd.u.aes.mode = ctx->u.aes.mode;
rctx->cmd.u.aes.action = encrypt;
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index e212bad..1e2e421 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -625,6 +625,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
unsigned long long *final;
unsigned int dm_offset;
+ unsigned int authsize;
unsigned int jobid;
unsigned int ilen;
bool in_place = true; /* Default value */
@@ -646,6 +647,21 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
if (!aes->key) /* Gotta have a key SGL */
return -EINVAL;
+ /* Zero defaults to 16 bytes, the maximum size */
+ authsize = aes->authsize ? aes->authsize : AES_BLOCK_SIZE;
+ switch (authsize) {
+ case 16:
+ case 15:
+ case 14:
+ case 13:
+ case 12:
+ case 8:
+ case 4:
+ break;
+ default:
+ return -EINVAL;
+ }
+
/* First, decompose the source buffer into AAD & PT,
* and the destination buffer into AAD, CT & tag, or
* the input into CT & tag.
@@ -660,7 +676,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
p_tag = scatterwalk_ffwd(sg_tag, p_outp, ilen);
} else {
/* Input length for decryption includes tag */
- ilen = aes->src_len - AES_BLOCK_SIZE;
+ ilen = aes->src_len - authsize;
p_tag = scatterwalk_ffwd(sg_tag, p_inp, ilen);
}
@@ -769,8 +785,7 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
while (src.sg_wa.bytes_left) {
ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
if (!src.sg_wa.bytes_left) {
- unsigned int nbytes = aes->src_len
- % AES_BLOCK_SIZE;
+ unsigned int nbytes = ilen % AES_BLOCK_SIZE;
if (nbytes) {
op.eom = 1;
@@ -842,19 +857,19 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
if (aes->action == CCP_AES_ACTION_ENCRYPT) {
/* Put the ciphered tag after the ciphertext. */
- ccp_get_dm_area(&final_wa, 0, p_tag, 0, AES_BLOCK_SIZE);
+ ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
} else {
/* Does this ciphered tag match the input? */
- ret = ccp_init_dm_workarea(&tag, cmd_q, AES_BLOCK_SIZE,
+ ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
DMA_BIDIRECTIONAL);
if (ret)
goto e_tag;
- ret = ccp_set_dm_area(&tag, 0, p_tag, 0, AES_BLOCK_SIZE);
+ ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
if (ret)
goto e_tag;
ret = crypto_memneq(tag.address, final_wa.address,
- AES_BLOCK_SIZE) ? -EBADMSG : 0;
+ authsize) ? -EBADMSG : 0;
ccp_dm_free(&tag);
}
@@ -862,11 +877,11 @@ static int ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q,
ccp_dm_free(&final_wa);
e_dst:
- if (aes->src_len && !in_place)
+ if (ilen > 0 && !in_place)
ccp_free_data(&dst, cmd_q);
e_src:
- if (aes->src_len)
+ if (ilen > 0)
ccp_free_data(&src, cmd_q);
e_aad:
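
Both CCP hunks enforce the GCM tag lengths permitted by NIST SP 800-38D: 128, 120, 112, 104, 96, 64 or 32 bits, i.e. 16/15/14/13/12/8/4 bytes. On decryption the tag trails the ciphertext in the source buffer, so the ciphered length is src_len - authsize rather than a hardcoded src_len - AES_BLOCK_SIZE, and the residual byte count for the last block must come from that ilen. A small self-contained C sketch of both computations:

#include <stdio.h>
#include <stdbool.h>

#define AES_BLOCK_SIZE 16

/* Valid GCM tag lengths per NIST SP 800-38D, as in the added switches. */
static bool gcm_authsize_ok(unsigned int authsize)
{
	switch (authsize) {
	case 16: case 15: case 14: case 13: case 12: case 8: case 4:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	unsigned int authsize = 12; /* e.g. a 96-bit tag */
	unsigned int src_len = 100; /* ciphertext + tag, as on decrypt */
	unsigned int ilen, nbytes;

	if (!gcm_authsize_ok(authsize))
		return 1;

	/* Decrypt: the ciphered input length excludes the trailing tag. */
	ilen = src_len - authsize;

	/* Residual bytes in the final block derive from ilen, not src_len,
	 * because only ilen bytes actually pass through the cipher. */
	nbytes = ilen % AES_BLOCK_SIZE;

	printf("ilen=%u last-block bytes=%u\n", ilen, nbytes); /* 88 and 8 */
	return 0;
}
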
diff --git a/drivers/dma/qcom/gpi.c b/drivers/dma/qcom/gpi.c
index c6cbe61..31e9fc1 100644
--- a/drivers/dma/qcom/gpi.c
+++ b/drivers/dma/qcom/gpi.c
@@ -3,7 +3,6 @@
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
-#include <asm/dma-iommu.h>
#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
@@ -2534,111 +2533,6 @@ static void gpi_setup_debug(struct gpi_dev *gpi_dev)
}
}
-static struct dma_iommu_mapping *gpi_create_mapping(struct gpi_dev *gpi_dev)
-{
- dma_addr_t base;
- size_t size;
-
- /*
- * If S1_BYPASS enabled then iommu space is not used, however framework
- * still require clients to create a mapping space before attaching. So
- * set to smallest size required by iommu framework.
- */
- if (gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS) {
- base = 0;
- size = PAGE_SIZE;
- } else {
- base = gpi_dev->iova_base;
- size = gpi_dev->iova_size;
- }
-
- GPI_LOG(gpi_dev, "Creating iommu mapping of base:0x%llx size:%lu\n",
- base, size);
-
- return __depr_arm_iommu_create_mapping(&platform_bus_type, base, size);
-}
-
-static int gpi_smmu_init(struct gpi_dev *gpi_dev)
-{
- struct dma_iommu_mapping *mapping = NULL;
- int ret;
-
- if (gpi_dev->smmu_cfg) {
-
- /* create mapping table */
- mapping = gpi_create_mapping(gpi_dev);
- if (IS_ERR(mapping)) {
- GPI_ERR(gpi_dev,
- "Failed to create iommu mapping, ret:%ld\n",
- PTR_ERR(mapping));
- return PTR_ERR(mapping);
- }
-
- if (gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS) {
- int s1_bypass = 1;
-
- ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
- if (ret) {
- GPI_ERR(gpi_dev,
- "Failed to set attr S1_BYPASS, ret:%d\n",
- ret);
- goto release_mapping;
- }
- }
-
- if (gpi_dev->smmu_cfg & GPI_SMMU_FAST) {
- int fast = 1;
-
- ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_FAST, &fast);
- if (ret) {
- GPI_ERR(gpi_dev,
- "Failed to set attr FAST, ret:%d\n",
- ret);
- goto release_mapping;
- }
- }
-
- if (gpi_dev->smmu_cfg & GPI_SMMU_ATOMIC) {
- int atomic = 1;
-
- ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_ATOMIC, &atomic);
- if (ret) {
- GPI_ERR(gpi_dev,
- "Failed to set attr ATOMIC, ret:%d\n",
- ret);
- goto release_mapping;
- }
- }
-
- ret = __depr_arm_iommu_attach_device(gpi_dev->dev, mapping);
- if (ret) {
- GPI_ERR(gpi_dev,
- "Failed with iommu_attach, ret:%d\n", ret);
- goto release_mapping;
- }
- }
-
- GPI_LOG(gpi_dev, "Setting dma mask to 64\n");
- ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
- if (ret) {
- GPI_ERR(gpi_dev, "Error setting dma_mask to 64, ret:%d\n", ret);
- goto error_set_mask;
- }
-
- return ret;
-
-error_set_mask:
- if (gpi_dev->smmu_cfg)
- __depr_arm_iommu_detach_device(gpi_dev->dev);
-release_mapping:
- if (mapping)
- __depr_arm_iommu_release_mapping(mapping);
- return ret;
-}
-
static int gpi_probe(struct platform_device *pdev)
{
struct gpi_dev *gpi_dev;
@@ -2696,56 +2590,14 @@ static int gpi_probe(struct platform_device *pdev)
return ret;
}
- ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,smmu-cfg",
- &gpi_dev->smmu_cfg);
- if (ret) {
- GPI_ERR(gpi_dev, "missing 'qcom,smmu-cfg' DT node\n");
- return ret;
- }
-
ret = of_property_read_string(gpi_dev->dev->of_node,
"qcom,iommu-dma", &mode);
- if ((ret == 0) && (strcmp(mode, "disabled") == 0)) {
- if (gpi_dev->smmu_cfg &&
- !(gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS)) {
-
- u64 iova_range[2];
-
- ret = of_property_count_elems_of_size(
- gpi_dev->dev->of_node, "qcom,iova-range",
- sizeof(iova_range));
- if (ret != 1) {
- GPI_ERR(gpi_dev,
- "missing or incorrect 'qcom,iova-range' DT node ret:%d\n",
- ret);
- }
-
- ret = of_property_read_u64_array(gpi_dev->dev->of_node,
- "qcom,iova-range", iova_range,
- ARRAY_SIZE(iova_range));
- if (ret) {
- GPI_ERR(gpi_dev,
- "could not read DT prop 'qcom,iova-range\n");
- return ret;
- }
- gpi_dev->iova_base = iova_range[0];
- gpi_dev->iova_size = iova_range[1];
- }
-
- ret = gpi_smmu_init(gpi_dev);
- if (ret) {
- GPI_ERR(gpi_dev,
- "error configuring smmu, ret:%d\n", ret);
- return ret;
- }
- } else {
- ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
- if (ret) {
- GPI_ERR(gpi_dev,
- "Error setting dma_mask to 64, ret:%d\n", ret);
- return ret;
- }
+ ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ GPI_ERR(gpi_dev,
+ "Error setting dma_mask to 64, ret:%d\n", ret);
+ return ret;
}
gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev,
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 0b05a1e..041ce86 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1164,7 +1164,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
/* Someone calling slave DMA on a generic channel? */
- if (rchan->mid_rid < 0 || !sg_len) {
+ if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
dev_warn(chan->device->dev,
"%s: bad parameter: len=%d, id=%d\n",
__func__, sg_len, rchan->mid_rid);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 8219ab8..fb23993 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -981,8 +981,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
}
- if (flags & DMA_PREP_INTERRUPT)
+ if (flags & DMA_PREP_INTERRUPT) {
csr |= TEGRA_APBDMA_CSR_IE_EOC;
+ } else {
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
@@ -1124,8 +1128,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
}
- if (flags & DMA_PREP_INTERRUPT)
+ if (flags & DMA_PREP_INTERRUPT) {
csr |= TEGRA_APBDMA_CSR_IE_EOC;
+ } else {
+ WARN_ON_ONCE(1);
+ return NULL;
+ }
apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
index afcf2ce..86e47fe 100644
--- a/drivers/esoc/esoc-mdm-4x.c
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -262,7 +262,7 @@ static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
esoc_mdm_log(
"ESOC_FORCE_PWR_OFF: Queueing request: ESOC_REQ_SHUTDOWN\n");
esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc);
- mdm_toggle_soft_reset(mdm, false);
+ mdm_power_down(mdm);
mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
break;
case ESOC_RESET:
@@ -484,7 +484,7 @@ static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc)
mdm->ready = false;
esoc_mdm_log(
"ESOC_PRIMARY_REBOOT: Powering down the modem\n");
- mdm_toggle_soft_reset(mdm, false);
+ mdm_power_down(mdm);
break;
};
}
@@ -556,6 +556,7 @@ static irqreturn_t mdm_status_change(int irq, void *dev_id)
cancel_delayed_work(&mdm->mdm2ap_status_check_work);
dev_dbg(dev, "status = 1: mdm is now ready\n");
mdm->ready = true;
+ esoc_clink_evt_notify(ESOC_BOOT_STATE, esoc);
mdm_trigger_dbg(mdm);
queue_work(mdm->mdm_queue, &mdm->mdm_status_work);
if (mdm->get_restart_reason)
@@ -1080,26 +1081,27 @@ static int sdx55m_setup_hw(struct mdm_ctrl *mdm,
dev_err(mdm->dev, "Failed to parse DT gpios\n");
goto err_destroy_wrkq;
}
+ if (!of_property_read_bool(node, "qcom,esoc-spmi-soft-reset")) {
+ ret = mdm_pon_dt_init(mdm);
+ if (ret) {
+ esoc_mdm_log("Failed to parse PON DT gpios\n");
+ dev_err(mdm->dev, "Failed to parse PON DT gpio\n");
+ goto err_destroy_wrkq;
+ }
- ret = mdm_pon_dt_init(mdm);
- if (ret) {
- esoc_mdm_log("Failed to parse PON DT gpios\n");
- dev_err(mdm->dev, "Failed to parse PON DT gpio\n");
- goto err_destroy_wrkq;
+ ret = mdm_pon_setup(mdm);
+ if (ret) {
+ esoc_mdm_log("Failed to setup PON\n");
+ dev_err(mdm->dev, "Failed to setup PON\n");
+ goto err_destroy_wrkq;
+ }
}
ret = mdm_pinctrl_init(mdm);
if (ret) {
esoc_mdm_log("Failed to init pinctrl\n");
dev_err(mdm->dev, "Failed to init pinctrl\n");
- goto err_destroy_wrkq;
- }
-
- ret = mdm_pon_setup(mdm);
- if (ret) {
- esoc_mdm_log("Failed to setup PON\n");
- dev_err(mdm->dev, "Failed to setup PON\n");
- goto err_destroy_wrkq;
+ goto err_release_ipc;
}
ret = mdm_configure_ipc(mdm, pdev);
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
index b5bc12c..c90b81e 100644
--- a/drivers/esoc/esoc-mdm-drv.c
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -28,20 +28,21 @@ enum esoc_pon_state {
enum {
PWR_OFF = 0x1,
- PWR_ON,
- BOOT,
- RUN,
- CRASH,
- IN_DEBUG,
SHUTDOWN,
RESET,
PEER_CRASH,
+ IN_DEBUG,
+ CRASH,
+ PWR_ON,
+ BOOT,
+ RUN,
};
struct mdm_drv {
unsigned int mode;
struct esoc_eng cmd_eng;
struct completion pon_done;
+ struct completion ssr_ready;
struct completion req_eng_wait;
struct esoc_clink *esoc_clink;
enum esoc_pon_state pon_state;
@@ -142,6 +143,14 @@ static void mdm_handle_clink_evt(enum esoc_evt evt,
"ESOC_INVALID_STATE: Calling complete with state: PON_FAIL\n");
mdm_drv->pon_state = PON_FAIL;
complete(&mdm_drv->pon_done);
+ complete(&mdm_drv->ssr_ready);
+ break;
+ case ESOC_BOOT_STATE:
+ if (mdm_drv->mode == PWR_OFF) {
+ esoc_mdm_log(
+ "ESOC_BOOT_STATE: Observed status high from modem.\n");
+ mdm_drv->mode = BOOT;
+ }
break;
case ESOC_RUN_STATE:
esoc_mdm_log(
@@ -149,12 +158,14 @@ static void mdm_handle_clink_evt(enum esoc_evt evt,
mdm_drv->pon_state = PON_SUCCESS;
mdm_drv->mode = RUN,
complete(&mdm_drv->pon_done);
+ complete(&mdm_drv->ssr_ready);
break;
case ESOC_RETRY_PON_EVT:
esoc_mdm_log(
"ESOC_RETRY_PON_EVT: Calling complete with state: PON_RETRY\n");
mdm_drv->pon_state = PON_RETRY;
complete(&mdm_drv->pon_done);
+ complete(&mdm_drv->ssr_ready);
break;
case ESOC_UNEXPECTED_RESET:
esoc_mdm_log("evt_state: ESOC_UNEXPECTED_RESET\n");
@@ -164,19 +175,16 @@ static void mdm_handle_clink_evt(enum esoc_evt evt,
esoc_mdm_log("evt_state: ESOC_ERR_FATAL\n");
/*
- * Modem can crash while we are waiting for pon_done during
- * a subsystem_get(). Setting mode to CRASH will prevent a
- * subsequent subsystem_get() from entering poweron ops. Avoid
- * this by seting mode to CRASH only if device was up and
- * running.
+	 * Ignore all modem errfatals unless status is up or the
+	 * modem has reached the run state.
*/
- if (mdm_drv->mode == CRASH)
+ if (mdm_drv->mode <= CRASH) {
esoc_mdm_log(
- "Modem in crash state already. Ignoring.\n");
- if (mdm_drv->mode != RUN)
- esoc_mdm_log("Modem not up. Ignoring.\n");
- if (mdm_drv->mode == CRASH || mdm_drv->mode != RUN)
+ "Modem in crash state or not booted. Ignoring.\n");
return;
+ }
+ esoc_mdm_log("Setting crash flag\n");
+ mdm_drv->mode = CRASH;
queue_work(mdm_drv->mdm_queue, &mdm_drv->ssr_work);
break;
case ESOC_REQ_ENG_ON:
@@ -194,12 +202,16 @@ static void mdm_ssr_fn(struct work_struct *work)
struct mdm_drv *mdm_drv = container_of(work, struct mdm_drv, ssr_work);
struct mdm_ctrl *mdm = get_esoc_clink_data(mdm_drv->esoc_clink);
+	/* Wait for pon to complete. Start SSR only if pon succeeded */
+ wait_for_completion(&mdm_drv->ssr_ready);
+ if (mdm_drv->pon_state != PON_SUCCESS) {
+ esoc_mdm_log("Got errfatal but ignoring as boot failed\n");
+ return;
+ }
+
esoc_client_link_mdm_crash(mdm_drv->esoc_clink);
-
mdm_wait_for_status_low(mdm, false);
-
- esoc_mdm_log("Starting SSR work and setting crash state\n");
- mdm_drv->mode = CRASH;
+ esoc_mdm_log("Starting SSR work\n");
/*
* If restarting esoc fails, the SSR framework triggers a kernel panic
@@ -282,12 +294,14 @@ static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys,
container_of(crashed_subsys, struct esoc_clink, subsys);
struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+ struct mdm_ctrl *mdm = get_esoc_clink_data(mdm_drv->esoc_clink);
esoc_mdm_log("Shutdown request from SSR\n");
mutex_lock(&mdm_drv->poff_lock);
if (mdm_drv->mode == CRASH || mdm_drv->mode == PEER_CRASH) {
esoc_mdm_log("Shutdown in crash mode\n");
+ mdm_wait_for_status_low(mdm, false);
if (mdm_dbg_stall_cmd(ESOC_PREPARE_DEBUG)) {
/* We want to mask debug command.
* In this case return success
@@ -360,7 +374,9 @@ static void mdm_subsys_retry_powerup_cleanup(struct esoc_clink *esoc_clink,
esoc_client_link_power_off(esoc_clink, poff_flags);
mdm_disable_irqs(mdm);
mdm_drv->pon_state = PON_INIT;
+ mdm_drv->mode = PWR_OFF;
reinit_completion(&mdm_drv->pon_done);
+ reinit_completion(&mdm_drv->ssr_ready);
reinit_completion(&mdm_drv->req_eng_wait);
}
@@ -408,6 +424,7 @@ static int mdm_handle_boot_fail(struct esoc_clink *esoc_clink, u8 *pon_trial)
break;
case BOOT_FAIL_ACTION_NOP:
 		esoc_mdm_log("Leaving the modem in its current state\n");
+ mdm_drv->mode = PWR_OFF;
return -EIO;
case BOOT_FAIL_ACTION_SHUTDOWN:
default:
@@ -571,6 +588,7 @@ int esoc_ssr_probe(struct esoc_clink *esoc_clink, struct esoc_drv *drv)
}
esoc_set_drv_data(esoc_clink, mdm_drv);
init_completion(&mdm_drv->pon_done);
+ init_completion(&mdm_drv->ssr_ready);
init_completion(&mdm_drv->req_eng_wait);
INIT_WORK(&mdm_drv->ssr_work, mdm_ssr_fn);
mdm_drv->esoc_clink = esoc_clink;
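
The enum reorder above is deliberate: every state that means "not up and running" (PWR_OFF through CRASH) now sorts below the booted states, so the errfatal handler can replace three separate checks with the single comparison mode <= CRASH. A compilable sketch of the idiom, with the same ordering:

#include <stdio.h>
#include <stdbool.h>

/* Same ordering as the reordered enum: everything at or below CRASH
 * means the modem is not up and running. */
enum mode {
	PWR_OFF = 1,
	SHUTDOWN,
	RESET,
	PEER_CRASH,
	IN_DEBUG,
	CRASH,
	PWR_ON,
	BOOT,
	RUN,
};

static bool ignore_errfatal(enum mode m)
{
	return m <= CRASH; /* crashed, shutting down, or never booted */
}

int main(void)
{
	printf("PWR_OFF -> ignore=%d\n", ignore_errfatal(PWR_OFF)); /* 1 */
	printf("BOOT    -> ignore=%d\n", ignore_errfatal(BOOT));    /* 0 */
	printf("RUN     -> ignore=%d\n", ignore_errfatal(RUN));     /* 0 */
	return 0;
}
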
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
index abde8c7..1dfff3a 100644
--- a/drivers/esoc/esoc-mdm-pon.c
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -206,6 +206,12 @@ static int sdx50m_power_down(struct mdm_ctrl *mdm)
return 0;
}
+static int sdx55m_power_down(struct mdm_ctrl *mdm)
+{
+ esoc_mdm_log("Performing warm reset as cold reset is not supported\n");
+ return sdx55m_toggle_soft_reset(mdm, false);
+}
+
static void mdm9x55_cold_reset(struct mdm_ctrl *mdm)
{
dev_dbg(mdm->dev, "Triggering mdm cold reset");
@@ -318,6 +324,7 @@ struct mdm_pon_ops sdx50m_pon_ops = {
struct mdm_pon_ops sdx55m_pon_ops = {
.pon = mdm4x_do_first_power_on,
.soft_reset = sdx55m_toggle_soft_reset,
+ .poff_force = sdx55m_power_down,
.dt_init = mdm4x_pon_dt_init,
.setup = mdm4x_pon_setup,
};
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 6da80f3..6da0ab4 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -198,7 +198,7 @@
config ISCSI_IBFT_FIND
bool "iSCSI Boot Firmware Table Attributes"
- depends on X86 && ACPI
+ depends on X86 && ISCSI_IBFT
default n
help
This option enables the kernel to find the region of memory
@@ -209,7 +209,8 @@
config ISCSI_IBFT
tristate "iSCSI Boot Firmware Table Attributes module"
select ISCSI_BOOT_SYSFS
- depends on ISCSI_IBFT_FIND && SCSI && SCSI_LOWLEVEL
+ select ISCSI_IBFT_FIND if X86
+ depends on ACPI && SCSI && SCSI_LOWLEVEL
default n
help
This option enables support for detection and exposing of iSCSI
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index c51462f..966aef3 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -93,6 +93,10 @@ MODULE_DESCRIPTION("sysfs interface to BIOS iBFT information");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBFT_ISCSI_VERSION);
+#ifndef CONFIG_ISCSI_IBFT_FIND
+struct acpi_table_ibft *ibft_addr;
+#endif
+
struct ibft_hdr {
u8 id;
u8 version;
diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c
index 3469436..cbd53cb 100644
--- a/drivers/firmware/psci_checker.c
+++ b/drivers/firmware/psci_checker.c
@@ -366,16 +366,16 @@ static int suspend_test_thread(void *arg)
for (;;) {
/* Needs to be set first to avoid missing a wakeup. */
set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop()) {
- __set_current_state(TASK_RUNNING);
+ if (kthread_should_park())
break;
- }
schedule();
}
pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n",
cpu, nb_suspend, nb_shallow_sleep, nb_err);
+ kthread_parkme();
+
return nb_err;
}
@@ -440,8 +440,10 @@ static int suspend_tests(void)
/* Stop and destroy all threads, get return status. */
- for (i = 0; i < nb_threads; ++i)
+ for (i = 0; i < nb_threads; ++i) {
+ err += kthread_park(threads[i]);
err += kthread_stop(threads[i]);
+ }
out:
cpuidle_resume_and_unlock();
kfree(threads);
diff --git a/drivers/fpga/Kconfig b/drivers/fpga/Kconfig
index 1ebcef4..87337fc 100644
--- a/drivers/fpga/Kconfig
+++ b/drivers/fpga/Kconfig
@@ -39,6 +39,7 @@
config FPGA_MGR_ALTERA_PS_SPI
tristate "Altera FPGA Passive Serial over SPI"
depends on SPI
+ select BITREVERSE
help
FPGA manager driver support for Altera Arria/Cyclone/Stratix
using the passive serial interface over SPI.
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 827abde..58ba5aa 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -946,9 +946,11 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
}
if (eflags & GPIOEVENT_REQUEST_RISING_EDGE)
- irqflags |= IRQF_TRIGGER_RISING;
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;
if (eflags & GPIOEVENT_REQUEST_FALLING_EDGE)
- irqflags |= IRQF_TRIGGER_FALLING;
+ irqflags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
+ IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
irqflags |= IRQF_ONESHOT;
irqflags |= IRQF_SHARED;
@@ -1080,9 +1082,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
+ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
+ GPIOLINE_FLAG_IS_OUT);
if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
- lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
+ lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
+ GPIOLINE_FLAG_IS_OUT);
if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
return -EFAULT;
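
For an active-low GPIO the logical edge user space asks for is the inverse of the physical edge at the pin, so a requested rising edge must arm a falling-edge trigger and vice versa; the second hunk additionally reports open-drain/open-source lines as outputs, which they are by construction. A sketch of the edge mapping, with hypothetical flag values:

#include <stdio.h>
#include <stdbool.h>

#define REQ_RISING   0x1
#define REQ_FALLING  0x2
#define TRIG_RISING  0x1
#define TRIG_FALLING 0x2

/* Map logical (user-visible) edges to physical trigger flags,
 * inverting each edge when the line is active-low. */
static unsigned int map_edges(unsigned int eflags, bool active_low)
{
	unsigned int irqflags = 0;

	if (eflags & REQ_RISING)
		irqflags |= active_low ? TRIG_FALLING : TRIG_RISING;
	if (eflags & REQ_FALLING)
		irqflags |= active_low ? TRIG_RISING : TRIG_FALLING;
	return irqflags;
}

int main(void)
{
	/* A logical rising edge on an active-low button is a physical
	 * falling edge on the wire. */
	printf("%#x\n", map_edges(REQ_RISING, true));  /* 0x2 */
	printf("%#x\n", map_edges(REQ_RISING, false)); /* 0x1 */
	return 0;
}
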
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f5fb937..65cecfd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -707,7 +707,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
- data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
+ data = kcalloc(1024, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
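
kmalloc_array() returns uninitialized memory, and this debugfs read can copy back entries the hardware never filled in, so switching to the zeroing kcalloc() closes a potential kernel-heap infoleak. The user-space analogue of the distinction:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = 1024;

	/* malloc(): contents indeterminate -- handing unwritten elements
	 * to a consumer can leak stale heap data. */
	unsigned int *dirty = malloc(n * sizeof(*dirty));

	/* calloc(): guaranteed zero-filled, the analogue of kcalloc(). */
	unsigned int *clean = calloc(n, sizeof(*clean));

	if (!dirty || !clean)
		return 1;
	printf("clean[0]=%u (always 0)\n", clean[0]);
	free(dirty);
	free(clean);
	return 0;
}
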
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 72f8018..ede27da 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1037,6 +1037,9 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
+ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE, (adev->gmc.vram_start >> 8));
+ WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40));
+
/* After HDP is initialized, flush HDP.*/
adev->nbio_funcs->hdp_flush(adev, NULL);
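
The added writes program the 64-bit VRAM base into a split low/high register pair; judging from the shifts, the low register takes the address above its 256-byte alignment and the high register takes the bits from 40 up. A sketch of the split (field widths inferred from the hunk, not from documentation):

#include <stdio.h>
#include <stdint.h>

/* Split a 64-bit, 256-byte-aligned address across a low/high register
 * pair the way the added mmHDP_NONSURFACE_BASE/_BASE_HI writes do. */
static void split_base(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)(addr >> 8);  /* address bits above the alignment */
	*hi = (uint32_t)(addr >> 40); /* remaining high-order bits */
}

int main(void)
{
	uint32_t lo, hi;

	split_base(0x0123456789abcd00ull, &lo, &hi);
	printf("lo=%#x hi=%#x\n", lo, hi); /* lo=0x6789abcd hi=0x12345 */
	return 0;
}
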
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 4f22e74..189212c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -1268,12 +1268,17 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
return 0;
}
-static int unmap_sdma_queues(struct device_queue_manager *dqm,
- unsigned int sdma_engine)
+static int unmap_sdma_queues(struct device_queue_manager *dqm)
{
- return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
- sdma_engine);
+ int i, retval = 0;
+
+ for (i = 0; i < dqm->dev->device_info->num_sdma_engines; i++) {
+ retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
+ KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
+ if (retval)
+ return retval;
+ }
+ return retval;
}
/* dqm->lock mutex has to be locked before calling this function */
@@ -1312,10 +1317,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
pr_debug("Before destroying queues, sdma queue count is : %u\n",
dqm->sdma_queue_count);
- if (dqm->sdma_queue_count > 0) {
- unmap_sdma_queues(dqm, 0);
- unmap_sdma_queues(dqm, 1);
- }
+ if (dqm->sdma_queue_count > 0)
+ unmap_sdma_queues(dqm);
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
filter, filter_param, false, 0);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 0cedb37..985bebd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -75,6 +75,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
struct v9_mqd *m;
struct kfd_dev *kfd = mm->dev;
+ *mqd_mem_obj = NULL;
/* From V9, for CWSR, the control stack is located on the next page
* boundary after the mqd, we will use the gtt allocation function
* instead of sub-allocation function.
@@ -92,8 +93,10 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
} else
retval = kfd_gtt_sa_allocate(mm->dev, sizeof(struct v9_mqd),
mqd_mem_obj);
- if (retval != 0)
+ if (retval) {
+ kfree(*mqd_mem_obj);
return -ENOMEM;
+ }
m = (struct v9_mqd *) (*mqd_mem_obj)->cpu_ptr;
addr = (*mqd_mem_obj)->gpu_addr;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index dac7978..221de24 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -3644,6 +3644,13 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
{
struct amdgpu_device *adev = dm->ddev->dev_private;
+ /*
+ * Some of the properties below require access to state, like bpc.
+ * Allocate some default initial connector state with our reset helper.
+ */
+ if (aconnector->base.funcs->reset)
+ aconnector->base.funcs->reset(&aconnector->base);
+
aconnector->connector_id = link_index;
aconnector->dc_link = link;
aconnector->base.interlace_allowed = false;
@@ -3811,9 +3818,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
&aconnector->base,
&amdgpu_dm_connector_helper_funcs);
- if (aconnector->base.funcs->reset)
- aconnector->base.funcs->reset(&aconnector->base);
-
amdgpu_dm_connector_init_helper(
dm,
aconnector,
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index e3f5e5d..f4b89d1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -462,8 +462,10 @@ void dc_link_set_test_pattern(struct dc_link *link,
static void destruct(struct dc *dc)
{
- dc_release_state(dc->current_state);
- dc->current_state = NULL;
+ if (dc->current_state) {
+ dc_release_state(dc->current_state);
+ dc->current_state = NULL;
+ }
destroy_links(dc);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index e0a96ab..f0d68aa 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -222,7 +222,7 @@ bool resource_construct(
* PORT_CONNECTIVITY == 1 (as instructed by HW team).
*/
update_num_audio(&straps, &num_audio, &pool->audio_support);
- for (i = 0; i < pool->pipe_count && i < num_audio; i++) {
+ for (i = 0; i < caps->num_audio; i++) {
struct audio *aud = create_funcs->create_audio(ctx, i);
if (aud == NULL) {
@@ -1713,6 +1713,12 @@ static struct audio *find_first_free_audio(
return pool->audios[i];
}
}
+
+ /* use engine id to find free audio */
+	if (id < pool->audio_count && !res_ctx->is_audio_acquired[id])
+		return pool->audios[id];
+
/*not found the matching one, first come first serve*/
for (i = 0; i < pool->audio_count; i++) {
if (res_ctx->is_audio_acquired[i] == false) {
@@ -1866,6 +1872,7 @@ static int get_norm_pix_clk(const struct dc_crtc_timing *timing)
pix_clk /= 2;
if (timing->pixel_encoding != PIXEL_ENCODING_YCBCR422) {
switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
case COLOR_DEPTH_888:
normalized_pix_clk = pix_clk;
break;
@@ -1949,7 +1956,7 @@ enum dc_status resource_map_pool_resources(
/* TODO: Add check if ASIC support and EDID audio */
if (!stream->sink->converter_disable_audio &&
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
- stream->audio_info.mode_count) {
+ stream->audio_info.mode_count && stream->audio_info.flags.all) {
pipe_ctx->stream_res.audio = find_first_free_audio(
&context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 29294db..da8b198 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -242,6 +242,10 @@ static void dmcu_set_backlight_level(
s2 |= (level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
REG_WRITE(BIOS_SCRATCH_2, s2);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT,
+ 0, 1, 80000);
}
static void dce_abm_init(struct abm *abm)
@@ -474,6 +478,8 @@ void dce_abm_destroy(struct abm **abm)
{
struct dce_abm *abm_dce = TO_DCE_ABM(*abm);
+ abm_dce->base.funcs->set_abm_immediate_disable(*abm);
+
kfree(abm_dce);
*abm = NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 53ccacf..c3ad2bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -242,6 +242,9 @@ static void build_prescale_params(struct ipp_prescale_params *prescale_params,
prescale_params->mode = IPP_PRESCALE_MODE_FIXED_UNSIGNED;
switch (plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ prescale_params->scale = 0x2082;
+ break;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
prescale_params->scale = 0x2020;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 7736ef1..ead221c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -23,6 +23,7 @@
*
*/
+#include <linux/delay.h>
#include "dm_services.h"
#include "core_types.h"
#include "resource.h"
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index c0b9ca1..f4469fa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -159,7 +159,7 @@ struct resource_pool {
struct clock_source *clock_sources[MAX_CLOCK_SOURCES];
unsigned int clk_src_count;
- struct audio *audios[MAX_PIPES];
+ struct audio *audios[MAX_AUDIOS];
unsigned int audio_count;
struct audio_support audio_support;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index cf7433e..7190174 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -34,6 +34,7 @@
* Data types shared between different Virtual HW blocks
******************************************************************************/
+#define MAX_AUDIOS 7
#define MAX_PIPES 6
struct gamma_curve {
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index bf6cad6..7a3e5a8 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -46,6 +46,7 @@
config DRM_LVDS_ENCODER
tristate "Transparent parallel to LVDS encoder support"
depends on OF
+ select DRM_KMS_HELPER
select DRM_PANEL_BRIDGE
help
Support for transparent parallel to LVDS encoders that don't require
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index e59a135..0cc6dbb 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -261,10 +261,11 @@ static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
struct regmap *regmap = sii902x->regmap;
u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
struct hdmi_avi_infoframe frame;
+ u16 pixel_clock_10kHz = adj->clock / 10;
int ret;
- buf[0] = adj->clock;
- buf[1] = adj->clock >> 8;
+ buf[0] = pixel_clock_10kHz & 0xff;
+ buf[1] = pixel_clock_10kHz >> 8;
buf[2] = adj->vrefresh;
buf[3] = 0x00;
buf[4] = adj->hdisplay;
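
The TPI video registers on this bridge take the pixel clock in 10 kHz units, while drm mode clocks are expressed in kHz, so the old code wrote a value ten times too large; the fix divides by 10 and splits the 16-bit result into explicit low/high bytes. A compilable sketch of the conversion:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int mode_clock_khz = 148500; /* 1080p60: 148.5 MHz, in kHz */
	uint16_t clk_10khz = mode_clock_khz / 10; /* TPI unit: 10 kHz */
	uint8_t buf[2];

	buf[0] = clk_10khz & 0xff; /* low byte */
	buf[1] = clk_10khz >> 8;   /* high byte */

	/* The old code stored the raw kHz value, ten times too large for
	 * the 10 kHz field, and truncated it byte-by-byte. */
	printf("10kHz units=%u bytes=%02x %02x\n",
	       (unsigned)clk_10khz, buf[0], buf[1]);
	return 0;
}
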
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 3915473..aaca524 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1149,6 +1149,13 @@ static int tc_connector_get_modes(struct drm_connector *connector)
struct tc_data *tc = connector_to_tc(connector);
struct edid *edid;
unsigned int count;
+ int ret;
+
+ ret = tc_get_display_props(tc);
+ if (ret < 0) {
+ dev_err(tc->dev, "failed to read display props: %d\n", ret);
+ return 0;
+ }
if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) {
count = tc->panel->funcs->get_modes(tc->panel);
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 9996119..c88e5ff 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -379,12 +379,13 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
struct drm_crtc_crc *crc = &crtc->crc;
struct drm_crtc_crc_entry *entry;
int head, tail;
+ unsigned long flags;
- spin_lock(&crc->lock);
+ spin_lock_irqsave(&crc->lock, flags);
/* Caller may not have noticed yet that userspace has stopped reading */
if (!crc->entries) {
- spin_unlock(&crc->lock);
+ spin_unlock_irqrestore(&crc->lock, flags);
return -EINVAL;
}
@@ -395,7 +396,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
bool was_overflow = crc->overflow;
crc->overflow = true;
- spin_unlock(&crc->lock);
+ spin_unlock_irqrestore(&crc->lock, flags);
if (!was_overflow)
DRM_ERROR("Overflow of CRC buffer, userspace reads too slow.\n");
@@ -411,7 +412,7 @@ int drm_crtc_add_crc_entry(struct drm_crtc *crtc, bool has_frame,
head = (head + 1) & (DRM_CRC_ENTRIES_NR - 1);
crc->head = head;
- spin_unlock(&crc->lock);
+ spin_unlock_irqrestore(&crc->lock, flags);
wake_up_interruptible(&crc->wq);
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index a491509..a0e107a 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -290,6 +290,8 @@ struct edid *drm_load_edid_firmware(struct drm_connector *connector)
* the last one found one as a fallback.
*/
fwstr = kstrdup(edid_firmware, GFP_KERNEL);
+ if (!fwstr)
+ return ERR_PTR(-ENOMEM);
edidstr = fwstr;
while ((edidname = strsep(&edidstr, ","))) {
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 77b1800..2fefca4 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -794,7 +794,7 @@ static int atomic_remove_fb(struct drm_framebuffer *fb)
struct drm_device *dev = fb->dev;
struct drm_atomic_state *state;
struct drm_plane *plane;
- struct drm_connector *conn;
+ struct drm_connector *conn __maybe_unused;
struct drm_connector_state *conn_state;
int i, ret;
unsigned plane_mask;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 0ddb6ee..df22843 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -108,12 +108,12 @@ static inline int scaler_reset(struct scaler_context *scaler)
scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
do {
cpu_relax();
- } while (retry > 1 &&
+ } while (--retry > 1 &&
scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
do {
cpu_relax();
scaler_write(1, SCALER_INT_EN);
- } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+ } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
return retry ? 0 : -EIO;
}
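
Without the pre-decrement, retry never changed and a scaler that failed to leave soft reset would spin these loops forever; with --retry both loops are bounded and the final `return retry ? 0 : -EIO` distinguishes success from timeout. A minimal demonstration of the fixed loop shape, with a hypothetical readback:

#include <stdio.h>

/* Hypothetical readback that never reports ready, to show the bound. */
static int hw_ready(void) { return 0; }

int main(void)
{
	int retry = 1000;

	/* Fixed form: --retry guarantees forward progress even when the
	 * hardware never responds; the unfixed `while (retry > 0 && ...)`
	 * loops forever in that case. */
	do {
		/* a cpu_relax() equivalent would go here */
	} while (--retry > 0 && !hw_ready());

	printf("%s\n", retry ? "ready" : "timeout (-EIO)");
	return 0;
}
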
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 12e4203..66abe06 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1748,6 +1748,18 @@ int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
+ } else if (entry->size != size) {
+ /* the same gfn with different size: unmap and re-map */
+ gvt_dma_unmap_page(vgpu, gfn, entry->dma_addr, entry->size);
+ __gvt_cache_remove_entry(vgpu, entry);
+
+ ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
+ if (ret)
+ goto err_unlock;
+
+ ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ if (ret)
+ goto err_unmap;
} else {
kref_get(&entry->ref);
*dma_addr = entry->dma_addr;
diff --git a/drivers/gpu/drm/i915/vlv_dsi_pll.c b/drivers/gpu/drm/i915/vlv_dsi_pll.c
index a132a80..77df790 100644
--- a/drivers/gpu/drm/i915/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/vlv_dsi_pll.c
@@ -413,8 +413,8 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
else
txesc2_div = 10;
- I915_WRITE(MIPIO_TXESC_CLK_DIV1, txesc1_div & GLK_TX_ESC_CLK_DIV1_MASK);
- I915_WRITE(MIPIO_TXESC_CLK_DIV2, txesc2_div & GLK_TX_ESC_CLK_DIV2_MASK);
+ I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
+ I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
}
/* Program BXT Mipi clocks and dividers */
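
Judging from the fix, the MIPIO_TXESC_CLK_DIV fields do not take a binary-coded divisor: a divide-by-N appears to be programmed as the value 1 << (N - 1), so writing the raw N (as the old code did) selected the wrong escape-clock divider. A sketch of the encoding; the mask value here is a hypothetical stand-in:

#include <stdio.h>

#define TX_ESC_CLK_DIV_MASK 0x3ff /* hypothetical field mask */

/* Encode a divide-by-N for a one-hot divider field: bit (N - 1) set. */
static unsigned int encode_div(unsigned int n)
{
	return (1u << (n - 1)) & TX_ESC_CLK_DIV_MASK;
}

int main(void)
{
	/* div-by-2 -> 0x2, div-by-10 -> 0x200; the old code would have
	 * written the raw values 2 and 10 instead. */
	printf("div2=%#x div10=%#x\n", encode_div(2), encode_div(10));
	return 0;
}
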
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 8b0605f..610139b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1298,7 +1298,8 @@ static int add_gpu_components(struct device *dev,
if (!np)
return 0;
- drm_of_component_match_add(dev, matchptr, compare_of, np);
+ if (of_device_is_available(np))
+ drm_of_component_match_add(dev, matchptr, compare_of, np);
of_node_put(np);
@@ -1336,16 +1337,24 @@ static int msm_pdev_probe(struct platform_device *pdev)
ret = add_gpu_components(&pdev->dev, &match);
if (ret)
- return ret;
+ goto fail;
/* on all devices that I am aware of, iommu's which can map
* any address the cpu can see are used:
*/
ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
if (ret)
- return ret;
+ goto fail;
- return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
+ ret = component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
+ if (ret)
+ goto fail;
+
+ return 0;
+
+fail:
+ of_platform_depopulate(&pdev->dev);
+ return ret;
}
static int msm_pdev_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 247f72c..fb0094f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -251,7 +251,7 @@ nouveau_conn_reset(struct drm_connector *connector)
return;
if (connector->state)
- __drm_atomic_helper_connector_destroy_state(connector->state);
+ nouveau_conn_atomic_destroy_state(connector, connector->state);
__drm_atomic_helper_connector_reset(connector, &asyc->state);
asyc->dither.mode = DITHERING_MODE_AUTO;
asyc->dither.depth = DITHERING_DEPTH_AUTO;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
index b4e7404..a11637b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
@@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
u8 *ptr = msg->buf;
while (remaining) {
- u8 cnt = (remaining > 16) ? 16 : remaining;
- u8 cmd;
+ u8 cnt, retries, cmd;
if (msg->flags & I2C_M_RD)
cmd = 1;
@@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
if (mcnt || remaining > 16)
cmd |= 4; /* MOT */
- ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
- if (ret < 0) {
- nvkm_i2c_aux_release(aux);
- return ret;
+ for (retries = 0, cnt = 0;
+ retries < 32 && !cnt;
+ retries++) {
+ cnt = min_t(u8, remaining, 16);
+ ret = aux->func->xfer(aux, true, cmd,
+ msg->addr, ptr, &cnt);
+ if (ret < 0)
+ goto out;
+ }
+ if (!cnt) {
+ AUX_TRACE(aux, "no data after 32 retries");
+ ret = -EIO;
+ goto out;
}
ptr += cnt;
@@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
msg++;
}
+ ret = num;
+out:
nvkm_i2c_aux_release(aux);
- return num;
+ return ret;
}
static u32
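
An AUX transfer can apparently complete without error yet move zero bytes, so each 16-byte chunk is now retried up to 32 times before the driver gives up with -EIO, and the single out: label guarantees nvkm_i2c_aux_release() runs on every exit path. A sketch of the bounded-retry shape, with a fake transfer function standing in for aux->func->xfer():

#include <stdio.h>

/* Hypothetical transfer that succeeds but moves no data a few times. */
static int fake_xfer(unsigned char *cnt, int *calls)
{
	*cnt = (++(*calls) >= 3) ? 16 : 0; /* data appears on the 3rd try */
	return 0;                          /* no hard error */
}

int main(void)
{
	unsigned char cnt = 0;
	int calls = 0, ret = 0;
	unsigned int retries;

	/* Retry while the transfer reports success with zero bytes,
	 * bounded at 32 attempts as in the fixed driver. */
	for (retries = 0; retries < 32 && !cnt; retries++) {
		ret = fake_xfer(&cnt, &calls);
		if (ret < 0)
			break;
	}
	if (!ret && !cnt)
		ret = -5; /* -EIO: no data after 32 retries */

	printf("ret=%d cnt=%u calls=%d\n", ret, cnt, calls);
	return 0;
}
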
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 97964f7..b1d41c4 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2803,7 +2803,14 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
dsi->format = desc->format;
dsi->lanes = desc->lanes;
- return mipi_dsi_attach(dsi);
+ err = mipi_dsi_attach(dsi);
+ if (err) {
+ struct panel_simple *panel = dev_get_drvdata(&dsi->dev);
+
+ drm_panel_remove(&panel->base);
+ }
+
+ return err;
}
static int panel_simple_dsi_remove(struct mipi_dsi_device *dsi)
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 080f053..6a4da3a 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -436,7 +436,7 @@ static int rockchip_dp_resume(struct device *dev)
static const struct dev_pm_ops rockchip_dp_pm_ops = {
#ifdef CONFIG_PM_SLEEP
- .suspend = rockchip_dp_suspend,
+ .suspend_late = rockchip_dp_suspend,
.resume_early = rockchip_dp_resume,
#endif
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index f8f9ae6..873624a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -880,7 +880,8 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
struct vop *vop = to_vop(crtc);
adjusted_mode->clock =
- clk_round_rate(vop->dclk, mode->clock * 1000) / 1000;
+ DIV_ROUND_UP(clk_round_rate(vop->dclk, mode->clock * 1000),
+ 1000);
return true;
}
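
clk_round_rate() can return a rate just below the requested one, and truncating that rate to kHz rounds down a second time; the doubly rounded-down clock can then make an otherwise attainable mode look unachievable, hence DIV_ROUND_UP. The arithmetic:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	long rounded_hz = 148499999; /* what clk_round_rate() might return
	                                for a requested 148500000 Hz */

	printf("truncated : %ld kHz\n", rounded_hz / 1000);              /* 148499 */
	printf("rounded up: %ld kHz\n", DIV_ROUND_UP(rounded_hz, 1000)); /* 148500 */
	return 0;
}
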
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 7bdf6f0..8d2f5de 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -528,6 +528,9 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
if (!ret)
return -EBUSY;
+	/* The is_valid check must precede the copy of the cache entry. */
+ smp_rmb();
+
ptr = cache_ent->caps_cache;
copy_exit:
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 020070d4..c8a581b 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -588,6 +588,8 @@ static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
cache_ent->id == le32_to_cpu(cmd->capset_id)) {
memcpy(cache_ent->caps_cache, resp->capset_data,
cache_ent->size);
+ /* Copy must occur before is_valid is signalled. */
+ smp_wmb();
atomic_set(&cache_ent->is_valid, 1);
break;
}
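
The smp_rmb() in virtgpu_ioctl.c and the smp_wmb() here pair up as a classic publish pattern: the producer copies the capset data and only then sets is_valid; the consumer tests is_valid and only then copies the data. Without both barriers the consumer could observe the flag before the payload. The same pattern expressed with portable C11 release/acquire atomics:

#include <stdatomic.h>
#include <stdio.h>
#include <threads.h>

static int caps_cache;      /* the payload (capset data) */
static atomic_int is_valid; /* the published flag */

static int producer(void *arg)
{
	(void)arg;
	caps_cache = 42; /* copy the data first... */
	/* ...then publish; the release store plays the smp_wmb() role. */
	atomic_store_explicit(&is_valid, 1, memory_order_release);
	return 0;
}

int main(void)
{
	thrd_t t;

	if (thrd_create(&t, producer, NULL) != thrd_success)
		return 1;

	/* The acquire load plays the smp_rmb() role: once the flag is
	 * seen, the data copy is guaranteed to be visible as well. */
	while (!atomic_load_explicit(&is_valid, memory_order_acquire))
		;
	printf("caps=%d\n", caps_cache); /* always 42 */
	thrd_join(t, NULL);
	return 0;
}
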
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index e4e09d4..59e9d05 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -389,8 +389,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
break;
}
- if (retries == RETRIES)
+ if (retries == RETRIES) {
+ kfree(reply);
return -EINVAL;
+ }
*msg_len = reply_len;
*msg = reply;
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 815bdb4..0121fe7 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -423,6 +423,9 @@ static int host1x_device_add(struct host1x *host1x,
of_dma_configure(&device->dev, host1x->dev->of_node, true);
+ device->dev.dma_parms = &device->dma_parms;
+ dma_set_max_seg_size(&device->dev, SZ_4M);
+
err = host1x_device_parse_dt(device, driver);
if (err < 0) {
kfree(device);
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 5cb4cbb..481cd3b 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -194,7 +194,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a530v2 = {
.gpudev = &adreno_a5xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = SZ_1M,
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 32,
},
@@ -220,7 +219,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a530v3 = {
.gpudev = &adreno_a5xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = SZ_1M,
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 32,
},
@@ -286,7 +284,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a505 = {
.gpudev = &adreno_a5xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = (SZ_128K + SZ_8K),
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 16,
},
@@ -306,7 +303,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a506 = {
.gpudev = &adreno_a5xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = (SZ_128K + SZ_8K),
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 16,
},
@@ -384,7 +380,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a510 = {
.gpudev = &adreno_a5xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = SZ_256K,
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 16,
},
@@ -510,7 +505,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a540v2 = {
.gpudev = &adreno_a5xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = SZ_1M,
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 32,
},
@@ -593,7 +587,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a512 = {
.gpudev = &adreno_a5xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = (SZ_256K + SZ_16K),
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 32,
},
@@ -612,7 +605,6 @@ static const struct adreno_a5xx_core adreno_gpu_core_a508 = {
.gpudev = &adreno_a5xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = (SZ_128K + SZ_8K),
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 32,
},
@@ -740,6 +732,43 @@ static const struct adreno_reglist a630_vbif_regs[] = {
{A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3},
};
+
+/* For a615, a616, a618, a630, a640 and a680 */
+static const struct a6xx_protected_regs a630_protected_regs[] = {
+ { A6XX_CP_PROTECT_REG + 0, 0x00000, 0x004ff, 0 },
+ { A6XX_CP_PROTECT_REG + 1, 0x00501, 0x00506, 0 },
+ { A6XX_CP_PROTECT_REG + 2, 0x0050b, 0x007ff, 0 },
+ { A6XX_CP_PROTECT_REG + 3, 0x0050e, 0x0050e, 1 },
+ { A6XX_CP_PROTECT_REG + 4, 0x00510, 0x00510, 1 },
+ { A6XX_CP_PROTECT_REG + 5, 0x00534, 0x00534, 1 },
+ { A6XX_CP_PROTECT_REG + 6, 0x00800, 0x00882, 1 },
+ { A6XX_CP_PROTECT_REG + 7, 0x008a0, 0x008a8, 1 },
+ { A6XX_CP_PROTECT_REG + 8, 0x008ab, 0x008cf, 1 },
+ { A6XX_CP_PROTECT_REG + 9, 0x008d0, 0x0098c, 0 },
+ { A6XX_CP_PROTECT_REG + 10, 0x00900, 0x0094d, 1 },
+ { A6XX_CP_PROTECT_REG + 11, 0x0098d, 0x00bff, 1 },
+ { A6XX_CP_PROTECT_REG + 12, 0x00e00, 0x00e0f, 1 },
+ { A6XX_CP_PROTECT_REG + 13, 0x03c00, 0x03cc3, 1 },
+ { A6XX_CP_PROTECT_REG + 14, 0x03cc4, 0x05cc3, 0 },
+ { A6XX_CP_PROTECT_REG + 15, 0x08630, 0x087ff, 1 },
+ { A6XX_CP_PROTECT_REG + 16, 0x08e00, 0x08e00, 1 },
+ { A6XX_CP_PROTECT_REG + 17, 0x08e08, 0x08e08, 1 },
+ { A6XX_CP_PROTECT_REG + 18, 0x08e50, 0x08e6f, 1 },
+ { A6XX_CP_PROTECT_REG + 19, 0x09624, 0x097ff, 1 },
+ { A6XX_CP_PROTECT_REG + 20, 0x09e70, 0x09e71, 1 },
+ { A6XX_CP_PROTECT_REG + 21, 0x09e78, 0x09fff, 1 },
+ { A6XX_CP_PROTECT_REG + 22, 0x0a630, 0x0a7ff, 1 },
+ { A6XX_CP_PROTECT_REG + 23, 0x0ae02, 0x0ae02, 1 },
+ { A6XX_CP_PROTECT_REG + 24, 0x0ae50, 0x0b17f, 1 },
+ { A6XX_CP_PROTECT_REG + 25, 0x0b604, 0x0b604, 1 },
+ { A6XX_CP_PROTECT_REG + 26, 0x0be02, 0x0be03, 1 },
+ { A6XX_CP_PROTECT_REG + 27, 0x0be20, 0x0de1f, 1 },
+ { A6XX_CP_PROTECT_REG + 28, 0x0f000, 0x0fbff, 1 },
+ { A6XX_CP_PROTECT_REG + 29, 0x0fc00, 0x11bff, 0 },
+ { A6XX_CP_PROTECT_REG + 31, 0x11c00, 0x00000, 1 },
+ { 0 },
+};
+
static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
.base = {
DEFINE_ADRENO_REV(ADRENO_REV_A630, 6, 3, 0, ANY_ID),
@@ -749,7 +778,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
.gpudev = &adreno_a6xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = SZ_1M,
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 32,
},
@@ -764,6 +792,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a630v2 = {
.hwcg_count = ARRAY_SIZE(a630_hwcg_regs),
.vbif = a630_vbif_regs,
.vbif_count = ARRAY_SIZE(a630_vbif_regs),
+ .hang_detect_cycles = 0x3fffff,
+ .protected_regs = a630_protected_regs,
};
/* For a615, a616 and a618 */
@@ -847,7 +877,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a615 = {
.gpudev = &adreno_a6xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = SZ_512K,
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 32,
},
@@ -862,6 +891,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a615 = {
.hwcg_count = ARRAY_SIZE(a615_hwcg_regs),
.vbif = a615_gbif_regs,
.vbif_count = ARRAY_SIZE(a615_gbif_regs),
+ .hang_detect_cycles = 0x3fffff,
+ .protected_regs = a630_protected_regs,
};
static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
@@ -873,7 +904,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
.gpudev = &adreno_a6xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = SZ_512K,
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 32,
},
@@ -888,6 +918,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a618 = {
.hwcg_count = ARRAY_SIZE(a615_hwcg_regs),
.vbif = a615_gbif_regs,
.vbif_count = ARRAY_SIZE(a615_gbif_regs),
+ .hang_detect_cycles = 0x3fffff,
+ .protected_regs = a630_protected_regs,
};
static const struct adreno_reglist a620_hwcg_regs[] = {
@@ -951,6 +983,44 @@ static const struct adreno_reglist a650_gbif_regs[] = {
{A6XX_RBBM_GBIF_CLIENT_QOS_CNTL, 0x3},
};
+/* These are for a620 and a650 */
+static const struct a6xx_protected_regs a620_protected_regs[] = {
+ { A6XX_CP_PROTECT_REG + 0, 0x00000, 0x004ff, 0 },
+ { A6XX_CP_PROTECT_REG + 1, 0x00501, 0x00506, 0 },
+ { A6XX_CP_PROTECT_REG + 2, 0x0050b, 0x007ff, 0 },
+ { A6XX_CP_PROTECT_REG + 3, 0x0050e, 0x0050e, 1 },
+ { A6XX_CP_PROTECT_REG + 4, 0x00510, 0x00510, 1 },
+ { A6XX_CP_PROTECT_REG + 5, 0x00534, 0x00534, 1 },
+ { A6XX_CP_PROTECT_REG + 6, 0x00800, 0x00882, 1 },
+ { A6XX_CP_PROTECT_REG + 7, 0x008a0, 0x008a8, 1 },
+ { A6XX_CP_PROTECT_REG + 8, 0x008ab, 0x008cf, 1 },
+ { A6XX_CP_PROTECT_REG + 9, 0x008d0, 0x0098c, 0 },
+ { A6XX_CP_PROTECT_REG + 10, 0x00900, 0x0094d, 1 },
+ { A6XX_CP_PROTECT_REG + 11, 0x0098d, 0x00bff, 1 },
+ { A6XX_CP_PROTECT_REG + 12, 0x00e00, 0x00e0f, 1 },
+ { A6XX_CP_PROTECT_REG + 13, 0x03c00, 0x03cc3, 1 },
+ { A6XX_CP_PROTECT_REG + 14, 0x03cc4, 0x05cc3, 0 },
+ { A6XX_CP_PROTECT_REG + 15, 0x08630, 0x087ff, 1 },
+ { A6XX_CP_PROTECT_REG + 16, 0x08e00, 0x08e00, 1 },
+ { A6XX_CP_PROTECT_REG + 17, 0x08e08, 0x08e08, 1 },
+ { A6XX_CP_PROTECT_REG + 18, 0x08e50, 0x08e6f, 1 },
+ { A6XX_CP_PROTECT_REG + 19, 0x08e80, 0x090ff, 1 },
+ { A6XX_CP_PROTECT_REG + 20, 0x09624, 0x097ff, 1 },
+ { A6XX_CP_PROTECT_REG + 21, 0x09e60, 0x09e71, 1 },
+ { A6XX_CP_PROTECT_REG + 22, 0x09e78, 0x09fff, 1 },
+ { A6XX_CP_PROTECT_REG + 23, 0x0a630, 0x0a7ff, 1 },
+ { A6XX_CP_PROTECT_REG + 24, 0x0ae02, 0x0ae02, 1 },
+ { A6XX_CP_PROTECT_REG + 25, 0x0ae50, 0x0b17f, 1 },
+ { A6XX_CP_PROTECT_REG + 26, 0x0b604, 0x0b604, 1 },
+ { A6XX_CP_PROTECT_REG + 27, 0x0b608, 0x0b60f, 1 },
+ { A6XX_CP_PROTECT_REG + 28, 0x0be02, 0x0be03, 1 },
+ { A6XX_CP_PROTECT_REG + 29, 0x0be20, 0x0de1f, 1 },
+ { A6XX_CP_PROTECT_REG + 30, 0x0f000, 0x0fbff, 1 },
+ { A6XX_CP_PROTECT_REG + 31, 0x0fc00, 0x11bff, 0 },
+ { A6XX_CP_PROTECT_REG + 47, 0x11c00, 0x00000, 1 },
+ { 0 },
+};
+
static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
.base = {
DEFINE_ADRENO_REV(ADRENO_REV_A620, 6, 2, 0, 0),
@@ -960,7 +1030,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
.gpudev = &adreno_a6xx_gpudev,
.gmem_base = 0,
.gmem_size = SZ_512K,
- .num_protected_regs = 0x30,
.busy_mask = 0xfffffffe,
.bus_width = 32,
},
@@ -976,6 +1045,9 @@ static const struct adreno_a6xx_core adreno_gpu_core_a620 = {
.vbif = a650_gbif_regs,
.vbif_count = ARRAY_SIZE(a650_gbif_regs),
.veto_fal10 = true,
+ .hang_detect_cycles = 0x3ffff,
+ .protected_regs = a620_protected_regs,
+ .disable_tseskip = true,
};
static const struct adreno_reglist a640_hwcg_regs[] = {
@@ -1030,7 +1102,7 @@ static const struct adreno_reglist a640_hwcg_regs[] = {
{A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
};
-/* These apply to a640, a680 and a612 */
+/* These apply to a640, a680, a612 and a610 */
static const struct adreno_reglist a640_vbif_regs[] = {
{A6XX_GBIF_QSB_SIDE0, 0x00071620},
{A6XX_GBIF_QSB_SIDE1, 0x00071620},
@@ -1048,7 +1120,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a640 = {
.gpudev = &adreno_a6xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = SZ_1M, //Verified 1MB
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 32,
},
@@ -1063,6 +1134,9 @@ static const struct adreno_a6xx_core adreno_gpu_core_a640 = {
.hwcg_count = ARRAY_SIZE(a640_hwcg_regs),
.vbif = a640_vbif_regs,
.vbif_count = ARRAY_SIZE(a640_vbif_regs),
+ .hang_detect_cycles = 0x3fffff,
+ .protected_regs = a630_protected_regs,
+ .disable_tseskip = true,
};
static const struct adreno_reglist a650_hwcg_regs[] = {
@@ -1119,14 +1193,13 @@ static const struct adreno_reglist a650_hwcg_regs[] = {
static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
{
- DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, ANY_ID),
+ DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, 0),
.features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
ADRENO_IFPC,
.gpudev = &adreno_a6xx_gpudev,
.gmem_base = 0,
.gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
- .num_protected_regs = 0x30,
.busy_mask = 0xfffffffe,
.bus_width = 32,
},
@@ -1141,6 +1214,37 @@ static const struct adreno_a6xx_core adreno_gpu_core_a650 = {
.vbif_count = ARRAY_SIZE(a650_gbif_regs),
.veto_fal10 = true,
.pdc_in_aop = true,
+ .hang_detect_cycles = 0x3fffff,
+ .protected_regs = a620_protected_regs,
+ .disable_tseskip = true,
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a650v2 = {
+ {
+ DEFINE_ADRENO_REV(ADRENO_REV_A650, 6, 5, 0, ANY_ID),
+ .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
+ ADRENO_IOCOHERENT | ADRENO_CONTENT_PROTECTION |
+ ADRENO_IFPC | ADRENO_PREEMPTION,
+ .gpudev = &adreno_a6xx_gpudev,
+ .gmem_base = 0,
+ .gmem_size = SZ_1M + SZ_128K, /* verified 1152kB */
+ .busy_mask = 0xfffffffe,
+ .bus_width = 32,
+ },
+ .prim_fifo_threshold = 0x00300000,
+ .pdc_address_offset = 0x000300A0,
+ .gmu_major = 2,
+ .gmu_minor = 0,
+ .sqefw_name = "a650_sqe.fw",
+ .gmufw_name = "a650_gmu.bin",
+ .zap_name = "a650_zap",
+ .vbif = a650_gbif_regs,
+ .vbif_count = ARRAY_SIZE(a650_gbif_regs),
+ .veto_fal10 = true,
+ .pdc_in_aop = true,
+ .hang_detect_cycles = 0x3ffff,
+ .protected_regs = a620_protected_regs,
+ .disable_tseskip = true,
};
static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
@@ -1150,7 +1254,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
.gpudev = &adreno_a6xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = SZ_2M,
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 32,
},
@@ -1165,6 +1268,9 @@ static const struct adreno_a6xx_core adreno_gpu_core_a680 = {
.hwcg_count = ARRAY_SIZE(a640_hwcg_regs),
.vbif = a640_vbif_regs,
.vbif_count = ARRAY_SIZE(a640_vbif_regs),
+ .hang_detect_cycles = 0x3fffff,
+ .protected_regs = a630_protected_regs,
+ .disable_tseskip = true,
};
static const struct adreno_reglist a612_hwcg_regs[] = {
@@ -1221,11 +1327,10 @@ static const struct adreno_a6xx_core adreno_gpu_core_a612 = {
DEFINE_ADRENO_REV(ADRENO_REV_A612, 6, 1, 2, ANY_ID),
.features = ADRENO_64BIT | ADRENO_CONTENT_PROTECTION |
ADRENO_IOCOHERENT | ADRENO_PREEMPTION | ADRENO_GPMU |
- ADRENO_IFPC | ADRENO_PERFCTRL_RETAIN,
+ ADRENO_IFPC,
.gpudev = &adreno_a6xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = (SZ_128K + SZ_4K),
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 32,
},
@@ -1238,6 +1343,8 @@ static const struct adreno_a6xx_core adreno_gpu_core_a612 = {
.hwcg_count = ARRAY_SIZE(a612_hwcg_regs),
.vbif = a640_vbif_regs,
.vbif_count = ARRAY_SIZE(a640_vbif_regs),
+ .hang_detect_cycles = 0x3fffff,
+ .protected_regs = a630_protected_regs,
};
static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
@@ -1249,7 +1356,6 @@ static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
.gpudev = &adreno_a6xx_gpudev,
.gmem_base = 0x100000,
.gmem_size = SZ_512K,
- .num_protected_regs = 0x20,
.busy_mask = 0xfffffffe,
.bus_width = 32,
},
@@ -1264,6 +1370,30 @@ static const struct adreno_a6xx_core adreno_gpu_core_a616 = {
.hwcg_count = ARRAY_SIZE(a615_hwcg_regs),
.vbif = a615_gbif_regs,
.vbif_count = ARRAY_SIZE(a615_gbif_regs),
+ .hang_detect_cycles = 0x3fffff,
+ .protected_regs = a630_protected_regs,
+};
+
+static const struct adreno_a6xx_core adreno_gpu_core_a610 = {
+ {
+ DEFINE_ADRENO_REV(ADRENO_REV_A610, 6, 1, 0, ANY_ID),
+ .features = ADRENO_64BIT | ADRENO_CONTENT_PROTECTION |
+ ADRENO_PREEMPTION,
+ .gpudev = &adreno_a6xx_gpudev,
+ .gmem_base = 0x100000,
+ .gmem_size = (SZ_128K + SZ_4K),
+ .busy_mask = 0xfffffffe,
+ .bus_width = 32,
+ },
+ .prim_fifo_threshold = 0x00080000,
+ .sqefw_name = "a630_sqe.fw",
+ .zap_name = "a610_zap",
+ .hwcg = a612_hwcg_regs,
+ .hwcg_count = ARRAY_SIZE(a612_hwcg_regs),
+ .vbif = a640_vbif_regs,
+ .vbif_count = ARRAY_SIZE(a640_vbif_regs),
+ .hang_detect_cycles = 0x3ffff,
+ .protected_regs = a630_protected_regs,
};
static const struct adreno_gpu_core *adreno_gpulist[] = {
@@ -1291,7 +1421,9 @@ static const struct adreno_gpu_core *adreno_gpulist[] = {
&adreno_gpu_core_a620.base,
&adreno_gpu_core_a640.base,
&adreno_gpu_core_a650.base,
+ &adreno_gpu_core_a650v2.base,
&adreno_gpu_core_a680.base,
&adreno_gpu_core_a612.base,
&adreno_gpu_core_a616.base,
+ &adreno_gpu_core_a610.base,
};
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 5c7d91e..aa946dc 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -50,7 +50,6 @@ static struct adreno_device device_3d0 = {
.irq_name = "kgsl_3d0_irq",
},
.iomemname = "kgsl_3d0_reg_memory",
- .shadermemname = "kgsl_3d0_shader_memory",
.ftbl = &adreno_functable,
},
.ft_policy = KGSL_FT_DEFAULT_POLICY,
@@ -948,6 +947,9 @@ static int adreno_of_parse_pwrlevels(struct adreno_device *adreno_dev,
&level->gpu_freq))
return -EINVAL;
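+ /* The ACD level is optional so ignore the return value */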
+ of_property_read_u32(child, "qcom,acd-level",
+ &level->acd_level);
+
ret = _of_property_read_ddrtype(child,
"qcom,bus-freq", &level->bus_freq);
if (ret) {
@@ -1559,7 +1561,6 @@ static int adreno_remove(struct platform_device *pdev)
if (efuse_base != NULL)
iounmap(efuse_base);
- adreno_perfcounter_close(adreno_dev);
kgsl_device_platform_remove(device);
gmu_core_remove(device);
@@ -1935,17 +1936,6 @@ static int _adreno_start(struct adreno_device *adreno_dev)
adreno_support_64bit(adreno_dev))
gpudev->enable_64bit(adreno_dev);
- if (adreno_dev->perfctr_pwr_lo == 0) {
- ret = adreno_perfcounter_get(adreno_dev,
- KGSL_PERFCOUNTER_GROUP_PWR, 1,
- &adreno_dev->perfctr_pwr_lo, NULL,
- PERFCOUNTER_FLAG_KERNEL);
-
- if (WARN_ONCE(ret, "Unable to get perfcounters for DCVS\n"))
- adreno_dev->perfctr_pwr_lo = 0;
- }
-
-
if (device->pwrctrl.bus_control) {
/* VBIF waiting for RAM */
if (adreno_dev->starved_ram_lo == 0) {
@@ -2051,15 +2041,6 @@ static int _adreno_start(struct adreno_device *adreno_dev)
}
}
- if (gmu_core_isenabled(device) && adreno_dev->perfctr_ifpc_lo == 0) {
- ret = adreno_perfcounter_get(adreno_dev,
- KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 4,
- &adreno_dev->perfctr_ifpc_lo, NULL,
- PERFCOUNTER_FLAG_KERNEL);
- if (WARN_ONCE(ret, "Unable to get perf counter for IFPC\n"))
- adreno_dev->perfctr_ifpc_lo = 0;
- }
-
/* Clear the busy_data stats - we're starting over from scratch */
adreno_dev->busy_data.gpu_busy = 0;
adreno_dev->busy_data.bif_ram_cycles = 0;
@@ -2266,10 +2247,13 @@ static inline bool adreno_try_soft_reset(struct kgsl_device *device, int fault)
* needs a reset too) and also for below gpu
* A304: It can't do SMMU programming of any kind after a soft reset
* A612: IPC protocol between RGMU and CP will not restart after reset
+ * A610: A cross-chip issue with the reset line on all 11nm chips
+ * makes soft reset unreliable, so it is not recommended
*/
if ((fault & ADRENO_IOMMU_PAGE_FAULT) || adreno_is_a304(adreno_dev) ||
- adreno_is_a612(adreno_dev))
+ adreno_is_a612(adreno_dev) ||
+ adreno_is_a610(adreno_dev))
return false;
return true;
@@ -3037,55 +3021,10 @@ static int adreno_suspend_context(struct kgsl_device *device)
return adreno_idle(device);
}
-/**
- * adreno_read - General read function to read adreno device memory
- * @device - Pointer to the GPU device struct (for adreno device)
- * @base - Base address (kernel virtual) where the device memory is mapped
- * @offsetwords - Offset in words from the base address, of the memory that
- * is to be read
- * @value - Value read from the device memory
- * @mem_len - Length of the device memory mapped to the kernel
- */
-static void adreno_read(struct kgsl_device *device, void __iomem *base,
- unsigned int offsetwords, unsigned int *value,
- unsigned int mem_len)
-{
-
- void __iomem *reg;
-
- /* Make sure we're not reading from invalid memory */
- if (WARN(offsetwords * sizeof(uint32_t) >= mem_len,
- "Out of bounds register read: 0x%x/0x%x\n",
- offsetwords, mem_len >> 2))
- return;
-
- reg = (base + (offsetwords << 2));
-
- if (!in_interrupt())
- kgsl_pre_hwaccess(device);
-
- *value = __raw_readl(reg);
- /*
- * ensure this read finishes before the next one.
- * i.e. act like normal readl()
- */
- rmb();
-}
-
static void adreno_retry_rbbm_read(struct kgsl_device *device,
- void __iomem *base, unsigned int offsetwords,
- unsigned int *value, unsigned int mem_len)
+ unsigned int offsetwords, unsigned int *value)
{
int i;
- void __iomem *reg;
-
- /* Make sure we're not reading from invalid memory */
- if (WARN(offsetwords * sizeof(uint32_t) >= mem_len,
- "Out of bounds register read: 0x%x/0x%x\n",
- offsetwords, mem_len >> 2))
- return;
-
- reg = (base + (offsetwords << 2));
/*
* If 0xdeafbead was transient, second read is expected to return the
@@ -3093,7 +3032,7 @@ static void adreno_retry_rbbm_read(struct kgsl_device *device,
* 0xdeafbead, read it enough times to guarantee that.
*/
for (i = 0; i < 16; i++) {
- *value = readl_relaxed(reg);
+ *value = readl_relaxed(device->reg_virt + (offsetwords << 2));
/*
* Read barrier needed so that register is read from hardware
* every iteration
@@ -3110,12 +3049,13 @@ static bool adreno_is_rbbm_batch_reg(struct kgsl_device *device,
{
if (adreno_is_a650(ADRENO_DEVICE(device)) ||
adreno_is_a620v1(ADRENO_DEVICE(device))) {
- if (((offsetwords > 0x0) && (offsetwords < 0x3FF)) ||
- ((offsetwords > 0x4FA) && (offsetwords < 0x53F)) ||
- ((offsetwords > 0x556) && (offsetwords < 0x5FF)) ||
- ((offsetwords > 0xF400) && (offsetwords < 0xFFFF)))
+ if (((offsetwords >= 0x0) && (offsetwords <= 0x3FF)) ||
+ ((offsetwords >= 0x4FA) && (offsetwords <= 0x53F)) ||
+ ((offsetwords >= 0x556) && (offsetwords <= 0x5FF)) ||
+ ((offsetwords >= 0xF400) && (offsetwords <= 0xFFFF)))
return true;
}
+
return false;
}
@@ -3127,26 +3067,22 @@ static bool adreno_is_rbbm_batch_reg(struct kgsl_device *device,
static void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
unsigned int *value)
{
- adreno_read(device, device->reg_virt, offsetwords, value,
- device->reg_len);
+ /* Make sure we're not reading from invalid memory */
+ if (WARN(offsetwords * sizeof(uint32_t) >= device->reg_len,
+ "Out of bounds register read: 0x%x/0x%x\n",
+ offsetwords, device->reg_len >> 2))
+ return;
+
+ if (!in_interrupt())
+ kgsl_pre_hwaccess(device);
+
+ *value = readl_relaxed(device->reg_virt + (offsetwords << 2));
+ /* Order this read with respect to the following memory accesses */
+ rmb();
if ((*value == 0xdeafbead) &&
adreno_is_rbbm_batch_reg(device, offsetwords))
- adreno_retry_rbbm_read(device, device->reg_virt, offsetwords,
- value, device->reg_len);
-}
-
-/**
- * adreno_shadermem_regread - Used to read GPU (adreno) shader memory
- * @device - GPU device whose shader memory is to be read
- * @offsetwords - Offset in words, of the shader memory address to be read
- * @value - Pointer to where the read shader mem value is to be stored
- */
-void adreno_shadermem_regread(struct kgsl_device *device,
- unsigned int offsetwords, unsigned int *value)
-{
- adreno_read(device, device->shader_mem_virt, offsetwords, value,
- device->shader_mem_len);
+ adreno_retry_rbbm_read(device, offsetwords, value);
}
static void adreno_regwrite(struct kgsl_device *device,
@@ -3615,33 +3551,29 @@ static void adreno_power_stats(struct kgsl_device *device,
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct adreno_busy_data *busy = &adreno_dev->busy_data;
int64_t adj = 0;
+ u64 gpu_busy;
memset(stats, 0, sizeof(*stats));
- /* Get the busy cycles counted since the counter was last reset */
- if (adreno_dev->perfctr_pwr_lo != 0) {
- uint64_t gpu_busy;
+ gpu_busy = counter_delta(device, adreno_dev->perfctr_pwr_lo,
+ &busy->gpu_busy);
- gpu_busy = counter_delta(device, adreno_dev->perfctr_pwr_lo,
- &busy->gpu_busy);
+ if (gpudev->read_throttling_counters) {
+ adj = gpudev->read_throttling_counters(adreno_dev);
+ if (adj < 0 && -adj > gpu_busy)
+ adj = 0;
- if (gpudev->read_throttling_counters) {
- adj = gpudev->read_throttling_counters(adreno_dev);
- if (adj < 0 && -adj > gpu_busy)
- adj = 0;
+ gpu_busy += adj;
+ }
- gpu_busy += adj;
- }
-
- if (adreno_is_a6xx(adreno_dev)) {
- /* clock sourced from XO */
- stats->busy_time = gpu_busy * 10;
- do_div(stats->busy_time, 192);
- } else {
- /* clock sourced from GFX3D */
- stats->busy_time = adreno_ticks_to_us(gpu_busy,
- kgsl_pwrctrl_active_freq(pwr));
- }
+ if (adreno_is_a6xx(adreno_dev)) {
+ /* clock sourced from XO */
+ stats->busy_time = gpu_busy * 10;
+ do_div(stats->busy_time, 192);
+ } else {
+ /* clock sourced from GFX3D */
+ stats->busy_time = adreno_ticks_to_us(gpu_busy,
+ kgsl_pwrctrl_active_freq(pwr));
}
if (device->pwrctrl.bus_control) {
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 032c7f00..3fea1f0 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -96,8 +96,6 @@
#define ADRENO_MIN_VOLT BIT(15)
/* The core supports IO-coherent memory */
#define ADRENO_IOCOHERENT BIT(16)
-/* To retain RBBM perfcntl enable setting in IFPC */
-#define ADRENO_PERFCTRL_RETAIN BIT(17)
/*
* The GMU supports Adaptive Clock Distribution (ACD)
* for droop mitigation
@@ -195,6 +193,7 @@ enum adreno_gpurev {
ADRENO_REV_A512 = 512,
ADRENO_REV_A530 = 530,
ADRENO_REV_A540 = 540,
+ ADRENO_REV_A610 = 610,
ADRENO_REV_A612 = 612,
ADRENO_REV_A615 = 615,
ADRENO_REV_A616 = 616,
@@ -352,7 +351,6 @@ struct adreno_reglist {
* @gpudev: Pointer to the GPU family specific functions for this core
* @gmem_base: Base address of binning memory (GMEM/OCMEM)
* @gmem_size: Amount of binning memory (GMEM/OCMEM) to reserve for the core
- * @num_protected_regs: number of protected registers
* @busy_mask: mask to check if GPU is busy in RBBM_STATUS
* @bus_width: Bytes transferred in 1 cycle
*/
@@ -363,7 +361,6 @@ struct adreno_gpu_core {
struct adreno_gpudev *gpudev;
unsigned long gmem_base;
size_t gmem_size;
- unsigned int num_protected_regs;
unsigned int busy_mask;
u32 bus_width;
};
@@ -891,7 +888,6 @@ struct adreno_gpudev {
int (*rb_start)(struct adreno_device *adreno_dev);
int (*microcode_read)(struct adreno_device *adreno_dev);
void (*perfcounter_init)(struct adreno_device *adreno_dev);
- void (*perfcounter_close)(struct adreno_device *adreno_dev);
void (*start)(struct adreno_device *adreno_dev);
bool (*is_sptp_idle)(struct adreno_device *adreno_dev);
int (*regulator_enable)(struct adreno_device *adreno_dev);
@@ -902,8 +898,6 @@ struct adreno_gpudev {
int64_t (*read_throttling_counters)(struct adreno_device *adreno_dev);
void (*count_throttles)(struct adreno_device *adreno_dev,
uint64_t adj);
- int (*enable_pwr_counters)(struct adreno_device *adrneo_dev,
- unsigned int counter);
unsigned int (*preemption_pre_ibsubmit)(
struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb,
@@ -930,7 +924,7 @@ struct adreno_gpudev {
void (*gpu_keepalive)(struct adreno_device *adreno_dev,
bool state);
bool (*hw_isidle)(struct adreno_device *adreno_dev);
- const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
+ const char *(*iommu_fault_block)(struct kgsl_device *device,
unsigned int fsynr1);
int (*reset)(struct kgsl_device *device, int fault);
int (*soft_reset)(struct adreno_device *adreno_dev);
@@ -1049,10 +1043,6 @@ int adreno_set_constraint(struct kgsl_device *device,
struct kgsl_context *context,
struct kgsl_device_constraint *constraint);
-void adreno_shadermem_regread(struct kgsl_device *device,
- unsigned int offsetwords,
- unsigned int *value);
-
void adreno_snapshot(struct kgsl_device *device,
struct kgsl_snapshot *snapshot,
struct kgsl_context *context);
@@ -1167,6 +1157,7 @@ static inline int adreno_is_a6xx(struct adreno_device *adreno_dev)
ADRENO_GPUREV(adreno_dev) < 700;
}
+ADRENO_TARGET(a610, ADRENO_REV_A610)
ADRENO_TARGET(a612, ADRENO_REV_A612)
ADRENO_TARGET(a618, ADRENO_REV_A618)
ADRENO_TARGET(a620, ADRENO_REV_A620)
@@ -1464,43 +1455,6 @@ static inline void adreno_put_gpu_halt(struct adreno_device *adreno_dev)
void adreno_reglist_write(struct adreno_device *adreno_dev,
const struct adreno_reglist *list, u32 count);
-/**
- * adreno_set_protected_registers() - Protect the specified range of registers
- * from being accessed by the GPU
- * @adreno_dev: pointer to the Adreno device
- * @index: Pointer to the index of the protect mode register to write to
- * @reg: Starting dword register to write
- * @mask_len: Size of the mask to protect (# of registers = 2 ** mask_len)
- *
- * Add the range of registers to the list of protected mode registers that will
- * cause an exception if the GPU accesses them. There are 16 available
- * protected mode registers. Index is used to specify which register to write
- * to - the intent is to call this function multiple times with the same index
- * pointer for each range and the registers will be magically programmed in
- * incremental fashion
- */
-static inline void adreno_set_protected_registers(
- struct adreno_device *adreno_dev, unsigned int *index,
- unsigned int reg, int mask_len)
-{
- unsigned int val;
- unsigned int base =
- adreno_getreg(adreno_dev, ADRENO_REG_CP_PROTECT_REG_0);
- unsigned int offset = *index;
- unsigned int max_slots = adreno_dev->gpucore->num_protected_regs ?
- adreno_dev->gpucore->num_protected_regs : 16;
-
- /* Do we have a free slot? */
- if (WARN(*index >= max_slots, "Protected register slots full: %d/%d\n",
- *index, max_slots))
- return;
-
- val = 0x60000000 | ((mask_len & 0x1F) << 24) | ((reg << 2) & 0xFFFFF);
-
- kgsl_regwrite(KGSL_DEVICE(adreno_dev), base + offset, val);
- *index = *index + 1;
-}
-
#ifdef CONFIG_DEBUG_FS
void adreno_debugfs_init(struct adreno_device *adreno_dev);
void adreno_context_debugfs_init(struct adreno_device *adreno_dev,
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 3456c14..680afa0 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -611,6 +611,9 @@ static void a3xx_platform_setup(struct adreno_device *adreno_dev)
gpudev->vbif_xin_halt_ctrl0_mask = A30X_VBIF_XIN_HALT_CTRL0_MASK;
+ /* Set the GPU busy counter for frequency scaling */
+ adreno_dev->perfctr_pwr_lo = A3XX_RBBM_PERFCTR_PWR_1_LO;
+
/* Check efuse bits for various capabilties */
a3xx_check_features(adreno_dev);
}
@@ -968,8 +971,10 @@ static struct adreno_perfcount_register a3xx_perfcounters_rb[] = {
static struct adreno_perfcount_register a3xx_perfcounters_pwr[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_0_LO,
A3XX_RBBM_PERFCTR_PWR_0_HI, -1, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A3XX_RBBM_PERFCTR_PWR_1_LO,
- A3XX_RBBM_PERFCTR_PWR_1_HI, -1, 0 },
+ /*
+ * A3XX_RBBM_PERFCTR_PWR_1_LO is used for frequency scaling and removed
+ * from the pool of available counters
+ */
};
static struct adreno_perfcount_register a3xx_perfcounters_vbif[] = {
@@ -1063,64 +1068,50 @@ static void a3xx_perfcounter_init(struct adreno_device *adreno_dev)
counters->groups[KGSL_PERFCOUNTER_GROUP_VBIF_PWR].regs =
a3xx_perfcounters_vbif2_pwr;
}
-
- /*
- * Enable the GPU busy count counter. This is a fixed counter on
- * A3XX so we don't need to bother checking the return value
- */
- adreno_perfcounter_get(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
- NULL, NULL, PERFCOUNTER_FLAG_KERNEL);
}
-static void a3xx_perfcounter_close(struct adreno_device *adreno_dev)
-{
- adreno_perfcounter_put(adreno_dev, KGSL_PERFCOUNTER_GROUP_PWR, 1,
- PERFCOUNTER_FLAG_KERNEL);
-}
+static const struct {
+ u32 reg;
+ u32 base;
+ u32 count;
+} a3xx_protected_blocks[] = {
+ /* RBBM */
+ { A3XX_CP_PROTECT_REG_0, 0x0018, 0 },
+ { A3XX_CP_PROTECT_REG_0 + 1, 0x0020, 2 },
+ { A3XX_CP_PROTECT_REG_0 + 2, 0x0033, 0 },
+ { A3XX_CP_PROTECT_REG_0 + 3, 0x0042, 0 },
+ { A3XX_CP_PROTECT_REG_0 + 4, 0x0050, 4 },
+ { A3XX_CP_PROTECT_REG_0 + 5, 0x0063, 0 },
+ { A3XX_CP_PROTECT_REG_0 + 6, 0x0100, 4 },
+ /* CP */
+ { A3XX_CP_PROTECT_REG_0 + 7, 0x01c0, 5 },
+ { A3XX_CP_PROTECT_REG_0 + 8, 0x01ec, 1 },
+ { A3XX_CP_PROTECT_REG_0 + 9, 0x01f6, 1 },
+ { A3XX_CP_PROTECT_REG_0 + 10, 0x01f8, 2 },
+ { A3XX_CP_PROTECT_REG_0 + 11, 0x045e, 2 },
+ { A3XX_CP_PROTECT_REG_0 + 12, 0x0460, 4 },
+ /* RB */
+ { A3XX_CP_PROTECT_REG_0 + 13, 0x0cc0, 0 },
+ /* VBIF */
+ { A3XX_CP_PROTECT_REG_0 + 14, 0x3000, 6 },
+ /* SMMU */
+ { A3XX_CP_PROTECT_REG_0 + 15, 0xa000, 12 },
+ /* There are no remaining protected mode registers for a3xx */
+};
-/**
- * a3xx_protect_init() - Initializes register protection on a3xx
- * @adreno_dev: Pointer to the device structure
- * Performs register writes to enable protected access to sensitive
- * registers
- */
-static void a3xx_protect_init(struct adreno_device *adreno_dev)
+static void a3xx_protect_init(struct kgsl_device *device)
{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- int index = 0;
- struct kgsl_protected_registers *iommu_regs;
+ int i;
- /* enable access protection to privileged registers */
kgsl_regwrite(device, A3XX_CP_PROTECT_CTRL, 0x00000007);
- /* RBBM registers */
- adreno_set_protected_registers(adreno_dev, &index, 0x18, 0);
- adreno_set_protected_registers(adreno_dev, &index, 0x20, 2);
- adreno_set_protected_registers(adreno_dev, &index, 0x33, 0);
- adreno_set_protected_registers(adreno_dev, &index, 0x42, 0);
- adreno_set_protected_registers(adreno_dev, &index, 0x50, 4);
- adreno_set_protected_registers(adreno_dev, &index, 0x63, 0);
- adreno_set_protected_registers(adreno_dev, &index, 0x100, 4);
+ for (i = 0; i < ARRAY_SIZE(a3xx_protected_blocks); i++) {
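+ /*
+ * 0x60000000 enables protection for the block; count is the log2
+ * of the number of registers in the block and base is the
+ * starting dword offset
+ */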
+ u32 val = 0x60000000 |
+ (a3xx_protected_blocks[i].count << 24) |
+ (a3xx_protected_blocks[i].base << 2);
- /* CP registers */
- adreno_set_protected_registers(adreno_dev, &index, 0x1C0, 5);
- adreno_set_protected_registers(adreno_dev, &index, 0x1EC, 1);
- adreno_set_protected_registers(adreno_dev, &index, 0x1F6, 1);
- adreno_set_protected_registers(adreno_dev, &index, 0x1F8, 2);
- adreno_set_protected_registers(adreno_dev, &index, 0x45E, 2);
- adreno_set_protected_registers(adreno_dev, &index, 0x460, 4);
-
- /* RB registers */
- adreno_set_protected_registers(adreno_dev, &index, 0xCC0, 0);
-
- /* VBIF registers */
- adreno_set_protected_registers(adreno_dev, &index, 0x3000, 6);
-
- /* SMMU registers */
- iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
- if (iommu_regs)
- adreno_set_protected_registers(adreno_dev, &index,
- iommu_regs->base, ilog2(iommu_regs->range));
+ kgsl_regwrite(device, a3xx_protected_blocks[i].reg, val);
+ }
}
static void a3xx_start(struct adreno_device *adreno_dev)
@@ -1169,7 +1160,7 @@ static void a3xx_start(struct adreno_device *adreno_dev)
kgsl_regwrite(device, A3XX_RBBM_CLOCK_CTL, A3XX_RBBM_CLOCK_CTL_DEFAULT);
/* Turn on protection */
- a3xx_protect_init(adreno_dev);
+ a3xx_protect_init(device);
/* Turn on performance counters */
kgsl_regwrite(device, A3XX_RBBM_PERFCTR_CTL, 0x01);
@@ -1514,7 +1505,6 @@ struct adreno_gpudev adreno_a3xx_gpudev = {
.init = a3xx_init,
.microcode_read = a3xx_microcode_read,
.perfcounter_init = a3xx_perfcounter_init,
- .perfcounter_close = a3xx_perfcounter_close,
.start = a3xx_start,
.snapshot = a3xx_snapshot,
.coresight = {&a3xx_coresight},
diff --git a/drivers/gpu/msm/adreno_a3xx_snapshot.c b/drivers/gpu/msm/adreno_a3xx_snapshot.c
index e34be4c..b636991 100644
--- a/drivers/gpu/msm/adreno_a3xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a3xx_snapshot.c
@@ -104,13 +104,9 @@ static size_t a3xx_snapshot_shader_memory(struct kgsl_device *device,
u8 *buf, size_t remain, void *priv)
{
struct kgsl_snapshot_debug *header = (struct kgsl_snapshot_debug *)buf;
- unsigned int i;
- unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+ void *data = buf + sizeof(*header);
unsigned int shader_read_len = SHADER_MEMORY_SIZE;
- if (shader_read_len > (device->shader_mem_len >> 2))
- shader_read_len = (device->shader_mem_len >> 2);
-
if (remain < DEBUG_SECTION_SZ(shader_read_len)) {
SNAPSHOT_ERR_NOMEM(device, "SHADER MEMORY");
return 0;
@@ -120,21 +116,23 @@ static size_t a3xx_snapshot_shader_memory(struct kgsl_device *device,
header->size = shader_read_len;
/* Map shader memory to kernel, for dumping */
- if (device->shader_mem_virt == NULL)
- device->shader_mem_virt = devm_ioremap(device->dev,
- device->shader_mem_phys,
- device->shader_mem_len);
+ if (IS_ERR_OR_NULL(device->shader_mem_virt)) {
+ struct resource *res;
- if (device->shader_mem_virt == NULL) {
- dev_err(device->dev,
- "Unable to map shader memory region\n");
+ res = platform_get_resource_byname(device->pdev,
+ IORESOURCE_MEM, "kgsl_3d0_shader_memory");
+
+ if (res)
+ device->shader_mem_virt =
+ devm_ioremap_resource(&device->pdev->dev, res);
+ }
+
+ if (IS_ERR_OR_NULL(device->shader_mem_virt)) {
+ dev_err(device->dev, "Unable to map the shader memory\n");
return 0;
}
- /* Now, dump shader memory to snapshot */
- for (i = 0; i < shader_read_len; i++)
- adreno_shadermem_regread(device, i, &data[i]);
-
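+ /* Copy the shader memory directly into the snapshot buffer */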
+ memcpy_fromio(data, device->shader_mem_virt, shader_read_len << 2);
return DEBUG_SECTION_SZ(shader_read_len);
}
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index f392ef4..12f5c21 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -146,6 +146,9 @@ static void a5xx_platform_setup(struct adreno_device *adreno_dev)
adreno_dev->lm_leakage = A530_DEFAULT_LEAKAGE;
adreno_dev->speed_bin = 0;
+ /* Set the GPU busy counter to use for frequency scaling */
+ adreno_dev->perfctr_pwr_lo = A5XX_RBBM_PERFCTR_RBBM_0_LO;
+
/* Check efuse bits for various capabilties */
a5xx_check_features(adreno_dev);
}
@@ -290,57 +293,69 @@ static void a5xx_remove(struct adreno_device *adreno_dev)
a5xx_critical_packet_destroy(adreno_dev);
}
-/**
- * a5xx_protect_init() - Initializes register protection on a5xx
- * @device: Pointer to the device structure
- * Performs register writes to enable protected access to sensitive
- * registers
- */
+static const struct {
+ u32 reg;
+ u32 base;
+ u32 count;
+} a5xx_protected_blocks[] = {
+ /* RBBM */
+ { A5XX_CP_PROTECT_REG_0, 0x004, 2 },
+ { A5XX_CP_PROTECT_REG_0 + 1, 0x008, 3 },
+ { A5XX_CP_PROTECT_REG_0 + 2, 0x010, 4 },
+ { A5XX_CP_PROTECT_REG_0 + 3, 0x020, 5 },
+ { A5XX_CP_PROTECT_REG_0 + 4, 0x040, 6 },
+ { A5XX_CP_PROTECT_REG_0 + 5, 0x080, 6 },
+ /* Content protection */
+ { A5XX_CP_PROTECT_REG_0 + 6, A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 4 },
+ { A5XX_CP_PROTECT_REG_0 + 7, A5XX_RBBM_SECVID_TRUST_CNTL, 1 },
+ /* CP */
+ { A5XX_CP_PROTECT_REG_0 + 8, 0x800, 6 },
+ { A5XX_CP_PROTECT_REG_0 + 9, 0x840, 3 },
+ { A5XX_CP_PROTECT_REG_0 + 10, 0x880, 5 },
+ { A5XX_CP_PROTECT_REG_0 + 11, 0xaa0, 0 },
+ /* RB */
+ { A5XX_CP_PROTECT_REG_0 + 12, 0xcc0, 0 },
+ { A5XX_CP_PROTECT_REG_0 + 13, 0xcf0, 1 },
+ /* VPC */
+ { A5XX_CP_PROTECT_REG_0 + 14, 0xe68, 3 },
+ { A5XX_CP_PROTECT_REG_0 + 15, 0xe70, 4 },
+ /* UCHE */
+ { A5XX_CP_PROTECT_REG_0 + 16, 0xe80, 4 },
+ /* A5XX_CP_PROTECT_REG_17 will be used for SMMU */
+ /* A5XX_CP_PROTECT_REG_18 - A5XX_CP_PROTECT_REG_31 are available */
+};
+
+static void _setprotectreg(struct kgsl_device *device, u32 offset,
+ u32 base, u32 count)
+{
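+ /* count is the log2 of the number of registers in the range */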
+ kgsl_regwrite(device, offset, 0x60000000 | (count << 24) | (base << 2));
+}
+
static void a5xx_protect_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- int index = 0;
- struct kgsl_protected_registers *iommu_regs;
+ u32 reg;
+ int i;
/* enable access protection to privileged registers */
kgsl_regwrite(device, A5XX_CP_PROTECT_CNTL, 0x00000007);
- /* RBBM registers */
- adreno_set_protected_registers(adreno_dev, &index, 0x4, 2);
- adreno_set_protected_registers(adreno_dev, &index, 0x8, 3);
- adreno_set_protected_registers(adreno_dev, &index, 0x10, 4);
- adreno_set_protected_registers(adreno_dev, &index, 0x20, 5);
- adreno_set_protected_registers(adreno_dev, &index, 0x40, 6);
- adreno_set_protected_registers(adreno_dev, &index, 0x80, 6);
+ for (i = 0; i < ARRAY_SIZE(a5xx_protected_blocks); i++) {
+ reg = a5xx_protected_blocks[i].reg;
- /* Content protection registers */
- adreno_set_protected_registers(adreno_dev, &index,
- A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO, 4);
- adreno_set_protected_registers(adreno_dev, &index,
- A5XX_RBBM_SECVID_TRUST_CNTL, 1);
+ _setprotectreg(device, reg, a5xx_protected_blocks[i].base,
+ a5xx_protected_blocks[i].count);
+ }
- /* CP registers */
- adreno_set_protected_registers(adreno_dev, &index, 0x800, 6);
- adreno_set_protected_registers(adreno_dev, &index, 0x840, 3);
- adreno_set_protected_registers(adreno_dev, &index, 0x880, 5);
- adreno_set_protected_registers(adreno_dev, &index, 0x0AA0, 0);
-
- /* RB registers */
- adreno_set_protected_registers(adreno_dev, &index, 0xCC0, 0);
- adreno_set_protected_registers(adreno_dev, &index, 0xCF0, 1);
-
- /* VPC registers */
- adreno_set_protected_registers(adreno_dev, &index, 0xE68, 3);
- adreno_set_protected_registers(adreno_dev, &index, 0xE70, 4);
-
- /* UCHE registers */
- adreno_set_protected_registers(adreno_dev, &index, 0xE80, ilog2(16));
-
- /* SMMU registers */
- iommu_regs = kgsl_mmu_get_prot_regs(&device->mmu);
- if (iommu_regs)
- adreno_set_protected_registers(adreno_dev, &index,
- iommu_regs->base, ilog2(iommu_regs->range));
+ /*
+ * For a530 and a540 the SMMU region is 0x20000 bytes long; on all
+ * other targets it is 0x10000 bytes. The base offset for both is 0x40000.
+ * Write it to the next available slot
+ */
+ if (adreno_is_a530(adreno_dev) || adreno_is_a540(adreno_dev))
+ _setprotectreg(device, reg + 1, 0x40000, ilog2(0x20000));
+ else
+ _setprotectreg(device, reg + 1, 0x40000, ilog2(0x10000));
}
/*
@@ -1255,24 +1270,6 @@ static void a5xx_count_throttles(struct adreno_device *adreno_dev,
adreno_dev->lm_threshold_cross = adj;
}
-static int a5xx_enable_pwr_counters(struct adreno_device *adreno_dev,
- unsigned int counter)
-{
- /*
- * On 5XX we have to emulate the PWR counters which are physically
- * missing. Program countable 6 on RBBM_PERFCTR_RBBM_0 as a substitute
- * for PWR:1. Don't emulate PWR:0 as nobody uses it and we don't want
- * to take away too many of the generic RBBM counters.
- */
-
- if (counter == 0)
- return -EINVAL;
-
- kgsl_regwrite(KGSL_DEVICE(adreno_dev), A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
-
- return 0;
-}
-
/* FW driven idle 10% throttle */
#define IDLE_10PCT 0
/* number of cycles when clock is throttled by 50% (CRC) */
@@ -1444,6 +1441,9 @@ static void a5xx_start(struct adreno_device *adreno_dev)
/* Make all blocks contribute to the GPU BUSY perf counter */
kgsl_regwrite(device, A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
+ /* Program RBBM counter 0 to report GPU busy for frequency scaling */
+ kgsl_regwrite(device, A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
+
/*
* Enable the RBBM error reporting bits. This lets us get
* useful information on failure
@@ -2093,11 +2093,11 @@ static struct adreno_perfcount_register a5xx_perfcounters_cp[] = {
A5XX_RBBM_PERFCTR_CP_7_HI, 7, A5XX_CP_PERFCTR_CP_SEL_7 },
};
-/*
- * Note that PERFCTR_RBBM_0 is missing - it is used to emulate the PWR counters.
- * See below.
- */
static struct adreno_perfcount_register a5xx_perfcounters_rbbm[] = {
+ /*
+ * A5XX_RBBM_PERFCTR_RBBM_0 is used for frequency scaling and omitted
+ * from the pool of available counters
+ */
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RBBM_1_LO,
A5XX_RBBM_PERFCTR_RBBM_1_HI, 9, A5XX_RBBM_PERFCTR_RBBM_SEL_1 },
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RBBM_2_LO,
@@ -2346,22 +2346,6 @@ static struct adreno_perfcount_register a5xx_perfcounters_alwayson[] = {
A5XX_RBBM_ALWAYSON_COUNTER_HI, -1 },
};
-/*
- * 5XX targets don't really have physical PERFCTR_PWR registers - we emulate
- * them using similar performance counters from the RBBM block. The difference
- * between using this group and the RBBM group is that the RBBM counters are
- * reloaded after a power collapse which is not how the PWR counters behaved on
- * legacy hardware. In order to limit the disruption on the rest of the system
- * we go out of our way to ensure backwards compatibility. Since RBBM counters
- * are in short supply, we don't emulate PWR:0 which nobody uses - mark it as
- * broken.
- */
-static struct adreno_perfcount_register a5xx_perfcounters_pwr[] = {
- { KGSL_PERFCOUNTER_BROKEN, 0, 0, 0, 0, -1, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_RBBM_PERFCTR_RBBM_0_LO,
- A5XX_RBBM_PERFCTR_RBBM_0_HI, -1, 0},
-};
-
static struct adreno_perfcount_register a5xx_pwrcounters_sp[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A5XX_SP_POWER_COUNTER_0_LO,
A5XX_SP_POWER_COUNTER_0_HI, -1, A5XX_SP_POWERCTR_SP_SEL_0 },
@@ -2481,8 +2465,6 @@ static struct adreno_perfcount_group a5xx_perfcounter_groups
A5XX_PERFCOUNTER_GROUP(SP, sp),
A5XX_PERFCOUNTER_GROUP(RB, rb),
A5XX_PERFCOUNTER_GROUP(VSC, vsc),
- A5XX_PERFCOUNTER_GROUP_FLAGS(PWR, pwr,
- ADRENO_PERFCOUNTER_GROUP_FIXED),
A5XX_PERFCOUNTER_GROUP(VBIF, vbif),
A5XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
ADRENO_PERFCOUNTER_GROUP_FIXED),
@@ -3184,7 +3166,6 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
.pwrlevel_change_settings = a5xx_pwrlevel_change_settings,
.read_throttling_counters = a5xx_read_throttling_counters,
.count_throttles = a5xx_count_throttles,
- .enable_pwr_counters = a5xx_enable_pwr_counters,
.preemption_pre_ibsubmit = a5xx_preemption_pre_ibsubmit,
.preemption_yield_enable =
a5xx_preemption_yield_enable,
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index a1e7bcb9..6df5726 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -7,132 +7,99 @@
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <soc/qcom/subsystem_restart.h>
+#include <linux/clk/qcom.h>
#include "adreno.h"
#include "adreno_a6xx.h"
#include "adreno_llc.h"
#include "adreno_pm4types.h"
#include "adreno_trace.h"
-#include "kgsl_gmu.h"
#include "kgsl_trace.h"
-static struct a6xx_protected_regs {
- unsigned int base;
- unsigned int count;
- int read_protect;
-} a6xx_protected_regs_group[] = {
- { 0x600, 0x51, 0 },
- { 0xAE50, 0x2, 1 },
- { 0x9624, 0x13, 1 },
- { 0x8630, 0x8, 1 },
- { 0x9E70, 0x1, 1 },
- { 0x9E78, 0x187, 1 },
- { 0xF000, 0x810, 1 },
- { 0xFC00, 0x3, 0 },
- { 0x50E, 0x0, 1 },
- { 0x50F, 0x0, 0 },
- { 0x510, 0x0, 1 },
- { 0x0, 0x4F9, 0 },
- { 0x501, 0xA, 0 },
- { 0x511, 0x44, 0 },
- { 0xE00, 0x1, 1 },
- { 0xE03, 0xB, 1 },
- { 0x8E00, 0x0, 1 },
- { 0x8E50, 0xF, 1 },
- { 0xBE02, 0x0, 1 },
- { 0xBE20, 0x11F3, 1 },
- { 0x800, 0x82, 1 },
- { 0x8A0, 0x8, 1 },
- { 0x8AB, 0x19, 1 },
- { 0x900, 0x4D, 1 },
- { 0x98D, 0x76, 1 },
- { 0x8D0, 0x23, 0 },
- { 0x980, 0x4, 0 },
- { 0xA630, 0x0, 1 },
-};
-
/* IFPC & Preemption static powerup restore list */
-static struct reg_list_pair {
- uint32_t offset;
- uint32_t val;
-} a6xx_pwrup_reglist[] = {
- { A6XX_VSC_ADDR_MODE_CNTL, 0x0 },
- { A6XX_GRAS_ADDR_MODE_CNTL, 0x0 },
- { A6XX_RB_ADDR_MODE_CNTL, 0x0 },
- { A6XX_PC_ADDR_MODE_CNTL, 0x0 },
- { A6XX_HLSQ_ADDR_MODE_CNTL, 0x0 },
- { A6XX_VFD_ADDR_MODE_CNTL, 0x0 },
- { A6XX_VPC_ADDR_MODE_CNTL, 0x0 },
- { A6XX_UCHE_ADDR_MODE_CNTL, 0x0 },
- { A6XX_SP_ADDR_MODE_CNTL, 0x0 },
- { A6XX_TPL1_ADDR_MODE_CNTL, 0x0 },
- { A6XX_UCHE_WRITE_RANGE_MAX_LO, 0x0 },
- { A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0 },
- { A6XX_UCHE_TRAP_BASE_LO, 0x0 },
- { A6XX_UCHE_TRAP_BASE_HI, 0x0 },
- { A6XX_UCHE_WRITE_THRU_BASE_LO, 0x0 },
- { A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0 },
- { A6XX_UCHE_GMEM_RANGE_MIN_LO, 0x0 },
- { A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x0 },
- { A6XX_UCHE_GMEM_RANGE_MAX_LO, 0x0 },
- { A6XX_UCHE_GMEM_RANGE_MAX_HI, 0x0 },
- { A6XX_UCHE_FILTER_CNTL, 0x0 },
- { A6XX_UCHE_CACHE_WAYS, 0x0 },
- { A6XX_UCHE_MODE_CNTL, 0x0 },
- { A6XX_RB_NC_MODE_CNTL, 0x0 },
- { A6XX_TPL1_NC_MODE_CNTL, 0x0 },
- { A6XX_SP_NC_MODE_CNTL, 0x0 },
- { A6XX_PC_DBG_ECO_CNTL, 0x0 },
- { A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE, 0x0 },
+static u32 a6xx_pwrup_reglist[] = {
+ A6XX_VSC_ADDR_MODE_CNTL,
+ A6XX_GRAS_ADDR_MODE_CNTL,
+ A6XX_RB_ADDR_MODE_CNTL,
+ A6XX_PC_ADDR_MODE_CNTL,
+ A6XX_HLSQ_ADDR_MODE_CNTL,
+ A6XX_VFD_ADDR_MODE_CNTL,
+ A6XX_VPC_ADDR_MODE_CNTL,
+ A6XX_UCHE_ADDR_MODE_CNTL,
+ A6XX_SP_ADDR_MODE_CNTL,
+ A6XX_TPL1_ADDR_MODE_CNTL,
+ A6XX_UCHE_WRITE_RANGE_MAX_LO,
+ A6XX_UCHE_WRITE_RANGE_MAX_HI,
+ A6XX_UCHE_TRAP_BASE_LO,
+ A6XX_UCHE_TRAP_BASE_HI,
+ A6XX_UCHE_WRITE_THRU_BASE_LO,
+ A6XX_UCHE_WRITE_THRU_BASE_HI,
+ A6XX_UCHE_GMEM_RANGE_MIN_LO,
+ A6XX_UCHE_GMEM_RANGE_MIN_HI,
+ A6XX_UCHE_GMEM_RANGE_MAX_LO,
+ A6XX_UCHE_GMEM_RANGE_MAX_HI,
+ A6XX_UCHE_FILTER_CNTL,
+ A6XX_UCHE_CACHE_WAYS,
+ A6XX_UCHE_MODE_CNTL,
+ A6XX_RB_NC_MODE_CNTL,
+ A6XX_TPL1_NC_MODE_CNTL,
+ A6XX_SP_NC_MODE_CNTL,
+ A6XX_PC_DBG_ECO_CNTL,
+ A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
};
/* IFPC only static powerup restore list */
-static struct reg_list_pair a6xx_ifpc_pwrup_reglist[] = {
- { A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x0 },
- { A6XX_CP_CHICKEN_DBG, 0x0 },
- { A6XX_CP_DBG_ECO_CNTL, 0x0 },
- { A6XX_CP_PROTECT_CNTL, 0x0 },
- { A6XX_CP_PROTECT_REG, 0x0 },
- { A6XX_CP_PROTECT_REG+1, 0x0 },
- { A6XX_CP_PROTECT_REG+2, 0x0 },
- { A6XX_CP_PROTECT_REG+3, 0x0 },
- { A6XX_CP_PROTECT_REG+4, 0x0 },
- { A6XX_CP_PROTECT_REG+5, 0x0 },
- { A6XX_CP_PROTECT_REG+6, 0x0 },
- { A6XX_CP_PROTECT_REG+7, 0x0 },
- { A6XX_CP_PROTECT_REG+8, 0x0 },
- { A6XX_CP_PROTECT_REG+9, 0x0 },
- { A6XX_CP_PROTECT_REG+10, 0x0 },
- { A6XX_CP_PROTECT_REG+11, 0x0 },
- { A6XX_CP_PROTECT_REG+12, 0x0 },
- { A6XX_CP_PROTECT_REG+13, 0x0 },
- { A6XX_CP_PROTECT_REG+14, 0x0 },
- { A6XX_CP_PROTECT_REG+15, 0x0 },
- { A6XX_CP_PROTECT_REG+16, 0x0 },
- { A6XX_CP_PROTECT_REG+17, 0x0 },
- { A6XX_CP_PROTECT_REG+18, 0x0 },
- { A6XX_CP_PROTECT_REG+19, 0x0 },
- { A6XX_CP_PROTECT_REG+20, 0x0 },
- { A6XX_CP_PROTECT_REG+21, 0x0 },
- { A6XX_CP_PROTECT_REG+22, 0x0 },
- { A6XX_CP_PROTECT_REG+23, 0x0 },
- { A6XX_CP_PROTECT_REG+24, 0x0 },
- { A6XX_CP_PROTECT_REG+25, 0x0 },
- { A6XX_CP_PROTECT_REG+26, 0x0 },
- { A6XX_CP_PROTECT_REG+27, 0x0 },
- { A6XX_CP_PROTECT_REG+28, 0x0 },
- { A6XX_CP_PROTECT_REG+29, 0x0 },
- { A6XX_CP_PROTECT_REG+30, 0x0 },
- { A6XX_CP_PROTECT_REG+31, 0x0 },
- { A6XX_CP_AHB_CNTL, 0x0 },
+static u32 a6xx_ifpc_pwrup_reglist[] = {
+ A6XX_RBBM_VBIF_CLIENT_QOS_CNTL,
+ A6XX_CP_CHICKEN_DBG,
+ A6XX_CP_DBG_ECO_CNTL,
+ A6XX_CP_PROTECT_CNTL,
+ A6XX_CP_PROTECT_REG,
+ A6XX_CP_PROTECT_REG+1,
+ A6XX_CP_PROTECT_REG+2,
+ A6XX_CP_PROTECT_REG+3,
+ A6XX_CP_PROTECT_REG+4,
+ A6XX_CP_PROTECT_REG+5,
+ A6XX_CP_PROTECT_REG+6,
+ A6XX_CP_PROTECT_REG+7,
+ A6XX_CP_PROTECT_REG+8,
+ A6XX_CP_PROTECT_REG+9,
+ A6XX_CP_PROTECT_REG+10,
+ A6XX_CP_PROTECT_REG+11,
+ A6XX_CP_PROTECT_REG+12,
+ A6XX_CP_PROTECT_REG+13,
+ A6XX_CP_PROTECT_REG+14,
+ A6XX_CP_PROTECT_REG+15,
+ A6XX_CP_PROTECT_REG+16,
+ A6XX_CP_PROTECT_REG+17,
+ A6XX_CP_PROTECT_REG+18,
+ A6XX_CP_PROTECT_REG+19,
+ A6XX_CP_PROTECT_REG+20,
+ A6XX_CP_PROTECT_REG+21,
+ A6XX_CP_PROTECT_REG+22,
+ A6XX_CP_PROTECT_REG+23,
+ A6XX_CP_PROTECT_REG+24,
+ A6XX_CP_PROTECT_REG+25,
+ A6XX_CP_PROTECT_REG+26,
+ A6XX_CP_PROTECT_REG+27,
+ A6XX_CP_PROTECT_REG+28,
+ A6XX_CP_PROTECT_REG+29,
+ A6XX_CP_PROTECT_REG+30,
+ A6XX_CP_PROTECT_REG+31,
+ A6XX_CP_AHB_CNTL,
};
-static struct reg_list_pair a615_pwrup_reglist[] = {
- { A6XX_UCHE_GBIF_GX_CONFIG, 0x0 },
+/* a620 and a650 need to program A6XX_CP_PROTECT_REG_47 for the infinite span */
+static u32 a650_pwrup_reglist[] = {
+ A6XX_CP_PROTECT_REG + 47,
};
-static struct reg_list_pair a6xx_ifpc_perfctr_reglist[] = {
- { A6XX_RBBM_PERFCTR_CNTL, 0x0 },
+static u32 a615_pwrup_reglist[] = {
+ A6XX_UCHE_GBIF_GX_CONFIG,
+};
+
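+/*
+ * A612 needs the RBBM perfcounter enable setting restored so that the
+ * counters keep counting across IFPC
+ */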
+static u32 a612_pwrup_reglist[] = {
+ A6XX_RBBM_PERFCTR_CNTL,
};
static void _update_always_on_regs(struct adreno_device *adreno_dev)
@@ -146,21 +113,6 @@ static void _update_always_on_regs(struct adreno_device *adreno_dev)
A6XX_CP_ALWAYS_ON_COUNTER_HI;
}
-static void a6xx_pwrup_reglist_init(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- if (kgsl_allocate_global(device, &adreno_dev->pwrup_reglist,
- PAGE_SIZE, 0, KGSL_MEMDESC_CONTIG | KGSL_MEMDESC_PRIVILEGED,
- "powerup_register_list")) {
- adreno_dev->pwrup_reglist.gpuaddr = 0;
- return;
- }
-
- kgsl_sharedmem_set(device, &adreno_dev->pwrup_reglist, 0, 0,
- PAGE_SIZE);
-}
-
static void a6xx_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -186,64 +138,42 @@ static void a6xx_init(struct adreno_device *adreno_dev)
* If the GMU is not enabled, rewrite the offset for the always on
* counters to point to the CP always on instead of GMU always on
*/
- if (!gmu_core_isenabled(KGSL_DEVICE(adreno_dev)))
+ if (!gmu_core_isenabled(device))
_update_always_on_regs(adreno_dev);
- a6xx_pwrup_reglist_init(adreno_dev);
+ kgsl_allocate_global(device, &adreno_dev->pwrup_reglist,
+ PAGE_SIZE, 0, KGSL_MEMDESC_CONTIG | KGSL_MEMDESC_PRIVILEGED,
+ "powerup_register_list");
}
-/**
- * a6xx_protect_init() - Initializes register protection on a6xx
- * @device: Pointer to the device structure
- * Performs register writes to enable protected access to sensitive
- * registers
- */
static void a6xx_protect_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_protected_registers *mmu_prot =
- kgsl_mmu_get_prot_regs(&device->mmu);
- int i, num_sets;
- int req_sets = ARRAY_SIZE(a6xx_protected_regs_group);
- int max_sets = adreno_dev->gpucore->num_protected_regs;
- unsigned int mmu_base = 0, mmu_range = 0, cur_range;
+ const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
+ const struct a6xx_protected_regs *regs = a6xx_core->protected_regs;
+ int i;
- /* enable access protection to privileged registers */
- kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL, 0x00000003);
+ /*
+ * Enable access protection to privileged registers, fault on an access
+ * protect violation and select the last span to protect from the start
+ * address all the way to the end of the register address space
+ */
+ kgsl_regwrite(device, A6XX_CP_PROTECT_CNTL,
+ (1 << 0) | (1 << 1) | (1 << 3));
- if (mmu_prot) {
- mmu_base = mmu_prot->base;
- mmu_range = mmu_prot->range;
- req_sets += DIV_ROUND_UP(mmu_range, 0x2000);
- }
+ /* Program each register defined by the core definition */
+ for (i = 0; regs[i].reg; i++) {
+ u32 count;
- WARN(req_sets > max_sets,
- "Size exceeds the num of protection regs available\n");
+ /*
+ * This is the offset of the end register as counted from the
+ * start, i.e. # of registers in the range - 1
+ */
+ count = regs[i].end - regs[i].start;
- /* Protect GPU registers */
- num_sets = min_t(unsigned int,
- ARRAY_SIZE(a6xx_protected_regs_group), max_sets);
- for (i = 0; i < num_sets; i++) {
- struct a6xx_protected_regs *regs =
- &a6xx_protected_regs_group[i];
-
- kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
- regs->base | (regs->count << 18) |
- (regs->read_protect << 31));
- }
-
- /* Protect MMU registers */
- if (mmu_prot) {
- while ((i < max_sets) && (mmu_range > 0)) {
- cur_range = min_t(unsigned int, mmu_range,
- 0x2000);
- kgsl_regwrite(device, A6XX_CP_PROTECT_REG + i,
- mmu_base | ((cur_range - 1) << 18) | (1 << 31));
-
- mmu_base += cur_range;
- mmu_range -= cur_range;
- i++;
- }
+ kgsl_regwrite(device, regs[i].reg,
+ regs[i].start | (count << 18) |
+ (regs[i].noaccess << 31));
}
}
@@ -270,7 +200,7 @@ __get_rbbm_clock_cntl_on(struct adreno_device *adreno_dev)
{
if (adreno_is_a630(adreno_dev))
return 0x8AA8AA02;
- else if (adreno_is_a612(adreno_dev))
+ else if (adreno_is_a612(adreno_dev) || adreno_is_a610(adreno_dev))
return 0xAAA8AA82;
else
return 0x8AA8AA82;
@@ -349,11 +279,12 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
/*
* Disable SP clock before programming HWCG registers.
- * A612 GPU is not having the GX power domain. Hence
- * skip GMU_GX registers for A12.
+ * A612 and A610 GPUs do not have the GX power domain, so
+ * skip the GMU_GX registers for them.
*/
- if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev))
+ if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev) &&
+ !adreno_is_a610(adreno_dev))
gmu_core_regrmw(device,
A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
@@ -363,10 +294,11 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
/*
* Enable SP clock after programming HWCG registers.
- * A612 GPU is not having the GX power domain. Hence
- * skip GMU_GX registers for A612.
+ * A612 and A610 GPUs do not have the GX power domain, so
+ * skip the GMU_GX registers for them.
*/
- if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev))
+ if (gmu_core_isenabled(device) && !adreno_is_a612(adreno_dev) &&
+ !adreno_is_a610(adreno_dev))
gmu_core_regrmw(device,
A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
@@ -375,80 +307,63 @@ static void a6xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
on ? __get_rbbm_clock_cntl_on(adreno_dev) : 0);
}
+struct a6xx_reglist_list {
+ u32 *regs;
+ u32 count;
+};
+
+#define REGLIST(_a) \
+ (struct a6xx_reglist_list) { .regs = _a, .count = ARRAY_SIZE(_a), }
+
static void a6xx_patch_pwrup_reglist(struct adreno_device *adreno_dev)
{
- uint32_t i;
- struct cpu_gpu_lock *lock;
- struct reg_list_pair *r;
+ struct a6xx_reglist_list reglist[3];
+ void *ptr = adreno_dev->pwrup_reglist.hostptr;
+ struct cpu_gpu_lock *lock = ptr;
+ int items = 0, i, j;
+ u32 *dest = ptr + sizeof(*lock);
- /* Set up the register values */
- for (i = 0; i < ARRAY_SIZE(a6xx_ifpc_pwrup_reglist); i++) {
- r = &a6xx_ifpc_pwrup_reglist[i];
- kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
+ /* Static IFPC-only registers */
+ reglist[items++] = REGLIST(a6xx_ifpc_pwrup_reglist);
+
+ /* Static IFPC + preemption registers */
+ reglist[items++] = REGLIST(a6xx_pwrup_reglist);
+
+ /* Add target specific registers */
+ if (adreno_is_a612(adreno_dev))
+ reglist[items++] = REGLIST(a612_pwrup_reglist);
+ else if (adreno_is_a615_family(adreno_dev))
+ reglist[items++] = REGLIST(a615_pwrup_reglist);
+ else if (adreno_is_a650(adreno_dev) || adreno_is_a620(adreno_dev))
+ reglist[items++] = REGLIST(a650_pwrup_reglist);
+
+ /*
+ * For each entry in each of the lists, write the offset and the current
+ * register value into the GPU buffer
+ */
+ for (i = 0; i < items; i++) {
+ u32 *r = reglist[i].regs;
+
+ for (j = 0; j < reglist[i].count; j++) {
+ *dest++ = r[j];
+ kgsl_regread(KGSL_DEVICE(adreno_dev), r[j], dest++);
+ }
+
+ lock->list_length += reglist[i].count * 2;
}
- for (i = 0; i < ARRAY_SIZE(a6xx_pwrup_reglist); i++) {
- r = &a6xx_pwrup_reglist[i];
- kgsl_regread(KGSL_DEVICE(adreno_dev), r->offset, &r->val);
- }
-
- lock = (struct cpu_gpu_lock *) adreno_dev->pwrup_reglist.hostptr;
- lock->flag_ucode = 0;
- lock->flag_kmd = 0;
- lock->turn = 0;
-
/*
* The overall register list is composed of
* 1. Static IFPC-only registers
* 2. Static IFPC + preemption registers
- * 2. Dynamic IFPC + preemption registers (ex: perfcounter selects)
+ * 3. Dynamic IFPC + preemption registers (ex: perfcounter selects)
*
* The CP views the second and third entries as one dynamic list
- * starting from list_offset. Thus, list_length should be the sum
- * of all three lists above (of which the third list will start off
- * empty). And list_offset should be specified as the size in dwords
- * of the static IFPC-only register list.
+ * starting from list_offset. list_length should be the total dwords in
+ * all the lists and list_offset should be specified as the size in
+ * dwords of the first entry in the list.
*/
- lock->list_length = (sizeof(a6xx_ifpc_pwrup_reglist) +
- sizeof(a6xx_pwrup_reglist)) >> 2;
- lock->list_offset = sizeof(a6xx_ifpc_pwrup_reglist) >> 2;
-
- memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock),
- a6xx_ifpc_pwrup_reglist, sizeof(a6xx_ifpc_pwrup_reglist));
- memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
- + sizeof(a6xx_ifpc_pwrup_reglist), a6xx_pwrup_reglist,
- sizeof(a6xx_pwrup_reglist));
-
- if (adreno_is_a615_family(adreno_dev)) {
- for (i = 0; i < ARRAY_SIZE(a615_pwrup_reglist); i++) {
- r = &a615_pwrup_reglist[i];
- kgsl_regread(KGSL_DEVICE(adreno_dev),
- r->offset, &r->val);
- }
-
- memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
- + sizeof(a6xx_ifpc_pwrup_reglist)
- + sizeof(a6xx_pwrup_reglist), a615_pwrup_reglist,
- sizeof(a615_pwrup_reglist));
-
- lock->list_length += sizeof(a615_pwrup_reglist) >> 2;
- }
-
- if (ADRENO_FEATURE(adreno_dev, ADRENO_PERFCTRL_RETAIN)) {
- for (i = 0; i < ARRAY_SIZE(a6xx_ifpc_perfctr_reglist); i++) {
- r = &a6xx_ifpc_perfctr_reglist[i];
- kgsl_regread(KGSL_DEVICE(adreno_dev),
- r->offset, &r->val);
- }
-
- memcpy(adreno_dev->pwrup_reglist.hostptr + sizeof(*lock)
- + sizeof(a6xx_ifpc_pwrup_reglist)
- + sizeof(a6xx_pwrup_reglist),
- a6xx_ifpc_perfctr_reglist,
- sizeof(a6xx_ifpc_perfctr_reglist));
-
- lock->list_length += sizeof(a6xx_ifpc_perfctr_reglist) >> 2;
- }
+ lock->list_offset = reglist[0].count * 2;
}
/*
@@ -517,7 +432,7 @@ static void a6xx_start(struct adreno_device *adreno_dev)
if (ADRENO_GPUREV(adreno_dev) >= ADRENO_REV_A640) {
kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x02000140);
kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);
- } else if (adreno_is_a612(adreno_dev)) {
+ } else if (adreno_is_a612(adreno_dev) || adreno_is_a610(adreno_dev)) {
kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_2, 0x00800060);
kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x40201b16);
} else {
@@ -525,8 +440,8 @@ static void a6xx_start(struct adreno_device *adreno_dev)
kgsl_regwrite(device, A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362C);
}
- if (adreno_is_a612(adreno_dev)) {
- /* For A612 Mem pool size is reduced to 48 */
+ if (adreno_is_a612(adreno_dev) || adreno_is_a610(adreno_dev)) {
+ /* For A612 and A610 the mem pool size is reduced to 48 */
kgsl_regwrite(device, A6XX_CP_MEM_POOL_SIZE, 48);
kgsl_regwrite(device, A6XX_CP_MEM_POOL_DBG_ADDR, 47);
} else {
@@ -543,6 +458,11 @@ static void a6xx_start(struct adreno_device *adreno_dev)
/* Turn on performance counters */
kgsl_regwrite(device, A6XX_RBBM_PERFCTR_CNTL, 0x1);
+ /* Turn on the IFPC counter (countable 4 on XOCLK4) */
+ if (gmu_core_isenabled(device))
+ gmu_core_regrmw(device, A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1,
+ 0xff, 0x4);
+
/* Turn on GX_MEM retention */
if (gmu_core_isenabled(device) && adreno_is_a612(adreno_dev)) {
kgsl_regwrite(device, A6XX_RBBM_BLOCK_GX_RETENTION_CNTL, 0x7FB);
@@ -600,9 +520,8 @@ static void a6xx_start(struct adreno_device *adreno_dev)
kgsl_regwrite(device, A6XX_UCHE_MODE_CNTL, (mal << 23) |
(lower_bit << 21));
- /* Set hang detection threshold to 0x3FFFFF * 16 cycles */
kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
- (1 << 30) | 0x3fffff);
+ (1 << 30) | a6xx_core->hang_detect_cycles);
kgsl_regwrite(device, A6XX_UCHE_CLIENT_PF, 1);
@@ -624,13 +543,22 @@ static void a6xx_start(struct adreno_device *adreno_dev)
kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
/* Set the bit vccCacheSkipDis=1 to get rid of TSEskip logic */
- kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 9));
+ if (a6xx_core->disable_tseskip)
+ kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 9));
/* Enable the GMEM save/restore feature for preemption */
if (adreno_is_preemption_enabled(adreno_dev))
kgsl_regwrite(device, A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE,
0x1);
+ /*
+ * Enable GMU power counter 0 to count GPU busy. This is applicable to
+ * all a6xx targets
+ */
+ kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xff000000);
+ kgsl_regrmw(device, A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xff, 0x20);
+ kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0x1);
+
a6xx_protect_init(adreno_dev);
if (!patch_reglist && (adreno_dev->pwrup_reglist.gpuaddr != 0)) {
@@ -929,9 +857,6 @@ static int a6xx_rb_start(struct adreno_device *adreno_dev)
*/
static int a6xx_sptprac_enable(struct adreno_device *adreno_dev)
{
- if (adreno_is_a612(adreno_dev))
- return 0;
-
return a6xx_gmu_sptprac_enable(adreno_dev);
}
@@ -941,9 +866,6 @@ static int a6xx_sptprac_enable(struct adreno_device *adreno_dev)
*/
static void a6xx_sptprac_disable(struct adreno_device *adreno_dev)
{
- if (adreno_is_a612(adreno_dev))
- return;
-
a6xx_gmu_sptprac_disable(adreno_dev);
}
@@ -1057,7 +979,6 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
{
int ret;
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
struct adreno_firmware *sqe_fw = ADRENO_FW(adreno_dev, ADRENO_FW_SQE);
const struct adreno_a6xx_core *a6xx_core = to_a6xx_core(adreno_dev);
@@ -1067,16 +988,6 @@ static int a6xx_microcode_read(struct adreno_device *adreno_dev)
return ret;
}
- ret = gmu_core_dev_load_firmware(device);
- if (ret)
- return ret;
-
- ret = gmu_memory_probe(device);
- if (ret)
- return ret;
-
- hfi_init(gmu);
-
return 0;
}
@@ -1104,40 +1015,52 @@ static int a6xx_soft_reset(struct adreno_device *adreno_dev)
return 0;
}
-/* Number of throttling counters for A6xx */
-#define A6XX_GMU_THROTTLE_COUNTERS 3
-
static int64_t a6xx_read_throttling_counters(struct adreno_device *adreno_dev)
{
- int i;
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
int64_t adj = -1;
- uint32_t counts[A6XX_GMU_THROTTLE_COUNTERS];
+ u32 a, b, c;
struct adreno_busy_data *busy = &adreno_dev->busy_data;
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
+ !test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
return 0;
- for (i = 0; i < ARRAY_SIZE(counts); i++) {
- if (!adreno_dev->gpmu_throttle_counters[i])
- counts[i] = 0;
- else
- counts[i] = counter_delta(KGSL_DEVICE(adreno_dev),
- adreno_dev->gpmu_throttle_counters[i],
- &busy->throttle_cycles[i]);
- }
+ /* The counters are selected in a6xx_gmu_enable_lm() */
+ a = counter_delta(device, A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L,
+ &busy->throttle_cycles[0]);
+
+ b = counter_delta(device, A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L,
+ &busy->throttle_cycles[1]);
+
+ c = counter_delta(device, A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L,
+ &busy->throttle_cycles[2]);
+
/*
* The adjustment is the number of cycles lost to throttling, which
* is calculated as a weighted average of the cycles throttled
- * at 15%, 50%, and 90%. The adjustment is negative because in A6XX,
+ * at different levels. The adjustment is negative because in A6XX,
* the busy count includes the throttled cycles. Therefore, we want
* to remove them to prevent appearing to be busier than
* we actually are.
*/
- adj *= ((counts[0] * 15) + (counts[1] * 50) + (counts[2] * 90)) / 100;
+ if (adreno_is_a620(adreno_dev) || adreno_is_a650(adreno_dev))
+ /*
+ * With the newer generations, CRC throttle from SIDs of 0x14
+ * and above cannot be observed in power counters. Since 90%
+ * throttle uses SID 0x16 the adjustment calculation needs
+ * correction. The throttling is in increments of 4.2%, and the
+ * 91.7% counter does a weighted count by the value of sid used
+ * which are taken into consideration for the final formula.
+ */
+ adj *= ((a * 42) + (b * 500) +
+ ((((int64_t)c - a - b * 12) / 22) * 917)) / 1000;
+ else
+ adj *= ((a * 5) + (b * 50) + (c * 90)) / 100;
- trace_kgsl_clock_throttling(0, counts[1], counts[2],
- counts[0], adj);
+ trace_kgsl_clock_throttling(0, b, c, a, adj);
+
return adj;
}
@@ -1345,47 +1268,66 @@ static void a6xx_llc_enable_overrides(struct adreno_device *adreno_dev)
A6XX_GPU_CX_MISC_SYSTEM_CACHE_CNTL_0, 0x3);
}
-static const char *fault_block[8] = {
- [0] = "CP",
- [1] = "UCHE",
- [2] = "VFD",
- [3] = "UCHE",
- [4] = "CCU",
- [5] = "unknown",
- [6] = "CDP Prefetch",
- [7] = "GPMU",
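+/*
+ * The value read from A6XX_UCHE_CLIENT_PF selects the row and the iommu
+ * mid (1, 2 or 3) selects the column of possible UCHE clients
+ */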
+static const char *uche_client[7][3] = {
+ {"SP | VSC | VPC | HLSQ | PC | LRZ", "TP", "VFD"},
+ {"VSC | VPC | HLSQ | PC | LRZ", "TP | VFD", "SP"},
+ {"SP | VPC | HLSQ | PC | LRZ", "TP | VFD", "VSC"},
+ {"SP | VSC | HLSQ | PC | LRZ", "TP | VFD", "VPC"},
+ {"SP | VSC | VPC | PC | LRZ", "TP | VFD", "HLSQ"},
+ {"SP | VSC | VPC | HLSQ | LRZ", "TP | VFD", "PC"},
+ {"SP | VSC | VPC | HLSQ | PC", "TP | VFD", "LRZ"},
};
-static const char *uche_client[8] = {
- [0] = "VFD",
- [1] = "SP",
- [2] = "VSC",
- [3] = "VPC",
- [4] = "HLSQ",
- [5] = "PC",
- [6] = "LRZ",
- [7] = "unknown",
-};
+#define SCOOBYDOO 0x5c00bd00
-static const char *a6xx_iommu_fault_block(struct adreno_device *adreno_dev,
- unsigned int fsynr1)
+static const char *a6xx_fault_block_uche(struct kgsl_device *device,
+ unsigned int mid)
{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- unsigned int client_id;
- unsigned int uche_client_id;
-
- client_id = fsynr1 & 0xff;
-
- if (client_id >= ARRAY_SIZE(fault_block))
- return "unknown";
- else if (client_id != 3)
- return fault_block[client_id];
+ unsigned int uche_client_id = 0;
+ static char str[40];
mutex_lock(&device->mutex);
+
+ if (!kgsl_state_is_awake(device)) {
+ mutex_unlock(&device->mutex);
+ return "UCHE: unknown";
+ }
+
kgsl_regread(device, A6XX_UCHE_CLIENT_PF, &uche_client_id);
mutex_unlock(&device->mutex);
- return uche_client[uche_client_id & A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK];
+ /* Ignore the value if the gpu is in IFPC */
+ if (uche_client_id == SCOOBYDOO)
+ return "UCHE: unknown";
+
+ uche_client_id &= A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK;
+ snprintf(str, sizeof(str), "UCHE: %s",
+ uche_client[uche_client_id][mid - 1]);
+
+ return str;
+}
+
+static const char *a6xx_iommu_fault_block(struct kgsl_device *device,
+ unsigned int fsynr1)
+{
+ unsigned int mid = fsynr1 & 0xff;
+
+ switch (mid) {
+ case 0:
+ return "CP";
+ case 1:
+ case 2:
+ case 3:
+ return a6xx_fault_block_uche(device, mid);
+ case 4:
+ return "CCU";
+ case 6:
+ return "CDP Prefetch";
+ case 7:
+ return "GPMU";
+ }
+
+ return "Unknown";
}
static void a6xx_cp_callback(struct adreno_device *adreno_dev, int bit)
@@ -2211,50 +2153,11 @@ static struct adreno_perfcount_register a6xx_perfcounters_gbif_pwr[] = {
A6XX_GBIF_PWR_CNT_HIGH2, -1, A6XX_GBIF_PERF_PWR_CNT_EN },
};
-static struct adreno_perfcount_register a6xx_perfcounters_pwr[] = {
- { KGSL_PERFCOUNTER_BROKEN, 0, 0, 0, 0, -1, 0 },
- { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
- A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
- A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1, 0 },
-};
-
static struct adreno_perfcount_register a6xx_perfcounters_alwayson[] = {
{ KGSL_PERFCOUNTER_NOT_USED, 0, 0, A6XX_CP_ALWAYS_ON_COUNTER_LO,
A6XX_CP_ALWAYS_ON_COUNTER_HI, -1 },
};
-static struct adreno_perfcount_register a6xx_pwrcounters_gpmu[] = {
- /*
- * A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0 is used for the GPU
- * busy count (see the PWR group above). Mark it as broken
- * so it's not re-used.
- */
- { KGSL_PERFCOUNTER_BROKEN, 0, 0,
- A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L,
- A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H, -1,
- A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
- { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
- A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L,
- A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H, -1,
- A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
- { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
- A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L,
- A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H, -1,
- A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
- { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
- A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L,
- A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H, -1,
- A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, },
- { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
- A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L,
- A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H, -1,
- A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
- { KGSL_PERFCOUNTER_NOT_USED, 0, 0,
- A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L,
- A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H, -1,
- A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1, },
-};
-
/*
* ADRENO_PERFCOUNTER_GROUP_RESTORE flag is enabled by default
* because most of the perfcounter groups need to be restored
@@ -2293,11 +2196,8 @@ static struct adreno_perfcount_group a6xx_perfcounter_groups
A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF, vbif, 0),
A6XX_PERFCOUNTER_GROUP_FLAGS(VBIF_PWR, vbif_pwr,
ADRENO_PERFCOUNTER_GROUP_FIXED),
- A6XX_PERFCOUNTER_GROUP_FLAGS(PWR, pwr,
- ADRENO_PERFCOUNTER_GROUP_FIXED),
A6XX_PERFCOUNTER_GROUP_FLAGS(ALWAYSON, alwayson,
ADRENO_PERFCOUNTER_GROUP_FIXED),
- A6XX_POWER_COUNTER_GROUP(GPMU, gpmu),
};
static struct adreno_perfcounters a6xx_perfcounters = {
@@ -2305,33 +2205,6 @@ static struct adreno_perfcounters a6xx_perfcounters = {
ARRAY_SIZE(a6xx_perfcounter_groups),
};
-/* Program the GMU power counter to count GPU busy cycles */
-static int a6xx_enable_pwr_counters(struct adreno_device *adreno_dev,
- unsigned int counter)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- /*
- * We have a limited number of power counters. Since we're not using
- * total GPU cycle count, return error if requested.
- */
- if (counter == 0)
- return -EINVAL;
-
- /* We can use GPU without GMU and allow it to count GPU busy cycles */
- if (!gmu_core_isenabled(device) &&
- !kgsl_is_register_offset(device,
- A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK))
- return -ENODEV;
-
- kgsl_regwrite(device, A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK, 0xFF000000);
- kgsl_regrmw(device,
- A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0, 0xFF, 0x20);
- kgsl_regwrite(device, A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0x1);
-
- return 0;
-}
-
static void a6xx_efuse_speed_bin(struct adreno_device *adreno_dev)
{
unsigned int val;
@@ -2391,8 +2264,24 @@ static void a6xx_platform_setup(struct adreno_device *adreno_dev)
gpudev->vbif_xin_halt_ctrl0_mask =
A6XX_VBIF_XIN_HALT_CTRL0_MASK;
+ /* Set the GPU busy counter for frequency scaling */
+ adreno_dev->perfctr_pwr_lo = A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L;
+
+ /* Set the counter for IFPC */
+ if (gmu_core_isenabled(KGSL_DEVICE(adreno_dev)))
+ adreno_dev->perfctr_ifpc_lo =
+ A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L;
+
/* Check efuse bits for various capabilities */
a6xx_check_features(adreno_dev);
+
+ /*
+ * A640 GPUs use a fuse to determine which frequency plan the GPU
+ * should use. For A650 GPUs, enable the higher frequency plan
+ * based on the LM feature flag instead.
+ */
+ if (adreno_is_a650(adreno_dev) && ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+ adreno_dev->speed_bin = 1;
}
@@ -2557,33 +2446,20 @@ static const struct adreno_reg_offsets a6xx_reg_offsets = {
.offset_0 = ADRENO_REG_REGISTER_MAX,
};
-static void a6xx_perfcounter_init(struct adreno_device *adreno_dev)
+static int cpu_gpu_lock(struct cpu_gpu_lock *lock)
{
- /*
- * A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4/5 is not present on A612.
- * Mark them as broken so that they can't be used.
- */
- if (adreno_is_a612(adreno_dev)) {
- a6xx_pwrcounters_gpmu[4].countable = KGSL_PERFCOUNTER_BROKEN;
- a6xx_pwrcounters_gpmu[5].countable = KGSL_PERFCOUNTER_BROKEN;
- }
-}
-
-static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
- struct adreno_perfcount_register *reg, bool update_reg)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct cpu_gpu_lock *lock = adreno_dev->pwrup_reglist.hostptr;
- struct reg_list_pair *reg_pair = (struct reg_list_pair *)(lock + 1);
- unsigned int i;
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
- int ret = 0;
+ /* Indicate that the CPU wants the lock */
lock->flag_kmd = 1;
- /* Write flag_kmd before turn */
+
+ /* Post the request */
wmb();
+
+ /* Give the GPU the tie-break, then wait for our turn */
lock->turn = 0;
- /* Write these fields before looping */
+
+ /* Finish all memory transactions before moving on */
mb();
/*
@@ -2595,60 +2471,99 @@ static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
cpu_relax();
/* Get the latest updates from GPU */
rmb();
- /*
- * Make sure we wait at least 1sec for the lock,
- * if we did not get it after 1sec return an error.
- */
- if (time_after(jiffies, timeout) &&
- (lock->flag_ucode == 1 && lock->turn == 0)) {
- ret = -EBUSY;
- goto unlock;
- }
+
+ if (time_after(jiffies, timeout))
+ break;
}
- /* Read flag_ucode and turn before list_length */
- rmb();
+ if (lock->flag_ucode == 1 && lock->turn == 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static void cpu_gpu_unlock(struct cpu_gpu_lock *lock)
+{
+ /* Make sure all writes are done before releasing the lock */
+ wmb();
+ lock->flag_kmd = 0;
+}
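The flag_kmd/flag_ucode/turn triple implements a Peterson-style two-party lock between the kernel (KMD) and the GPU microcode: each side raises its flag, concedes the tie-break via turn, then spins while the other side both wants the lock and holds the turn. A minimal sketch of the firmware side, assuming it simply mirrors cpu_gpu_lock() above — the helper name and the exact microcode behavior are illustrative, not taken from the actual GPU firmware:

	/* Hypothetical ucode-side counterpart of cpu_gpu_lock() */
	static int ucode_lock(struct cpu_gpu_lock *lock)
	{
		lock->flag_ucode = 1;	/* Announce intent */
		lock->turn = 1;		/* Concede the tie-break to the CPU */

		/* Spin while the CPU wants the lock and holds the turn */
		while (lock->flag_kmd == 1 && lock->turn == 1)
			cpu_relax();

		return 0;
	}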
+
+static int a6xx_perfcounter_update(struct adreno_device *adreno_dev,
+ struct adreno_perfcount_register *reg, bool update_reg)
+{
+ void *ptr = adreno_dev->pwrup_reglist.hostptr;
+ struct cpu_gpu_lock *lock = ptr;
+ u32 *data = ptr + sizeof(*lock);
+ int i, offset = 0;
+
+ if (cpu_gpu_lock(lock)) {
+ cpu_gpu_unlock(lock);
+ return -EBUSY;
+ }
+
/*
* If the perfcounter select register is already present in reglist
* update it, otherwise append the <select register, value> pair to
* the end of the list.
*/
- for (i = 0; i < lock->list_length >> 1; i++)
- if (reg_pair[i].offset == reg->select)
- break;
- /*
- * If the perfcounter selct register is not present overwrite last entry
- * with new entry and add RBBM perf counter enable at the end.
- */
- if (ADRENO_FEATURE(adreno_dev, ADRENO_PERFCTRL_RETAIN) &&
- (i == lock->list_length >> 1)) {
- reg_pair[i-1].offset = reg->select;
- reg_pair[i-1].val = reg->countable;
+ for (i = 0; i < lock->list_length >> 1; i++) {
+ if (data[offset] == reg->select) {
+ data[offset + 1] = reg->countable;
+ goto update;
+ }
- /* Enable perf counter after performance counter selections */
- reg_pair[i].offset = A6XX_RBBM_PERFCTR_CNTL;
- reg_pair[i].val = 1;
-
- } else {
- /*
- * If perf counter select register is already present in reglist
- * just update list without adding the RBBM perfcontrol enable.
- */
- reg_pair[i].offset = reg->select;
- reg_pair[i].val = reg->countable;
+ offset += 2;
}
- if (i == lock->list_length >> 1)
- lock->list_length += 2;
+ /*
+ * For a612 targets A6XX_RBBM_PERFCTR_CNTL needs to be the last entry,
+ * so overwrite the existing A6XX_RBBM_PERFCTR_CNTL entry and add it back to
+ * the end. All other targets just append the new counter to the end.
+ */
+ if (adreno_is_a612(adreno_dev)) {
+ data[offset - 2] = reg->select;
+ data[offset - 1] = reg->countable;
+ data[offset] = A6XX_RBBM_PERFCTR_CNTL;
+ data[offset + 1] = 1;
+ } else {
+ data[offset] = reg->select;
+ data[offset + 1] = reg->countable;
+ }
+
+ lock->list_length += 2;
+
+update:
if (update_reg)
- kgsl_regwrite(device, reg->select, reg->countable);
+ kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg->select,
+ reg->countable);
-unlock:
- /* All writes done before releasing the lock */
- wmb();
- lock->flag_kmd = 0;
- return ret;
+ cpu_gpu_unlock(lock);
+ return 0;
+}
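For reference, the power-up reglist that a6xx_perfcounter_update() edits is a cpu_gpu_lock header followed by packed <select, value> dword pairs, with lock->list_length counting dwords (hence the list_length >> 1 pair count). A small illustrative walker built only from names in the code above — dump_pwrup_reglist itself is hypothetical:

	/* Illustrative only: dump the <select, value> pairs in the reglist */
	static void dump_pwrup_reglist(void *hostptr)
	{
		struct cpu_gpu_lock *lock = hostptr;
		u32 *data = hostptr + sizeof(*lock);
		int i;

		for (i = 0; i < lock->list_length >> 1; i++)
			pr_debug("reglist[%d]: select=0x%x value=0x%x\n",
				 i, data[i * 2], data[i * 2 + 1]);
	}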
+
+static void a6xx_clk_set_options(struct adreno_device *adreno_dev,
+ const char *name, struct clk *clk, bool on)
+{
+ if (!adreno_is_a610(adreno_dev))
+ return;
+
+ /* Handle clock settings for GFX PSCBCs */
+ if (on) {
+ if (!strcmp(name, "mem_iface_clk")) {
+ clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
+ clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
+ } else if (!strcmp(name, "core_clk")) {
+ clk_set_flags(clk, CLKFLAG_RETAIN_PERIPH);
+ clk_set_flags(clk, CLKFLAG_RETAIN_MEM);
+ }
+ } else {
+ if (!strcmp(name, "core_clk")) {
+ clk_set_flags(clk, CLKFLAG_NORETAIN_PERIPH);
+ clk_set_flags(clk, CLKFLAG_NORETAIN_MEM);
+ }
+ }
}
struct adreno_gpudev adreno_a6xx_gpudev = {
@@ -2664,7 +2579,6 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
.regulator_enable = a6xx_sptprac_enable,
.regulator_disable = a6xx_sptprac_disable,
.perfcounters = &a6xx_perfcounters,
- .enable_pwr_counters = a6xx_enable_pwr_counters,
.read_throttling_counters = a6xx_read_throttling_counters,
.microcode_read = a6xx_microcode_read,
.enable_64bit = a6xx_enable_64bit,
@@ -2686,7 +2600,7 @@ struct adreno_gpudev adreno_a6xx_gpudev = {
.preemption_context_destroy = a6xx_preemption_context_destroy,
.sptprac_is_on = a6xx_sptprac_is_on,
.ccu_invalidate = a6xx_ccu_invalidate,
- .perfcounter_init = a6xx_perfcounter_init,
.perfcounter_update = a6xx_perfcounter_update,
.coresight = {&a6xx_coresight, &a6xx_coresight_cx},
+ .clk_set_options = a6xx_clk_set_options,
};
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index ae6839a..ed45f1a 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -11,6 +11,26 @@
#include "a6xx_reg.h"
/**
+ * struct a6xx_protected_regs - container for a protect register span
+ */
+struct a6xx_protected_regs {
+ /** @reg: Physical protected mode register to write to */
+ u32 reg;
+ /** @start: Dword offset of the starting register in the range */
+ u32 start;
+ /**
+ * @end: Dword offset of the ending register in the range
+ * (inclusive)
+ */
+ u32 end;
+ /**
+ * @noaccess: 1 if the register should not be accessible from
+ * userspace, 0 if it can be read (but not written)
+ */
+ u32 noaccess;
+};
+
+/**
* struct adreno_a6xx_core - a6xx specific GPU core definitions
*/
struct adreno_a6xx_core {
@@ -42,6 +62,12 @@ struct adreno_a6xx_core {
bool veto_fal10;
/** @pdc_in_aop: True if PDC programmed in AOP */
bool pdc_in_aop;
+ /** @hang_detect_cycles: Hang detect counter timeout value */
+ u32 hang_detect_cycles;
+ /** @protected_regs: Array of protected registers for the target */
+ const struct a6xx_protected_regs *protected_regs;
+ /** @disable_tseskip: True if TSESkip logic is disabled */
+ bool disable_tseskip;
};
#define CP_CLUSTER_FE 0x0
diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
index 4c1856fc..184fd73 100644
--- a/drivers/gpu/msm/adreno_a6xx_gmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -228,7 +228,7 @@ static int a6xx_load_pdc_ucode(struct kgsl_device *device)
_regwrite(cfg, PDC_GPU_TCS3_CMD0_MSGID + PDC_CMD_OFFSET, 0x10108);
_regwrite(cfg, PDC_GPU_TCS3_CMD0_ADDR + PDC_CMD_OFFSET, 0x30000);
- if (adreno_is_a618(adreno_dev) || adreno_is_a650(adreno_dev))
+ if (adreno_is_a618(adreno_dev) || adreno_is_a650_family(adreno_dev))
_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET, 0x2);
else
_regwrite(cfg, PDC_GPU_TCS3_CMD0_DATA + PDC_CMD_OFFSET, 0x3);
@@ -1167,38 +1167,8 @@ static int a6xx_gmu_load_firmware(struct kgsl_device *device)
return gmu_cache_finalize(device);
}
-#define A6XX_STATE_OF_CHILD (BIT(4) | BIT(5))
-#define A6XX_IDLE_FULL_LLM BIT(0)
-#define A6XX_WAKEUP_ACK BIT(1)
-#define A6XX_IDLE_FULL_ACK BIT(0)
#define A6XX_VBIF_XIN_HALT_CTRL1_ACKS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
-static int a6xx_llm_glm_handshake(struct kgsl_device *device)
-{
- unsigned int val;
- const struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
-
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_LM) ||
- !test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
- return 0;
-
- gmu_core_regread(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, &val);
- if (!(val & A6XX_STATE_OF_CHILD)) {
- gmu_core_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0, BIT(4));
- gmu_core_regrmw(device, A6XX_GMU_LLM_GLM_SLEEP_CTRL, 0,
- A6XX_IDLE_FULL_LLM);
- if (timed_poll_check(device, A6XX_GMU_LLM_GLM_SLEEP_STATUS,
- A6XX_IDLE_FULL_ACK, GPU_RESET_TIMEOUT,
- A6XX_IDLE_FULL_ACK)) {
- dev_err(&gmu->pdev->dev, "LLM-GLM handshake failed\n");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static void a6xx_isense_disable(struct kgsl_device *device)
{
unsigned int val;
@@ -1224,9 +1194,6 @@ static int a6xx_gmu_suspend(struct kgsl_device *device)
/* Disable ISENSE if it's on */
a6xx_isense_disable(device);
- /* LLM-GLM handshake sequence */
- a6xx_llm_glm_handshake(device);
-
/* If SPTP_RAC is on, turn off SPTP_RAC HS */
a6xx_gmu_sptprac_disable(ADRENO_DEVICE(device));
@@ -1370,40 +1337,10 @@ static int a6xx_gmu_rpmh_gpu_pwrctrl(struct kgsl_device *device,
return ret;
}
-static int _setup_throttling_counter(struct adreno_device *adreno_dev,
- int countable, u32 *offset)
-{
- if (*offset)
- return 0;
-
- return adreno_perfcounter_get(adreno_dev,
- KGSL_PERFCOUNTER_GROUP_GPMU_PWR,
- countable, offset, NULL,
- PERFCOUNTER_FLAG_KERNEL);
-}
-
-static void _setup_throttling_counters(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
- int ret;
-
- ret = _setup_throttling_counter(adreno_dev, 0x10,
- &adreno_dev->gpmu_throttle_counters[0]);
- ret |= _setup_throttling_counter(adreno_dev, 0x15,
- &adreno_dev->gpmu_throttle_counters[1]);
- ret |= _setup_throttling_counter(adreno_dev, 0x19,
- &adreno_dev->gpmu_throttle_counters[2]);
-
- if (ret)
- dev_err_once(&gmu->pdev->dev,
- "Could not get all the throttling counters for LM\n");
-
-}
-
void a6xx_gmu_enable_lm(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ u32 val;
memset(adreno_dev->busy_data.throttle_cycles, 0,
sizeof(adreno_dev->busy_data.throttle_cycles));
@@ -1412,7 +1349,25 @@ void a6xx_gmu_enable_lm(struct kgsl_device *device)
!test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag))
return;
- _setup_throttling_counters(adreno_dev);
+ /*
+ * For throttling, use the following counters for throttled cycles:
+ * XOCLK1: countable: 0x10
+ * XOCLK2: countable: 0x16 for newer hardware / 0x15 for others
+ * XOCLK3: countable: 0xf for newer hardware / 0x19 for others
+ *
+ * POWER_COUNTER_SELECT_0 controls counters 0 - 3; each selector
+ * is 8 bits wide.
+ */
+
+ if (adreno_is_a620(adreno_dev) || adreno_is_a650(adreno_dev))
+ val = (0x10 << 8) | (0x16 << 16) | (0x0f << 24);
+ else
+ val = (0x10 << 8) | (0x15 << 16) | (0x19 << 24);
+
+ /* Make sure not to write over XOCLK0 */
+ gmu_core_regrmw(device, A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
+ 0xffffff00, val);
gmu_core_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
}
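As a worked example of the selector packing above — assuming regrmw() clears the mask bits before OR-ing in the new value, as the "Make sure not to write over XOCLK0" comment implies — each XOCLK counter owns one byte of A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0:

	/*
	 * byte 0 (bits  7:0)  -> XOCLK0, GPU busy, preserved by the rmw
	 * byte 1 (bits 15:8)  -> XOCLK1 = countable 0x10
	 * byte 2 (bits 23:16) -> XOCLK2 = countable 0x16 (or 0x15)
	 * byte 3 (bits 31:24) -> XOCLK3 = countable 0x0f (or 0x19)
	 *
	 * e.g. for a620/a650: val = 0x0f161000, and the register ends up
	 * as (old & ~0xffffff00) | 0x0f161000, keeping byte 0 intact.
	 */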
@@ -1662,6 +1617,8 @@ static void a6xx_gmu_snapshot(struct kgsl_device *device,
if (a6xx_gmu_gx_is_on(device)) {
/* Set fence to ALLOW mode so registers can be read */
kgsl_regwrite(device, A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+ /* Make sure the previous write posted before reading */
+ wmb();
kgsl_regread(device, A6XX_GMU_AO_AHB_FENCE_CTRL, &val);
dev_err(device->dev, "set FENCE to ALLOW mode:%x\n", val);
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index dcd01b2..fbeb4b5 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -379,6 +379,13 @@ static const unsigned int a6xx_pre_crashdumper_registers[] = {
0x825, 0x825,
};
+static const unsigned int a6xx_gmu_wrapper_registers[] = {
+ /* GMU CX */
+ 0x1f840, 0x1f840, 0x1f844, 0x1f845, 0x1f887, 0x1f889,
+ /* GMU AO */
+ 0x23b0c, 0x23b0e, 0x23b15, 0x23b15,
+};
+
enum a6xx_debugbus_id {
A6XX_DBGBUS_CP = 0x1,
A6XX_DBGBUS_RBBM = 0x2,
@@ -1763,6 +1770,10 @@ void a6xx_snapshot(struct adreno_device *adreno_dev,
adreno_snapshot_registers(device, snapshot,
a6xx_rscc_snapshot_registers,
ARRAY_SIZE(a6xx_rscc_snapshot_registers) / 2);
+ } else if (adreno_is_a610(adreno_dev)) {
+ adreno_snapshot_registers(device, snapshot,
+ a6xx_gmu_wrapper_registers,
+ ARRAY_SIZE(a6xx_gmu_wrapper_registers) / 2);
}
sptprac_on = gpudev->sptprac_is_on(adreno_dev);
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 7842dd9..5b4ae58 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -114,19 +114,6 @@ static void adreno_perfcounter_write(struct adreno_device *adreno_dev,
}
/**
- * adreno_perfcounter_close() - Release counters initialized by
- * adreno_perfcounter_close
- * @adreno_dev: Pointer to an adreno_device struct
- */
-void adreno_perfcounter_close(struct adreno_device *adreno_dev)
-{
- struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
-
- if (gpudev->perfcounter_close)
- gpudev->perfcounter_close(adreno_dev);
-}
-
-/**
* adreno_perfcounter_restore() - Restore performance counters
* @adreno_dev: adreno device to configure
*
@@ -869,7 +856,6 @@ static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
unsigned int group, unsigned int counter, unsigned int countable)
{
struct adreno_perfcounters *counters = ADRENO_PERFCOUNTERS(adreno_dev);
- struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
if (counters == NULL)
return -EINVAL;
@@ -885,8 +871,6 @@ static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
/* alwayson counter is global, so init value is 0 */
break;
case KGSL_PERFCOUNTER_GROUP_PWR:
- if (gpudev->enable_pwr_counters)
- return gpudev->enable_pwr_counters(adreno_dev, counter);
return 0;
case KGSL_PERFCOUNTER_GROUP_VBIF:
if (countable > VBIF2_PERF_CNT_SEL_MASK)
@@ -948,10 +932,6 @@ static uint64_t _perfcounter_read_pwr(struct adreno_device *adreno_dev,
reg = &group->regs[counter];
- /* Remember, counter 0 is not emulated on 5XX */
- if (adreno_is_a5xx(adreno_dev) && (counter == 0))
- return -EINVAL;
-
if (adreno_is_a3xx(adreno_dev)) {
/* On A3XX we need to freeze the counter so we can read it */
if (counter == 0)
diff --git a/drivers/gpu/msm/adreno_perfcounter.h b/drivers/gpu/msm/adreno_perfcounter.h
index 273c5aa..f5310d4 100644
--- a/drivers/gpu/msm/adreno_perfcounter.h
+++ b/drivers/gpu/msm/adreno_perfcounter.h
@@ -109,8 +109,6 @@ int adreno_perfcounter_query_group(struct adreno_device *adreno_dev,
int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
struct kgsl_perfcounter_read_group __user *reads, unsigned int count);
-void adreno_perfcounter_close(struct adreno_device *adreno_dev);
-
void adreno_perfcounter_restore(struct adreno_device *adreno_dev);
void adreno_perfcounter_save(struct adreno_device *adreno_dev);
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 3b49324..60ddbfc 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -7,12 +7,15 @@
#include <linux/slab.h>
#include "a3xx_reg.h"
+#include "a5xx_reg.h"
+#include "a6xx_reg.h"
#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
#include "adreno_trace.h"
#include "kgsl_trace.h"
+
#define RB_HOSTPTR(_rb, _pos) \
((unsigned int *) ((_rb)->buffer_desc.hostptr + \
((_pos) * sizeof(unsigned int))))
@@ -791,18 +794,21 @@ static inline int _get_alwayson_counter(struct adreno_device *adreno_dev,
*p++ = cp_mem_packet(adreno_dev, CP_REG_TO_MEM, 2, 1);
/*
- * For a4x and some a5x the alwayson_hi read through CPU
+ * For some a5x the alwayson_hi read through CPU
* will be masked. Only do 32 bit CP reads for keeping the
* numbers consistent
*/
- if (ADRENO_GPUREV(adreno_dev) >= 400 &&
- ADRENO_GPUREV(adreno_dev) <= ADRENO_REV_A530)
- *p++ = adreno_getreg(adreno_dev,
- ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO);
- else
- *p++ = adreno_getreg(adreno_dev,
- ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO) |
+ if (adreno_is_a5xx(adreno_dev)) {
+ if (ADRENO_GPUREV(adreno_dev) <= ADRENO_REV_A530)
+ *p++ = A5XX_RBBM_ALWAYSON_COUNTER_LO;
+ else
+ *p++ = A5XX_RBBM_ALWAYSON_COUNTER_LO |
+ (1 << 30) | (2 << 18);
+ } else if (adreno_is_a6xx(adreno_dev)) {
+ *p++ = A6XX_CP_ALWAYS_ON_COUNTER_LO |
(1 << 30) | (2 << 18);
+ }
+
p += cp_gpuaddr(adreno_dev, p, gpuaddr);
return (unsigned int)(p - cmds);
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index 8289f52..8d2a97e 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -738,7 +738,7 @@ static void setup_fault_process(struct kgsl_device *device,
if (kgsl_mmu_is_perprocess(&device->mmu)) {
struct kgsl_process_private *tmp;
- mutex_lock(&kgsl_driver.process_mutex);
+ spin_lock(&kgsl_driver.proclist_lock);
list_for_each_entry(tmp, &kgsl_driver.process_list, list) {
u64 pt_ttbr0;
@@ -749,7 +749,7 @@ static void setup_fault_process(struct kgsl_device *device,
break;
}
}
- mutex_unlock(&kgsl_driver.process_mutex);
+ spin_unlock(&kgsl_driver.proclist_lock);
}
done:
snapshot->process = process;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 1b298f9..2257294 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -921,7 +921,7 @@ struct kgsl_process_private *kgsl_process_private_find(pid_t pid)
{
struct kgsl_process_private *p, *private = NULL;
- mutex_lock(&kgsl_driver.process_mutex);
+ spin_lock(&kgsl_driver.proclist_lock);
list_for_each_entry(p, &kgsl_driver.process_list, list) {
if (p->pid == pid) {
if (kgsl_process_private_get(p))
@@ -929,7 +929,8 @@ struct kgsl_process_private *kgsl_process_private_find(pid_t pid)
break;
}
}
- mutex_unlock(&kgsl_driver.process_mutex);
+ spin_unlock(&kgsl_driver.proclist_lock);
+
return private;
}
@@ -1035,7 +1036,9 @@ static void kgsl_process_private_close(struct kgsl_device_private *dev_priv,
kgsl_mmu_detach_pagetable(private->pagetable);
/* Remove the process struct from the master list */
+ spin_lock(&kgsl_driver.proclist_lock);
list_del(&private->list);
+ spin_unlock(&kgsl_driver.proclist_lock);
/*
* Unlock the mutex before releasing the memory and the debugfs
@@ -1071,7 +1074,9 @@ static struct kgsl_process_private *kgsl_process_private_open(
kgsl_process_init_sysfs(device, private);
kgsl_process_init_debugfs(private);
+ spin_lock(&kgsl_driver.proclist_lock);
list_add(&private->list, &kgsl_driver.process_list);
+ spin_unlock(&kgsl_driver.proclist_lock);
}
done:
@@ -4870,6 +4875,7 @@ static const struct file_operations kgsl_fops = {
struct kgsl_driver kgsl_driver = {
.process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
+ .proclist_lock = __SPIN_LOCK_UNLOCKED(kgsl_driver.proclist_lock),
.ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
.devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
/*
@@ -4973,7 +4979,6 @@ int kgsl_request_irq(struct platform_device *pdev, const char *name,
int kgsl_device_platform_probe(struct kgsl_device *device)
{
int status = -EINVAL;
- struct resource *res;
int cpu;
status = _register_device(device);
@@ -4986,34 +4991,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
if (status)
goto error;
- /*
- * Check if a shadermemname is defined, and then get shader memory
- * details including shader memory starting physical address
- * and shader memory length
- */
- if (device->shadermemname != NULL) {
- res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
- device->shadermemname);
-
- if (res == NULL) {
- dev_warn(device->dev,
- "Shader memory: platform_get_resource_byname failed\n");
- }
-
- else {
- device->shader_mem_phys = res->start;
- device->shader_mem_len = resource_size(res);
- }
-
- if (!devm_request_mem_region(device->dev,
- device->shader_mem_phys,
- device->shader_mem_len,
- device->name)) {
- dev_warn(device->dev,
- "request_mem_region_failed\n");
- }
- }
-
if (!devm_request_mem_region(device->dev, device->reg_phys,
device->reg_len, device->name)) {
dev_err(device->dev, "request_mem_region failed\n");
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index d824177..347a30c 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -113,6 +113,7 @@ struct kgsl_context;
* @pagetable_list: List of open pagetables
* @ptlock: Lock for accessing the pagetable list
* @process_mutex: Mutex for accessing the process list
+ * @proclist_lock: Lock for accessing the process list
* @devlock: Mutex protecting the device list
* @stats: Struct containing atomic memory statistics
* @full_cache_threshold: the threshold that triggers a full cache flush
@@ -131,6 +132,7 @@ struct kgsl_driver {
struct list_head pagetable_list;
spinlock_t ptlock;
struct mutex process_mutex;
+ spinlock_t proclist_lock;
struct mutex devlock;
struct {
atomic_long_t vmalloc;
@@ -331,16 +333,6 @@ struct kgsl_event_group {
};
/**
- * struct kgsl_protected_registers - Protected register range
- * @base: Offset of the range to be protected
- * @range: Range (# of registers = 2 ** range)
- */
-struct kgsl_protected_registers {
- unsigned int base;
- int range;
-};
-
-/**
* struct sparse_bind_object - Bind metadata
* @node: Node for the rb tree
* @p_memdesc: Physical memdesc bound to
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 36ad144..1a86ca5 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -247,18 +247,12 @@ struct kgsl_device {
/* Kernel virtual address for GPU shader memory */
void __iomem *shader_mem_virt;
- /* Starting physical address for GPU shader memory */
- unsigned long shader_mem_phys;
-
/* Starting kernel virtual address for QDSS GFX DBG register block */
void __iomem *qdss_gfx_virt;
- /* GPU shader memory size */
- unsigned int shader_mem_len;
struct kgsl_memdesc memstore;
struct kgsl_memdesc scratch;
const char *iomemname;
- const char *shadermemname;
struct kgsl_mmu mmu;
struct gmu_core_device gmu_core;
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index de9e4f4..bc6ff13 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -383,6 +383,9 @@ static void gmu_memory_close(struct gmu_device *gmu)
md = &gmu->kmem_entries[i];
ctx = &gmu_ctx[md->ctx_idx];
+ if (!ctx->domain)
+ continue;
+
if (md->gmuaddr && md->mem_type != GMU_ITCM &&
md->mem_type != GMU_DTCM)
iommu_unmap(ctx->domain, md->gmuaddr, md->size);
@@ -447,7 +450,7 @@ int gmu_prealloc_req(struct kgsl_device *device, struct gmu_block_header *blk)
* to share with GMU in kernel mode.
* @device: Pointer to KGSL device
*/
-int gmu_memory_probe(struct kgsl_device *device)
+static int gmu_memory_probe(struct kgsl_device *device)
{
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -804,38 +807,6 @@ static void build_bwtable_cmd_cache(struct gmu_device *gmu)
votes->cnoc_votes.cmd_data[i][j];
}
-static int gmu_acd_probe(struct gmu_device *gmu, struct device_node *node)
-{
- struct hfi_acd_table_cmd *cmd = &gmu->hfi.acd_tbl_cmd;
- struct device_node *acd_node;
-
- acd_node = of_find_node_by_name(node, "qcom,gpu-acd-table");
- if (!acd_node)
- return -ENODEV;
-
- cmd->hdr = 0xFFFFFFFF;
- cmd->version = HFI_ACD_INIT_VERSION;
- cmd->enable_by_level = 0;
- cmd->stride = 0;
- cmd->num_levels = 0;
-
- of_property_read_u32(acd_node, "qcom,acd-stride", &cmd->stride);
- if (!cmd->stride || cmd->stride > MAX_ACD_STRIDE)
- return -EINVAL;
-
- of_property_read_u32(acd_node, "qcom,acd-num-levels", &cmd->num_levels);
- if (!cmd->num_levels || cmd->num_levels > MAX_ACD_NUM_LEVELS)
- return -EINVAL;
-
- of_property_read_u32(acd_node, "qcom,acd-enable-by-level",
- &cmd->enable_by_level);
- if (hweight32(cmd->enable_by_level) != cmd->num_levels)
- return -EINVAL;
-
- return of_property_read_u32_array(acd_node, "qcom,acd-data",
- cmd->data, cmd->stride * cmd->num_levels);
-}
-
/*
* gmu_bus_vote_init - initialized RPMh votes needed for bw scaling by GMU.
* @gmu: Pointer to GMU device
@@ -1278,6 +1249,41 @@ int gmu_cache_finalize(struct kgsl_device *device)
return 0;
}
+static void gmu_acd_probe(struct kgsl_device *device, struct gmu_device *gmu,
+ struct device_node *node)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ struct hfi_acd_table_cmd *cmd = &gmu->hfi.acd_tbl_cmd;
+ u32 acd_level, cmd_idx, numlvl = pwr->num_pwrlevels;
+ int ret, i;
+
+ if (!ADRENO_FEATURE(ADRENO_DEVICE(device), ADRENO_ACD))
+ return;
+
+ cmd->hdr = 0xFFFFFFFF;
+ cmd->version = HFI_ACD_INIT_VERSION;
+ cmd->stride = 1;
+ cmd->enable_by_level = 0;
+
+ for (i = 0, cmd_idx = 0; i < numlvl; i++) {
+ acd_level = pwr->pwrlevels[numlvl - i - 1].acd_level;
+ if (acd_level) {
+ cmd->enable_by_level |= (1 << i);
+ cmd->data[cmd_idx++] = acd_level;
+ }
+ }
+
+ if (!cmd->enable_by_level)
+ return;
+
+ cmd->num_levels = cmd_idx;
+
+ ret = gmu_aop_mailbox_init(device, gmu);
+ if (ret)
+ dev_err(&gmu->pdev->dev,
+ "AOP mailbox init failed: %d\n", ret);
+}
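A worked example of the table-building loop above, with hypothetical values: num_pwrlevels = 3 and acd_level values { 0xB, 0xA, 0 } in pwr->pwrlevels[] ordered fastest-first. The loop walks the levels slowest-first:

	/*
	 * i = 0 -> pwrlevels[2].acd_level = 0    -> skipped
	 * i = 1 -> pwrlevels[1].acd_level = 0xA  -> bit 1 set, data[0] = 0xA
	 * i = 2 -> pwrlevels[0].acd_level = 0xB  -> bit 2 set, data[1] = 0xB
	 *
	 * Result: enable_by_level = 0x6 and num_levels = 2. The early
	 * return is taken only when no level carries an acd_level.
	 */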
+
/* Do not access any GMU registers in GMU probe function */
static int gmu_probe(struct kgsl_device *device, struct device_node *node)
{
@@ -1390,17 +1396,7 @@ static int gmu_probe(struct kgsl_device *device, struct device_node *node)
else
gmu->idle_level = GPU_HW_ACTIVE;
- if (ADRENO_FEATURE(adreno_dev, ADRENO_ACD)) {
- if (!gmu_acd_probe(gmu, node)) {
- /* Init the AOP mailbox if we have a valid ACD table */
- ret = gmu_aop_mailbox_init(device, gmu);
- if (ret)
- dev_err(&gmu->pdev->dev,
- "AOP mailbox init failed: %d\n", ret);
- } else
- dev_err(&gmu->pdev->dev,
- "ACD probe failed: missing or invalid table\n");
- }
+ gmu_acd_probe(device, gmu, node);
set_bit(GMU_ENABLED, &device->gmu_core.flags);
device->gmu_core.dev_ops = &adreno_a6xx_gmudev;
@@ -1562,6 +1558,24 @@ static void gmu_snapshot(struct kgsl_device *device)
gmu->fault_count++;
}
+static int gmu_init(struct kgsl_device *device)
+{
+ struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
+ struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);
+ int ret;
+
+ ret = ops->load_firmware(device);
+ if (ret)
+ return ret;
+
+ ret = gmu_memory_probe(device);
+ if (ret)
+ return ret;
+
+ hfi_init(gmu);
+
+ return 0;
+}
/* To be called to power on both GPU and GMU */
static int gmu_start(struct kgsl_device *device)
{
@@ -1575,7 +1589,9 @@ static int gmu_start(struct kgsl_device *device)
case KGSL_STATE_INIT:
gmu_aop_send_acd_state(device, test_bit(ADRENO_ACD_CTRL,
&adreno_dev->pwrctrl_flag));
-
+ ret = gmu_init(device);
+ if (ret)
+ return ret;
case KGSL_STATE_SUSPEND:
WARN_ON(test_bit(GMU_CLK_ON, &device->gmu_core.flags));
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index f1dd0aa..040db02 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -218,7 +218,6 @@ unsigned int gmu_get_memtype_base(struct gmu_device *gmu,
enum gmu_mem_type type);
int gmu_prealloc_req(struct kgsl_device *device, struct gmu_block_header *blk);
-int gmu_memory_probe(struct kgsl_device *device);
int gmu_cache_finalize(struct kgsl_device *device);
#endif /* __KGSL_GMU_H */
diff --git a/drivers/gpu/msm/kgsl_gmu_core.c b/drivers/gpu/msm/kgsl_gmu_core.c
index f9993c07d..8af7840 100644
--- a/drivers/gpu/msm/kgsl_gmu_core.c
+++ b/drivers/gpu/msm/kgsl_gmu_core.c
@@ -306,16 +306,6 @@ void gmu_core_dev_enable_lm(struct kgsl_device *device)
ops->enable_lm(device);
}
-int gmu_core_dev_load_firmware(struct kgsl_device *device)
-{
- struct gmu_dev_ops *ops = GMU_DEVICE_OPS(device);
-
- if (ops && ops->load_firmware)
- return ops->load_firmware(device);
-
- return 0;
-}
-
void gmu_core_dev_snapshot(struct kgsl_device *device,
struct kgsl_snapshot *snapshot)
{
@@ -370,5 +360,5 @@ int gmu_core_dev_wait_for_active_transition(struct kgsl_device *device)
if (ops && ops->wait_for_active_transition)
return ops->wait_for_active_transition(device);
- return -ETIMEDOUT;
+ return 0;
}
diff --git a/drivers/gpu/msm/kgsl_gmu_core.h b/drivers/gpu/msm/kgsl_gmu_core.h
index ad4a9f3..22690aa 100644
--- a/drivers/gpu/msm/kgsl_gmu_core.h
+++ b/drivers/gpu/msm/kgsl_gmu_core.h
@@ -217,7 +217,6 @@ void gmu_core_dev_oob_clear(struct kgsl_device *device, enum oob_request req);
int gmu_core_dev_hfi_start_msg(struct kgsl_device *device);
int gmu_core_dev_wait_for_lowest_idle(struct kgsl_device *device);
void gmu_core_dev_enable_lm(struct kgsl_device *device);
-int gmu_core_dev_load_firmware(struct kgsl_device *device);
void gmu_core_dev_snapshot(struct kgsl_device *device,
struct kgsl_snapshot *snapshot);
bool gmu_core_dev_gx_is_on(struct kgsl_device *device);
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 88177fb..488dacc 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -635,10 +635,8 @@ static void _get_entries(struct kgsl_process_private *private,
static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
struct _mem_entry *preventry, struct _mem_entry *nextentry,
- struct kgsl_context *context)
+ struct kgsl_process_private *private)
{
- struct kgsl_process_private *private;
-
memset(preventry, 0, sizeof(*preventry));
memset(nextentry, 0, sizeof(*nextentry));
@@ -647,8 +645,7 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
if (ADDR_IN_GLOBAL(mmu, faultaddr)) {
_get_global_entries(faultaddr, preventry, nextentry);
- } else if (context) {
- private = context->proc_priv;
+ } else if (private) {
spin_lock(&private->mem_lock);
_get_entries(private, faultaddr, preventry, nextentry);
spin_unlock(&private->mem_lock);
@@ -687,6 +684,29 @@ static void _check_if_freed(struct kgsl_iommu_context *ctx,
}
}
+static struct kgsl_process_private *kgsl_iommu_get_process(u64 ptbase)
+{
+ struct kgsl_process_private *p;
+ struct kgsl_iommu_pt *iommu_pt;
+
+ spin_lock(&kgsl_driver.proclist_lock);
+
+ list_for_each_entry(p, &kgsl_driver.process_list, list) {
+ iommu_pt = p->pagetable->priv;
+ if (iommu_pt->ttbr0 == ptbase) {
+ if (!kgsl_process_private_get(p))
+ p = NULL;
+
+ spin_unlock(&kgsl_driver.proclist_lock);
+ return p;
+ }
+ }
+
+ spin_unlock(&kgsl_driver.proclist_lock);
+
+ return NULL;
+}
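kgsl_iommu_get_process() takes a reference on the match while still holding proclist_lock, so every successful lookup must be paired with kgsl_process_private_put(), as the fault handler below does. A spinlock rather than process_mutex is used here presumably because the pagefault path cannot sleep. A sketch of the required pairing, condensed from the handler below:

	/* Resolve the faulting pagetable base to a process, then release */
	private = kgsl_iommu_get_process(ptbase);
	if (private)
		pid = private->pid;
	/* ... report the fault ... */
	kgsl_process_private_put(private);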
+
static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
struct device *dev, unsigned long addr, int flags, void *token)
{
@@ -695,7 +715,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
struct kgsl_mmu *mmu = pt->mmu;
struct kgsl_iommu *iommu;
struct kgsl_iommu_context *ctx;
- u64 ptbase, proc_ptbase;
+ u64 ptbase;
u32 contextidr;
pid_t pid = 0;
pid_t ptname;
@@ -705,9 +725,9 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
struct adreno_device *adreno_dev;
struct adreno_gpudev *gpudev;
unsigned int no_page_fault_log = 0;
- unsigned int curr_context_id = 0;
- struct kgsl_context *context;
char *fault_type = "unknown";
+ char *comm = "unknown";
+ struct kgsl_process_private *private;
static DEFINE_RATELIMIT_STATE(_rs,
DEFAULT_RATELIMIT_INTERVAL,
@@ -722,21 +742,6 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
adreno_dev = ADRENO_DEVICE(device);
gpudev = ADRENO_GPU_DEVICE(adreno_dev);
- if (pt->name == KGSL_MMU_SECURE_PT)
- ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
-
- /*
- * set the fault bits and stuff before any printks so that if fault
- * handler runs then it will know it's dealing with a pagefault.
- * Read the global current timestamp because we could be in middle of
- * RB switch and hence the cur RB may not be reliable but global
- * one will always be reliable
- */
- kgsl_sharedmem_readl(&device->memstore, &curr_context_id,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context));
-
- context = kgsl_context_get(device, curr_context_id);
-
write = (flags & IOMMU_FAULT_WRITE) ? 1 : 0;
if (flags & IOMMU_FAULT_TRANSLATION)
fault_type = "translation";
@@ -747,12 +752,17 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
else if (flags & IOMMU_FAULT_TRANSACTION_STALLED)
fault_type = "transaction stalled";
- if (context != NULL) {
- /* save pagefault timestamp for GFT */
- set_bit(KGSL_CONTEXT_PRIV_PAGEFAULT, &context->priv);
- pid = context->proc_priv->pid;
+ ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
+ private = kgsl_iommu_get_process(ptbase);
+
+ if (private) {
+ pid = private->pid;
+ comm = private->comm;
}
+ if (pt->name == KGSL_MMU_SECURE_PT)
+ ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
+
ctx->fault = 1;
if (test_bit(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE,
@@ -767,9 +777,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
mutex_unlock(&device->mutex);
}
- ptbase = KGSL_IOMMU_GET_CTX_REG_Q(ctx, TTBR0);
contextidr = KGSL_IOMMU_GET_CTX_REG(ctx, CONTEXTIDR);
-
ptname = MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ?
KGSL_MMU_GLOBAL_PT : pid;
/*
@@ -778,43 +786,19 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
* search and delays the trace unnecessarily.
*/
trace_kgsl_mmu_pagefault(ctx->kgsldev, addr,
- ptname,
- context != NULL ? context->proc_priv->comm : "unknown",
- write ? "write" : "read");
+ ptname, comm, write ? "write" : "read");
if (test_bit(KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE,
&adreno_dev->ft_pf_policy))
no_page_fault_log = kgsl_mmu_log_fault_addr(mmu, ptbase, addr);
if (!no_page_fault_log && __ratelimit(&_rs)) {
- const char *api_str;
-
- if (context != NULL) {
- struct adreno_context *drawctxt =
- ADRENO_CONTEXT(context);
-
- api_str = get_api_type_str(drawctxt->type);
- } else
- api_str = "UNKNOWN";
-
dev_crit(ctx->kgsldev->dev,
"GPU PAGE FAULT: addr = %lX pid= %d name=%s\n", addr,
- ptname,
- context != NULL ? context->proc_priv->comm : "unknown");
-
- if (context != NULL) {
- proc_ptbase = kgsl_mmu_pagetable_get_ttbr0(
- context->proc_priv->pagetable);
-
- if (ptbase != proc_ptbase)
- dev_crit(ctx->kgsldev->dev,
- "Pagetable address mismatch: HW address is 0x%llx but SW expected 0x%llx\n",
- ptbase, proc_ptbase);
- }
-
+ ptname, comm);
dev_crit(ctx->kgsldev->dev,
- "context=%s ctx_type=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
- ctx->name, api_str, ptbase, contextidr,
+ "context=%s TTBR0=0x%llx CIDR=0x%x (%s %s fault)\n",
+ ctx->name, ptbase, contextidr,
write ? "write" : "read", fault_type);
if (gpudev->iommu_fault_block) {
@@ -822,9 +806,8 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
fsynr1 = KGSL_IOMMU_GET_CTX_REG(ctx, FSYNR1);
dev_crit(ctx->kgsldev->dev,
- "FAULTING BLOCK: %s\n",
- gpudev->iommu_fault_block(adreno_dev,
- fsynr1));
+ "FAULTING BLOCK: %s\n",
+ gpudev->iommu_fault_block(device, fsynr1));
}
/* Don't print the debug if this is a permissions fault */
@@ -834,7 +817,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
dev_err(ctx->kgsldev->dev,
"---- nearby memory ----\n");
- _find_mem_entries(mmu, addr, &prev, &next, context);
+ _find_mem_entries(mmu, addr, &prev, &next, private);
if (prev.gpuaddr)
_print_entry(ctx->kgsldev, &prev);
else
@@ -877,7 +860,8 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
adreno_dispatcher_schedule(device);
}
- kgsl_context_put(context);
+ kgsl_process_private_put(private);
+
return ret;
}
@@ -2167,14 +2151,6 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
return 0;
}
-static struct kgsl_protected_registers *
-kgsl_iommu_get_prot_regs(struct kgsl_mmu *mmu)
-{
- struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
-
- return &iommu->protect;
-}
-
static struct kgsl_iommu_addr_entry *_find_gpuaddr(
struct kgsl_pagetable *pagetable, uint64_t gpuaddr)
{
@@ -2626,15 +2602,6 @@ static int _kgsl_iommu_probe(struct kgsl_device *device,
iommu->regstart = reg_val[0];
iommu->regsize = reg_val[1];
- /* Protecting the SMMU registers is mandatory */
- if (of_property_read_u32_array(node, "qcom,protect", reg_val, 2)) {
- dev_err(device->dev,
- "dt: no iommu protection range specified\n");
- return -EINVAL;
- }
- iommu->protect.base = reg_val[0] / sizeof(u32);
- iommu->protect.range = reg_val[1] / sizeof(u32);
-
of_property_for_each_string(node, "clock-names", prop, cname) {
struct clk *c = devm_clk_get(&pdev->dev, cname);
@@ -2722,7 +2689,6 @@ struct kgsl_mmu_ops kgsl_iommu_ops = {
.mmu_pt_equal = kgsl_iommu_pt_equal,
.mmu_set_pf_policy = kgsl_iommu_set_pf_policy,
.mmu_pagefault_resume = kgsl_iommu_pagefault_resume,
- .mmu_get_prot_regs = kgsl_iommu_get_prot_regs,
.mmu_init_pt = kgsl_iommu_init_pt,
.mmu_add_global = kgsl_iommu_add_global,
.mmu_remove_global = kgsl_iommu_remove_global,
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index b98f2c2..2e4c2ad 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -112,7 +112,6 @@ struct kgsl_iommu_context {
* @clk_enable_count: The ref count of clock enable calls
* @clks: Array of pointers to IOMMU clocks
* @smmu_info: smmu info used in a5xx preemption
- * @protect: register protection settings for the iommu.
*/
struct kgsl_iommu {
struct kgsl_iommu_context ctx[KGSL_IOMMU_CONTEXT_MAX];
@@ -123,7 +122,6 @@ struct kgsl_iommu {
atomic_t clk_enable_count;
struct clk *clks[KGSL_IOMMU_MAX_CLKS];
struct kgsl_memdesc smmu_info;
- struct kgsl_protected_registers protect;
};
/*
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 637e57d..93012aa 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -68,8 +68,6 @@ struct kgsl_mmu_ops {
bool (*mmu_pt_equal)(struct kgsl_mmu *mmu,
struct kgsl_pagetable *pt, u64 ttbr0);
int (*mmu_set_pf_policy)(struct kgsl_mmu *mmu, unsigned long pf_policy);
- struct kgsl_protected_registers *(*mmu_get_prot_regs)
- (struct kgsl_mmu *mmu);
int (*mmu_init_pt)(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt);
void (*mmu_add_global)(struct kgsl_mmu *mmu,
struct kgsl_memdesc *memdesc, const char *name);
@@ -328,15 +326,6 @@ static inline void kgsl_mmu_clear_fsr(struct kgsl_mmu *mmu)
return mmu->mmu_ops->mmu_clear_fsr(mmu);
}
-static inline struct kgsl_protected_registers *kgsl_mmu_get_prot_regs
- (struct kgsl_mmu *mmu)
-{
- if (MMU_OP_VALID(mmu, mmu_get_prot_regs))
- return mmu->mmu_ops->mmu_get_prot_regs(mmu);
-
- return NULL;
-}
-
static inline int kgsl_mmu_is_perprocess(struct kgsl_mmu *mmu)
{
return MMU_FEATURE(mmu, KGSL_MMU_GLOBAL_PAGETABLE) ? 0 : 1;
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index c9bf42f..6786ecb4 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1991,9 +1991,9 @@ static bool _gpu_freq_supported(struct kgsl_pwrctrl *pwr, unsigned int freq)
return false;
}
-static void kgsl_pwrctrl_disable_unused_opp(struct kgsl_device *device)
+void kgsl_pwrctrl_disable_unused_opp(struct kgsl_device *device,
+ struct device *dev)
{
- struct device *dev = &device->pdev->dev;
struct dev_pm_opp *opp;
unsigned long freq = 0;
int ret;
@@ -2083,7 +2083,7 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
pwr->pwrlevels[i].gpu_freq = freq;
}
- kgsl_pwrctrl_disable_unused_opp(device);
+ kgsl_pwrctrl_disable_unused_opp(device, &pdev->dev);
kgsl_clk_set_rate(device, pwr->num_pwrlevels - 1);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.h b/drivers/gpu/msm/kgsl_pwrctrl.h
index f3a5648..6dc7c53 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.h
+++ b/drivers/gpu/msm/kgsl_pwrctrl.h
@@ -105,6 +105,7 @@ struct kgsl_pwrlevel {
unsigned int bus_freq;
unsigned int bus_min;
unsigned int bus_max;
+ unsigned int acd_level;
};
struct kgsl_regulator {
@@ -267,4 +268,7 @@ void kgsl_pwrctrl_set_constraint(struct kgsl_device *device,
void kgsl_pwrctrl_update_l2pc(struct kgsl_device *device,
unsigned long timeout_us);
void kgsl_pwrctrl_set_default_gpu_pwrlevel(struct kgsl_device *device);
+void kgsl_pwrctrl_disable_unused_opp(struct kgsl_device *device,
+ struct device *dev);
+
#endif /* __KGSL_PWRCTRL_H */
diff --git a/drivers/gpu/msm/kgsl_pwrscale.c b/drivers/gpu/msm/kgsl_pwrscale.c
index 89b227b..790b379 100644
--- a/drivers/gpu/msm/kgsl_pwrscale.c
+++ b/drivers/gpu/msm/kgsl_pwrscale.c
@@ -1041,6 +1041,12 @@ int kgsl_pwrscale_init(struct device *dev, const char *governor)
* frequency.
*/
ret = dev_pm_opp_of_add_table(device->busmondev);
+ /*
+ * Disable OPPs that are not supported as per the GPU freq plan.
+ * This is needed to ensure the freq_table specified in the
+ * bus_profile above matches the OPP table.
+ */
+ kgsl_pwrctrl_disable_unused_opp(device, device->busmondev);
if (!ret)
bus_devfreq = devfreq_add_device(device->busmondev,
&pwrscale->bus_profile.profile, "gpubw_mon",
diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
index 9428ea7..c52bd16 100644
--- a/drivers/hid/hid-a4tech.c
+++ b/drivers/hid/hid-a4tech.c
@@ -26,12 +26,36 @@
#define A4_2WHEEL_MOUSE_HACK_7 0x01
#define A4_2WHEEL_MOUSE_HACK_B8 0x02
+#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8)
+
struct a4tech_sc {
unsigned long quirks;
unsigned int hw_wheel;
__s32 delayed_value;
};
+static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ struct hid_field *field, struct hid_usage *usage,
+ unsigned long **bit, int *max)
+{
+ struct a4tech_sc *a4 = hid_get_drvdata(hdev);
+
+ if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
+ usage->hid == A4_WHEEL_ORIENTATION) {
+ /*
+ * We do not want to have this usage mapped to anything as it's
+ * nonstandard and doesn't really behave like an HID report.
+ * It's only selecting the orientation (vertical/horizontal) of
+ * the previous mouse wheel report. The input_events will be
+ * generated once both reports are recorded in a4_event().
+ */
+ return -1;
+ }
+
+ return 0;
+}
+
static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
@@ -53,8 +77,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
struct a4tech_sc *a4 = hid_get_drvdata(hdev);
struct input_dev *input;
- if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
- !usage->type)
+ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
return 0;
input = field->hidinput->input;
@@ -65,7 +88,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
return 1;
}
- if (usage->hid == 0x000100b8) {
+ if (usage->hid == A4_WHEEL_ORIENTATION) {
input_event(input, EV_REL, value ? REL_HWHEEL :
REL_WHEEL, a4->delayed_value);
return 1;
@@ -129,6 +152,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
static struct hid_driver a4_driver = {
.name = "a4tech",
.id_table = a4_devices,
+ .input_mapping = a4_input_mapping,
.input_mapped = a4_input_mapped,
.event = a4_event,
.probe = a4_probe,
diff --git a/drivers/hid/hid-holtek-kbd.c b/drivers/hid/hid-holtek-kbd.c
index 6e1a4a4..ab9da59 100644
--- a/drivers/hid/hid-holtek-kbd.c
+++ b/drivers/hid/hid-holtek-kbd.c
@@ -126,9 +126,14 @@ static int holtek_kbd_input_event(struct input_dev *dev, unsigned int type,
/* Locate the boot interface, to receive the LED change events */
struct usb_interface *boot_interface = usb_ifnum_to_if(usb_dev, 0);
+ struct hid_device *boot_hid;
+ struct hid_input *boot_hid_input;
- struct hid_device *boot_hid = usb_get_intfdata(boot_interface);
- struct hid_input *boot_hid_input = list_first_entry(&boot_hid->inputs,
+ if (unlikely(boot_interface == NULL))
+ return -ENODEV;
+
+ boot_hid = usb_get_intfdata(boot_interface);
+ boot_hid_input = list_first_entry(&boot_hid->inputs,
struct hid_input, list);
return boot_hid_input->input->event(boot_hid_input->input, type, code,
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 6b5a70b..bb35715 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -559,6 +559,7 @@
#define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
+#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641 0x0641
#define USB_VENDOR_ID_HUION 0x256c
#define USB_DEVICE_ID_HUION_TABLET 0x006e
@@ -970,6 +971,7 @@
#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
+#define USB_DEVICE_ID_SAITEK_X52 0x075c
#define USB_VENDOR_ID_SAMSUNG 0x0419
#define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 583615a..f032911 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -94,6 +94,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_0641), HID_QUIRK_ALWAYS_POLL },
{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_INNOMEDIA, USB_DEVICE_ID_INNEX_GENESIS_ATARI), HID_QUIRK_MULTI_INPUT },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X), HID_QUIRK_MULTI_INPUT },
@@ -142,6 +143,7 @@ static const struct hid_device_id hid_quirks[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
{ HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 9671a4b..31f1023 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -587,10 +587,14 @@ static void sony_set_leds(struct sony_sc *sc);
static inline void sony_schedule_work(struct sony_sc *sc,
enum sony_worker which)
{
+ unsigned long flags;
+
switch (which) {
case SONY_WORKER_STATE:
- if (!sc->defer_initialization)
+ spin_lock_irqsave(&sc->lock, flags);
+ if (!sc->defer_initialization && sc->state_worker_initialized)
schedule_work(&sc->state_worker);
+ spin_unlock_irqrestore(&sc->lock, flags);
break;
case SONY_WORKER_HOTPLUG:
if (sc->hotplug_worker_initialized)
@@ -2553,13 +2557,18 @@ static inline void sony_init_output_report(struct sony_sc *sc,
static inline void sony_cancel_work_sync(struct sony_sc *sc)
{
+ unsigned long flags;
+
if (sc->hotplug_worker_initialized)
cancel_work_sync(&sc->hotplug_worker);
- if (sc->state_worker_initialized)
+ if (sc->state_worker_initialized) {
+ spin_lock_irqsave(&sc->lock, flags);
+ sc->state_worker_initialized = 0;
+ spin_unlock_irqrestore(&sc->lock, flags);
cancel_work_sync(&sc->state_worker);
+ }
}
-
static int sony_input_configured(struct hid_device *hdev,
struct hid_input *hidinput)
{
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index bea8def..30b8c32 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -34,6 +34,8 @@
#include "hid-ids.h"
+#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320
+
static const signed short ff_rumble[] = {
FF_RUMBLE,
-1
@@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data,
struct hid_field *ff_field = tmff->ff_field;
int x, y;
int left, right; /* Rumbling */
+ int motor_swap;
switch (effect->type) {
case FF_CONSTANT:
@@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data,
ff_field->logical_minimum,
ff_field->logical_maximum);
+ /* 2-in-1 strong motor is left */
+ if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
+ motor_swap = left;
+ left = right;
+ right = motor_swap;
+ }
+
dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
ff_field->value[0] = left;
ff_field->value[1] = right;
@@ -238,6 +248,8 @@ static const struct hid_device_id tm_devices[] = {
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
.driver_data = (unsigned long)ff_rumble },
+ { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */
+ .driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
.driver_data = (unsigned long)ff_rumble },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index a746017..5a949ca 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -297,6 +297,14 @@ static int hiddev_open(struct inode *inode, struct file *file)
spin_unlock_irq(&list->hiddev->list_lock);
mutex_lock(&hiddev->existancelock);
+ /*
+ * Recheck ->exist with existancelock held to
+ * avoid opening a disconnected device.
+ */
+ if (!list->hiddev->exist) {
+ res = -ENODEV;
+ goto bail_unlock;
+ }
if (!list->hiddev->open++)
if (list->hiddev->exist) {
struct hid_device *hid = hiddev->hid;
@@ -313,6 +321,10 @@ static int hiddev_open(struct inode *inode, struct file *file)
hid_hw_power(hid, PM_HINT_NORMAL);
bail_unlock:
mutex_unlock(&hiddev->existancelock);
+
+ spin_lock_irq(&list->hiddev->list_lock);
+ list_del(&list->node);
+ spin_unlock_irq(&list->hiddev->list_lock);
bail:
file->private_data = NULL;
vfree(list);
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 0ae8483..50ef7b6 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -537,14 +537,14 @@ static int wacom_intuos_pad(struct wacom_wac *wacom)
*/
buttons = (data[4] << 1) | (data[3] & 0x01);
} else if (features->type == CINTIQ_COMPANION_2) {
- /* d-pad right -> data[4] & 0x10
- * d-pad up -> data[4] & 0x20
- * d-pad left -> data[4] & 0x40
- * d-pad down -> data[4] & 0x80
- * d-pad center -> data[3] & 0x01
+ /* d-pad right -> data[2] & 0x10
+ * d-pad up -> data[2] & 0x20
+ * d-pad left -> data[2] & 0x40
+ * d-pad down -> data[2] & 0x80
+ * d-pad center -> data[1] & 0x01
*/
buttons = ((data[2] >> 4) << 7) |
- ((data[1] & 0x04) << 6) |
+ ((data[1] & 0x04) << 4) |
((data[2] & 0x0F) << 2) |
(data[1] & 0x03);
} else if (features->type >= INTUOS5S && features->type <= INTUOSPL) {
@@ -848,6 +848,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
y >>= 1;
distance >>= 1;
}
+ if (features->type == INTUOSHT2)
+ distance = features->distance_max - distance;
input_report_abs(input, ABS_X, x);
input_report_abs(input, ABS_Y, y);
input_report_abs(input, ABS_DISTANCE, distance);
@@ -1061,7 +1063,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
input_report_key(input, BTN_BASE2, (data[11] & 0x02));
if (data[12] & 0x80)
- input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
+ input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
else
input_report_abs(input, ABS_WHEEL, 0);
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 2f164bd..fdb0f83 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -38,7 +38,7 @@
static unsigned long virt_to_hvpfn(void *addr)
{
- unsigned long paddr;
+ phys_addr_t paddr;
if (is_vmalloc_addr(addr))
paddr = page_to_phys(vmalloc_to_page(addr)) +
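The phys_addr_t change matters on 32-bit kernels with PAE/LPAE, where physical addresses are 64 bits wide but unsigned long is only 32: storing page_to_phys() in an unsigned long would truncate addresses above 4 GiB before the PFN shift. A sketch of the truncation the fix avoids, assuming the function returns paddr >> PAGE_SHIFT as in the surrounding code:

	phys_addr_t paddr = 0x123456000ULL;	/* > 4 GiB physical address */
	unsigned long bad = paddr;		/* truncates to 0x23456000 */
	/* bad >> PAGE_SHIFT now yields the wrong hypervisor PFN */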
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 78603b7..eba692c 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -818,7 +818,7 @@ static const u16 NCT6106_REG_TARGET[] = { 0x111, 0x121, 0x131 };
static const u16 NCT6106_REG_WEIGHT_TEMP_SEL[] = { 0x168, 0x178, 0x188 };
static const u16 NCT6106_REG_WEIGHT_TEMP_STEP[] = { 0x169, 0x179, 0x189 };
static const u16 NCT6106_REG_WEIGHT_TEMP_STEP_TOL[] = { 0x16a, 0x17a, 0x18a };
-static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x17c };
+static const u16 NCT6106_REG_WEIGHT_DUTY_STEP[] = { 0x16b, 0x17b, 0x18b };
static const u16 NCT6106_REG_WEIGHT_TEMP_BASE[] = { 0x16c, 0x17c, 0x18c };
static const u16 NCT6106_REG_WEIGHT_DUTY_BASE[] = { 0x16d, 0x17d, 0x18d };
@@ -3673,6 +3673,7 @@ static int nct6775_probe(struct platform_device *pdev)
data->REG_FAN_TIME[0] = NCT6106_REG_FAN_STOP_TIME;
data->REG_FAN_TIME[1] = NCT6106_REG_FAN_STEP_UP_TIME;
data->REG_FAN_TIME[2] = NCT6106_REG_FAN_STEP_DOWN_TIME;
+ data->REG_TOLERANCE_H = NCT6106_REG_TOLERANCE_H;
data->REG_PWM[0] = NCT6106_REG_PWM;
data->REG_PWM[1] = NCT6106_REG_FAN_START_OUTPUT;
data->REG_PWM[2] = NCT6106_REG_FAN_STOP_OUTPUT;
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 2876c18..38ffbdb 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -768,7 +768,7 @@ static struct attribute *nct7802_in_attrs[] = {
&sensor_dev_attr_in3_alarm.dev_attr.attr,
&sensor_dev_attr_in3_beep.dev_attr.attr,
- &sensor_dev_attr_in4_input.dev_attr.attr, /* 17 */
+ &sensor_dev_attr_in4_input.dev_attr.attr, /* 16 */
&sensor_dev_attr_in4_min.dev_attr.attr,
&sensor_dev_attr_in4_max.dev_attr.attr,
&sensor_dev_attr_in4_alarm.dev_attr.attr,
@@ -794,9 +794,9 @@ static umode_t nct7802_in_is_visible(struct kobject *kobj,
if (index >= 6 && index < 11 && (reg & 0x03) != 0x03) /* VSEN1 */
return 0;
- if (index >= 11 && index < 17 && (reg & 0x0c) != 0x0c) /* VSEN2 */
+ if (index >= 11 && index < 16 && (reg & 0x0c) != 0x0c) /* VSEN2 */
return 0;
- if (index >= 17 && (reg & 0x30) != 0x30) /* VSEN3 */
+ if (index >= 16 && (reg & 0x30) != 0x30) /* VSEN3 */
return 0;
return attr->mode;
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index cda385a..dfb5638 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -123,6 +123,17 @@
hardware component to another. It can also be used to pass
software generated events.
+config CORESIGHT_CTI_SAVE_DISABLE
+ bool "Turn off CTI save and restore"
+ depends on CORESIGHT_CTI
+ help
+ Turns off CoreSight CTI save and restore support for cpu CTIs. This
+ avoids voting for the clocks during probe as well as the associated
+ save and restore latency at the cost of breaking cpu CTI support on
+ targets where cpu CTIs have to be preserved across power collapse.
+
+ If unsure, say 'N' here to avoid breaking cpu CTI support.
+
config CORESIGHT_OST
bool "CoreSight OST framework"
depends on CORESIGHT_STM
diff --git a/drivers/hwtracing/coresight/coresight-byte-cntr.c b/drivers/hwtracing/coresight/coresight-byte-cntr.c
index 8a0439d..6216417 100644
--- a/drivers/hwtracing/coresight/coresight-byte-cntr.c
+++ b/drivers/hwtracing/coresight/coresight-byte-cntr.c
@@ -17,6 +17,7 @@
#include "coresight-tmc.h"
#define USB_BLK_SIZE 65536
+#define USB_SG_NUM (USB_BLK_SIZE / PAGE_SIZE)
#define USB_BUF_NUM 255
static struct tmc_drvdata *tmcdrvdata;
@@ -315,10 +316,11 @@ static int byte_cntr_register_chardev(struct byte_cntr *byte_cntr_data)
static void usb_read_work_fn(struct work_struct *work)
{
- int ret, seq = 0;
+ int ret, i, seq = 0;
struct qdss_request *usb_req = NULL;
struct etr_buf *etr_buf = tmcdrvdata->etr_buf;
size_t actual, req_size;
+ char *buf;
struct byte_cntr *drvdata =
container_of(work, struct byte_cntr, read_work);
@@ -337,50 +339,65 @@ static void usb_read_work_fn(struct work_struct *work)
}
req_size = USB_BLK_SIZE;
- while (req_size > 0) {
- seq++;
- usb_req = kzalloc(sizeof(*usb_req), GFP_KERNEL);
- if (!usb_req)
- return;
+ seq++;
+ usb_req = devm_kzalloc(tmcdrvdata->dev, sizeof(*usb_req),
+ GFP_KERNEL);
+ if (!usb_req)
+ return;
+ usb_req->sg = devm_kzalloc(tmcdrvdata->dev,
+ sizeof(*(usb_req->sg)) * USB_SG_NUM, GFP_KERNEL);
+ if (!usb_req->sg) {
+ devm_kfree(tmcdrvdata->dev, usb_req);
+ return;
+ }
+ usb_req->length = USB_BLK_SIZE;
+ drvdata->usb_req = usb_req;
+ for (i = 0; i < USB_SG_NUM; i++) {
actual = tmc_etr_buf_get_data(etr_buf, drvdata->offset,
- req_size, &usb_req->buf);
+ PAGE_SIZE, &buf);
if (actual <= 0) {
- kfree(usb_req);
+ devm_kfree(tmcdrvdata->dev, usb_req->sg);
+ devm_kfree(tmcdrvdata->dev, usb_req);
usb_req = NULL;
dev_err(tmcdrvdata->dev, "No data in ETR\n");
- break;
+ return;
}
- usb_req->length = actual;
- drvdata->usb_req = usb_req;
+ sg_set_buf(&usb_req->sg[i], buf, actual);
+ if (i == 0)
+ usb_req->buf = buf;
req_size -= actual;
- if ((drvdata->offset + usb_req->length)
- >= tmcdrvdata->size)
+ if ((drvdata->offset + actual) >= tmcdrvdata->size)
drvdata->offset = 0;
else
- drvdata->offset += usb_req->length;
- if (atomic_read(&drvdata->usb_free_buf) > 0) {
- ret = usb_qdss_write(tmcdrvdata->usbch,
- drvdata->usb_req);
- if (ret) {
- kfree(usb_req);
- usb_req = NULL;
- drvdata->usb_req = NULL;
- dev_err(tmcdrvdata->dev,
- "Write data failed:%d\n", ret);
- if (ret == -EAGAIN)
- continue;
- return;
- }
- atomic_dec(&drvdata->usb_free_buf);
-
- } else {
- dev_dbg(tmcdrvdata->dev,
- "Drop data, offset = %d, seq = %d, irq = %d\n",
- drvdata->offset, seq,
- atomic_read(&drvdata->irq_cnt));
- kfree(usb_req);
+ drvdata->offset += actual;
+ if (i == USB_SG_NUM - 1)
+ sg_mark_end(&usb_req->sg[i]);
+ }
+ usb_req->num_sgs = i;
+ if (atomic_read(&drvdata->usb_free_buf) > 0) {
+ ret = usb_qdss_write(tmcdrvdata->usbch,
+ drvdata->usb_req);
+ if (ret) {
+ devm_kfree(tmcdrvdata->dev, usb_req->sg);
+ devm_kfree(tmcdrvdata->dev, usb_req);
+ usb_req = NULL;
drvdata->usb_req = NULL;
+ dev_err(tmcdrvdata->dev,
+ "Write data failed:%d\n", ret);
+ if (ret == -EAGAIN)
+ continue;
+ return;
}
+ atomic_dec(&drvdata->usb_free_buf);
+
+ } else {
+ dev_dbg(tmcdrvdata->dev,
+ "Drop data, offset = %d, seq = %d, irq = %d\n",
+ drvdata->offset, seq,
+ atomic_read(&drvdata->irq_cnt));
+ devm_kfree(tmcdrvdata->dev, usb_req->sg);
+ devm_kfree(tmcdrvdata->dev, usb_req);
+ drvdata->usb_req = NULL;
}
if (atomic_read(&drvdata->irq_cnt) > 0)
atomic_dec(&drvdata->irq_cnt);
@@ -394,7 +411,8 @@ static void usb_write_done(struct byte_cntr *drvdata,
atomic_inc(&drvdata->usb_free_buf);
if (d_req->status)
pr_err_ratelimited("USB write failed err:%d\n", d_req->status);
- kfree(d_req);
+ devm_kfree(tmcdrvdata->dev, d_req->sg);
+ devm_kfree(tmcdrvdata->dev, d_req);
}
void usb_bypass_notifier(void *priv, unsigned int event,
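
The byte-cntr rework above stops bouncing trace data through one 64 KB allocation and instead hands the USB layer a scatter-gather list of PAGE_SIZE chunks taken straight out of the ETR buffer. A minimal sketch of that SG-building pattern, assuming a hypothetical get_chunk() data source in place of tmc_etr_buf_get_data():

#include <linux/scatterlist.h>
#include <linux/errno.h>

static int demo_build_sg(struct scatterlist *sg, unsigned int nents,
			 char *(*get_chunk)(unsigned int idx))
{
	unsigned int i;

	sg_init_table(sg, nents);	/* zeroes entries, marks the last one */
	for (i = 0; i < nents; i++) {
		char *buf = get_chunk(i);	/* hypothetical data source */

		if (!buf)
			return -ENODATA;
		sg_set_buf(&sg[i], buf, PAGE_SIZE);
	}
	return 0;
}
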
diff --git a/drivers/hwtracing/coresight/coresight-common.h b/drivers/hwtracing/coresight/coresight-common.h
index b49a588..b6db835 100644
--- a/drivers/hwtracing/coresight/coresight-common.h
+++ b/drivers/hwtracing/coresight/coresight-common.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _CORESIGHT_COMMON_H
@@ -16,6 +16,7 @@ struct coresight_csr {
#ifdef CONFIG_CORESIGHT_CSR
extern void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr);
+extern void msm_qdss_csr_enable_flush(struct coresight_csr *csr);
extern void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr);
extern void msm_qdss_csr_disable_flush(struct coresight_csr *csr);
extern int coresight_csr_hwctrl_set(struct coresight_csr *csr, uint64_t addr,
diff --git a/drivers/hwtracing/coresight/coresight-csr.c b/drivers/hwtracing/coresight/coresight-csr.c
index 309e5a0..e0d023719 100644
--- a/drivers/hwtracing/coresight/coresight-csr.c
+++ b/drivers/hwtracing/coresight/coresight-csr.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2012-2013, 2015-2-17 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, 2015-2017, 2019 The Linux Foundation. All rights reserved.
*/
#include <linux/kernel.h>
@@ -66,12 +66,15 @@ do { \
#define BLKSIZE_1024 2
#define BLKSIZE_2048 3
+#define FLUSHPERIOD_2048 0x800
+
struct csr_drvdata {
void __iomem *base;
phys_addr_t pbase;
struct device *dev;
struct coresight_device *csdev;
uint32_t blksize;
+ uint32_t flushperiod;
struct coresight_csr csr;
struct clk *clk;
spinlock_t spin_lock;
@@ -79,6 +82,7 @@ struct csr_drvdata {
bool hwctrl_set_support;
bool set_byte_cntr_support;
bool timestamp_support;
+ bool enable_flush;
};
static LIST_HEAD(csr_list);
@@ -86,10 +90,23 @@ static DEFINE_MUTEX(csr_lock);
#define to_csr_drvdata(c) container_of(c, struct csr_drvdata, csr)
+static void msm_qdss_csr_config_flush_period(struct csr_drvdata *drvdata)
+{
+ uint32_t usbflshctrl;
+
+ CSR_UNLOCK(drvdata);
+
+ usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
+ usbflshctrl = (usbflshctrl & ~0x3FFFC) | (drvdata->flushperiod << 2);
+ csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+
+ CSR_LOCK(drvdata);
+}
+
void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
{
struct csr_drvdata *drvdata;
- uint32_t usbbamctrl, usbflshctrl;
+ uint32_t usbbamctrl;
unsigned long flags;
if (csr == NULL)
@@ -106,12 +123,6 @@ void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
usbbamctrl = (usbbamctrl & ~0x3) | drvdata->blksize;
csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
- usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
- usbflshctrl = (usbflshctrl & ~0x3FFFC) | (0xFFFF << 2);
- csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
- usbflshctrl |= 0x2;
- csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
-
usbbamctrl |= 0x4;
csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
@@ -120,6 +131,36 @@ void msm_qdss_csr_enable_bam_to_usb(struct coresight_csr *csr)
}
EXPORT_SYMBOL(msm_qdss_csr_enable_bam_to_usb);
+void msm_qdss_csr_enable_flush(struct coresight_csr *csr)
+{
+ struct csr_drvdata *drvdata;
+ uint32_t usbflshctrl;
+ unsigned long flags;
+
+ if (csr == NULL)
+ return;
+
+ drvdata = to_csr_drvdata(csr);
+ if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support)
+ return;
+
+ spin_lock_irqsave(&drvdata->spin_lock, flags);
+
+ msm_qdss_csr_config_flush_period(drvdata);
+
+ CSR_UNLOCK(drvdata);
+
+ usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
+ usbflshctrl |= 0x2;
+ csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+
+ CSR_LOCK(drvdata);
+ drvdata->enable_flush = true;
+ spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+}
+EXPORT_SYMBOL(msm_qdss_csr_enable_flush);
+
void msm_qdss_csr_disable_bam_to_usb(struct coresight_csr *csr)
{
struct csr_drvdata *drvdata;
@@ -166,6 +207,7 @@ void msm_qdss_csr_disable_flush(struct coresight_csr *csr)
csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
CSR_LOCK(drvdata);
+ drvdata->enable_flush = false;
spin_unlock_irqrestore(&drvdata->spin_lock, flags);
}
EXPORT_SYMBOL(msm_qdss_csr_disable_flush);
@@ -295,14 +337,66 @@ static ssize_t timestamp_show(struct device *dev,
static DEVICE_ATTR_RO(timestamp);
+static ssize_t flushperiod_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct csr_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support) {
+ dev_err(dev, "Invalid param\n");
+ return -EINVAL;
+ }
+
+ return scnprintf(buf, PAGE_SIZE, "%#x\n", drvdata->flushperiod);
+}
+
+static ssize_t flushperiod_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t size)
+{
+ unsigned long flags;
+ unsigned long val;
+ struct csr_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+ if (IS_ERR_OR_NULL(drvdata) || !drvdata->usb_bam_support) {
+ dev_err(dev, "Invalid param\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&drvdata->spin_lock, flags);
+
+ if (kstrtoul(buf, 0, &val) || val > 0xffff) {
+ spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+ return -EINVAL;
+ }
+
+ if (drvdata->flushperiod == val)
+ goto out;
+
+ drvdata->flushperiod = val;
+
+ if (drvdata->enable_flush)
+ msm_qdss_csr_config_flush_period(drvdata);
+
+out:
+ spin_unlock_irqrestore(&drvdata->spin_lock, flags);
+ return size;
+}
+
+static DEVICE_ATTR_RW(flushperiod);
+
static struct attribute *csr_attrs[] = {
&dev_attr_timestamp.attr,
+ &dev_attr_flushperiod.attr,
NULL,
};
static struct attribute_group csr_attr_grp = {
.attrs = csr_attrs,
};
+
static const struct attribute_group *csr_attr_grps[] = {
&csr_attr_grp,
NULL,
@@ -374,14 +468,16 @@ static int csr_probe(struct platform_device *pdev)
else
dev_dbg(dev, "timestamp_support operation supported\n");
+ if (drvdata->usb_bam_support)
+ drvdata->flushperiod = FLUSHPERIOD_2048;
+
desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
if (!desc)
return -ENOMEM;
desc->type = CORESIGHT_DEV_TYPE_NONE;
desc->pdata = pdev->dev.platform_data;
desc->dev = &pdev->dev;
- if (drvdata->timestamp_support)
- desc->groups = csr_attr_grps;
+ desc->groups = csr_attr_grps;
drvdata->csdev = coresight_register(desc);
if (IS_ERR(drvdata->csdev))
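
The flushperiod node added above follows the standard DEVICE_ATTR_RW() shape: a <name>_show/<name>_store pair plus the macro, which generates dev_attr_<name> for the attribute group. A stripped-down sketch of the same pattern, with a hypothetical demo driver struct and the locking omitted:

#include <linux/device.h>
#include <linux/kernel.h>

struct demo_drvdata {		/* hypothetical driver state */
	u32 flushperiod;
};

static ssize_t flushperiod_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct demo_drvdata *d = dev_get_drvdata(dev);

	return scnprintf(buf, PAGE_SIZE, "%#x\n", d->flushperiod);
}

static ssize_t flushperiod_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	struct demo_drvdata *d = dev_get_drvdata(dev);
	unsigned long val;

	if (kstrtoul(buf, 0, &val) || val > 0xffff)
		return -EINVAL;	/* the hardware field is 16 bits wide */

	d->flushperiod = val;
	return size;
}
static DEVICE_ATTR_RW(flushperiod);	/* emits dev_attr_flushperiod */
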
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
index 4217742..f98abee 100644
--- a/drivers/hwtracing/coresight/coresight-tmc-etr.c
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -1015,11 +1015,12 @@ static void tmc_etr_sync_sysfs_buf(struct tmc_drvdata *drvdata)
tmc_sync_etr_buf(drvdata);
}
-void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata, bool flush)
{
CS_UNLOCK(drvdata->base);
- tmc_flush_and_stop(drvdata);
+ if (flush)
+ tmc_flush_and_stop(drvdata);
/*
* When operating in sysFS mode the content of the buffer needs to be
* read before the TMC is disabled.
@@ -1114,6 +1115,7 @@ static void __tmc_etr_enable_to_bam(struct tmc_drvdata *drvdata)
CS_LOCK(drvdata->base);
+ msm_qdss_csr_enable_flush(drvdata->csr);
drvdata->enable_to_bam = true;
}
@@ -1442,7 +1444,7 @@ static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
return -EINVAL;
}
-static void tmc_disable_etr_sink(struct coresight_device *csdev)
+static void _tmc_disable_etr_sink(struct coresight_device *csdev, bool flush)
{
unsigned long flags;
struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -1468,10 +1470,10 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
goto out;
} else {
usb_qdss_close(drvdata->usbch);
- tmc_etr_disable_hw(drvdata);
+ tmc_etr_disable_hw(drvdata, flush);
}
} else {
- tmc_etr_disable_hw(drvdata);
+ tmc_etr_disable_hw(drvdata, flush);
}
drvdata->mode = CS_MODE_DISABLED;
}
@@ -1506,6 +1508,11 @@ static void tmc_disable_etr_sink(struct coresight_device *csdev)
dev_info(drvdata->dev, "TMC-ETR disabled\n");
}
+static void tmc_disable_etr_sink(struct coresight_device *csdev)
+{
+ _tmc_disable_etr_sink(csdev, true);
+}
+
int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode)
{
enum tmc_etr_out_mode new_mode, old_mode;
@@ -1525,7 +1532,7 @@ int tmc_etr_switch_mode(struct tmc_drvdata *drvdata, const char *out_mode)
return 0;
}
- tmc_disable_etr_sink(drvdata->csdev);
+ _tmc_disable_etr_sink(drvdata->csdev, false);
old_mode = drvdata->out_mode;
drvdata->out_mode = new_mode;
if (tmc_enable_etr_sink_sysfs(drvdata->csdev)) {
@@ -1587,7 +1594,7 @@ int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
/* Disable the TMC if need be */
if (drvdata->mode == CS_MODE_SYSFS)
- tmc_etr_disable_hw(drvdata);
+ tmc_etr_disable_hw(drvdata, true);
drvdata->reading = true;
out:
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
index 994339a..a30e360 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.h
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -312,7 +312,7 @@ void tmc_free_etr_buf(struct etr_buf *etr_buf);
void __tmc_etr_disable_to_bam(struct tmc_drvdata *drvdata);
void tmc_etr_bam_disable(struct tmc_drvdata *drvdata);
void tmc_etr_enable_hw(struct tmc_drvdata *drvdata);
-void tmc_etr_disable_hw(struct tmc_drvdata *drvdata);
+void tmc_etr_disable_hw(struct tmc_drvdata *drvdata, bool flush);
void usb_notifier(void *priv, unsigned int event, struct qdss_request *d_req,
struct usb_qdss_ch *ch);
int tmc_etr_bam_init(struct amba_device *adev,
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c
index a492da9..ac9c948 100644
--- a/drivers/i2c/busses/i2c-stm32f7.c
+++ b/drivers/i2c/busses/i2c-stm32f7.c
@@ -24,7 +24,6 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
-#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
@@ -1782,15 +1781,14 @@ static struct i2c_algorithm stm32f7_i2c_algo = {
static int stm32f7_i2c_probe(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.of_node;
struct stm32f7_i2c_dev *i2c_dev;
const struct stm32f7_i2c_setup *setup;
struct resource *res;
- u32 irq_error, irq_event, clk_rate, rise_time, fall_time;
+ u32 clk_rate, rise_time, fall_time;
struct i2c_adapter *adap;
struct reset_control *rst;
dma_addr_t phy_addr;
- int ret;
+ int irq_error, irq_event, ret;
i2c_dev = devm_kzalloc(&pdev->dev, sizeof(*i2c_dev), GFP_KERNEL);
if (!i2c_dev)
@@ -1802,16 +1800,20 @@ static int stm32f7_i2c_probe(struct platform_device *pdev)
return PTR_ERR(i2c_dev->base);
phy_addr = (dma_addr_t)res->start;
- irq_event = irq_of_parse_and_map(np, 0);
- if (!irq_event) {
- dev_err(&pdev->dev, "IRQ event missing or invalid\n");
- return -EINVAL;
+ irq_event = platform_get_irq(pdev, 0);
+ if (irq_event <= 0) {
+ if (irq_event != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get IRQ event: %d\n",
+ irq_event);
+ return irq_event ? : -ENOENT;
}
- irq_error = irq_of_parse_and_map(np, 1);
- if (!irq_error) {
- dev_err(&pdev->dev, "IRQ error missing or invalid\n");
- return -EINVAL;
+ irq_error = platform_get_irq(pdev, 1);
+ if (irq_error <= 0) {
+ if (irq_error != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get IRQ error: %d\n",
+ irq_error);
+ return irq_error ? : -ENOENT;
}
i2c_dev->clk = devm_clk_get(&pdev->dev, NULL);
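
The switch from irq_of_parse_and_map() to platform_get_irq() matters because the latter returns a negative errno, letting probe propagate -EPROBE_DEFER instead of failing hard while the interrupt parent is not ready yet. The error-handling idiom, sketched on its own:

#include <linux/platform_device.h>

static int demo_get_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq <= 0) {
		/* Probe deferral is routine; do not spam the log for it. */
		if (irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "failed to get IRQ: %d\n", irq);
		return irq ? : -ENOENT;	/* map a legacy 0 return to -ENOENT */
	}
	return irq;
}
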
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 5b0e1d9..1de10e5 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -185,7 +185,7 @@ static int i2c_generic_bus_free(struct i2c_adapter *adap)
int i2c_generic_scl_recovery(struct i2c_adapter *adap)
{
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
- int i = 0, scl = 1, ret;
+ int i = 0, scl = 1, ret = 0;
if (bri->prepare_recovery)
bri->prepare_recovery(adap);
diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c
index 063e89e..c776a35 100644
--- a/drivers/iio/accel/cros_ec_accel_legacy.c
+++ b/drivers/iio/accel/cros_ec_accel_legacy.c
@@ -328,7 +328,6 @@ static const struct iio_chan_spec_ext_info cros_ec_accel_legacy_ext_info[] = {
.modified = 1, \
.info_mask_separate = \
BIT(IIO_CHAN_INFO_RAW) | \
- BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_CALIBBIAS), \
.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE), \
.ext_info = cros_ec_accel_legacy_ext_info, \
diff --git a/drivers/iio/adc/max9611.c b/drivers/iio/adc/max9611.c
index 0538ff8..49c1956 100644
--- a/drivers/iio/adc/max9611.c
+++ b/drivers/iio/adc/max9611.c
@@ -86,7 +86,7 @@
#define MAX9611_TEMP_MAX_POS 0x7f80
#define MAX9611_TEMP_MAX_NEG 0xff80
#define MAX9611_TEMP_MIN_NEG 0xd980
-#define MAX9611_TEMP_MASK GENMASK(7, 15)
+#define MAX9611_TEMP_MASK GENMASK(15, 7)
#define MAX9611_TEMP_SHIFT 0x07
#define MAX9611_TEMP_RAW(_r) ((_r) >> MAX9611_TEMP_SHIFT)
#define MAX9611_TEMP_SCALE_NUM 1000000
@@ -483,7 +483,7 @@ static int max9611_init(struct max9611_dev *max9611)
if (ret)
return ret;
- regval = ret & MAX9611_TEMP_MASK;
+ regval &= MAX9611_TEMP_MASK;
if ((regval > MAX9611_TEMP_MAX_POS &&
regval < MAX9611_TEMP_MIN_NEG) ||
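
The max9611 fix above is purely an argument-order bug: GENMASK(h, l) takes the high bit first, so GENMASK(15, 7) is 0xff80 (bits 15..7 set), while the original GENMASK(7, 15) expands to an effectively empty mask and the temperature range check could never fire. In isolation (demo_* names are illustrative):

#include <linux/bits.h>
#include <linux/types.h>

/* GENMASK(high, low) sets bits high..low inclusive:
 * GENMASK(15, 7) == 0xff80, matching MAX9611_TEMP_MAX_POS above. */
#define DEMO_TEMP_MASK	GENMASK(15, 7)

static inline u16 demo_temp_bits(u16 reg)
{
	return reg & DEMO_TEMP_MASK;	/* keep only the temperature field */
}
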
diff --git a/drivers/iio/adc/stm32-dfsdm-adc.c b/drivers/iio/adc/stm32-dfsdm-adc.c
index fcd4a1c..15a1152 100644
--- a/drivers/iio/adc/stm32-dfsdm-adc.c
+++ b/drivers/iio/adc/stm32-dfsdm-adc.c
@@ -1144,6 +1144,12 @@ static int stm32_dfsdm_adc_probe(struct platform_device *pdev)
* So IRQ associated to filter instance 0 is dedicated to the Filter 0.
*/
irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ if (irq != -EPROBE_DEFER)
+ dev_err(dev, "Failed to get IRQ: %d\n", irq);
+ return irq;
+ }
+
ret = devm_request_irq(dev, irq, stm32_dfsdm_irq,
0, pdev->name, adc);
if (ret < 0) {
diff --git a/drivers/iio/adc/stm32-dfsdm-core.c b/drivers/iio/adc/stm32-dfsdm-core.c
index bf089f5..9416306 100644
--- a/drivers/iio/adc/stm32-dfsdm-core.c
+++ b/drivers/iio/adc/stm32-dfsdm-core.c
@@ -213,6 +213,8 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
}
priv->dfsdm.phys_base = res->start;
priv->dfsdm.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(priv->dfsdm.base))
+ return PTR_ERR(priv->dfsdm.base);
/*
* "dfsdm" clock is mandatory for DFSDM peripheral clocking.
@@ -222,8 +224,10 @@ static int stm32_dfsdm_parse_of(struct platform_device *pdev,
*/
priv->clk = devm_clk_get(&pdev->dev, "dfsdm");
if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "No stm32_dfsdm_clk clock found\n");
- return -EINVAL;
+ ret = PTR_ERR(priv->clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Failed to get clock (%d)\n", ret);
+ return ret;
}
priv->aclk = devm_clk_get(&pdev->dev, "audio");
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index ef459f2..7586c1d 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -3182,18 +3182,18 @@ static int ib_mad_port_open(struct ib_device *device,
if (has_smi)
cq_size *= 2;
+ port_priv->pd = ib_alloc_pd(device, 0);
+ if (IS_ERR(port_priv->pd)) {
+ dev_err(&device->dev, "Couldn't create ib_mad PD\n");
+ ret = PTR_ERR(port_priv->pd);
+ goto error3;
+ }
+
port_priv->cq = ib_alloc_cq(port_priv->device, port_priv, cq_size, 0,
IB_POLL_WORKQUEUE);
if (IS_ERR(port_priv->cq)) {
dev_err(&device->dev, "Couldn't create ib_mad CQ\n");
ret = PTR_ERR(port_priv->cq);
- goto error3;
- }
-
- port_priv->pd = ib_alloc_pd(device, 0);
- if (IS_ERR(port_priv->pd)) {
- dev_err(&device->dev, "Couldn't create ib_mad PD\n");
- ret = PTR_ERR(port_priv->pd);
goto error4;
}
@@ -3236,11 +3236,11 @@ static int ib_mad_port_open(struct ib_device *device,
error7:
destroy_mad_qp(&port_priv->qp_info[0]);
error6:
- ib_dealloc_pd(port_priv->pd);
-error4:
ib_free_cq(port_priv->cq);
cleanup_recv_queue(&port_priv->qp_info[1]);
cleanup_recv_queue(&port_priv->qp_info[0]);
+error4:
+ ib_dealloc_pd(port_priv->pd);
error3:
kfree(port_priv);
@@ -3270,8 +3270,8 @@ static int ib_mad_port_close(struct ib_device *device, int port_num)
destroy_workqueue(port_priv->wq);
destroy_mad_qp(&port_priv->qp_info[1]);
destroy_mad_qp(&port_priv->qp_info[0]);
- ib_dealloc_pd(port_priv->pd);
ib_free_cq(port_priv->cq);
+ ib_dealloc_pd(port_priv->pd);
cleanup_recv_queue(&port_priv->qp_info[1]);
cleanup_recv_queue(&port_priv->qp_info[0]);
/* XXX: Handle deallocation of MAD registration tables */
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 7b794a1..8be082e 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1232,7 +1232,6 @@ static int roce_resolve_route_from_path(struct sa_path_rec *rec,
{
struct rdma_dev_addr dev_addr = {};
union {
- struct sockaddr _sockaddr;
struct sockaddr_in _sockaddr_in;
struct sockaddr_in6 _sockaddr_in6;
} sgid_addr, dgid_addr;
@@ -1249,12 +1248,12 @@ static int roce_resolve_route_from_path(struct sa_path_rec *rec,
*/
dev_addr.net = &init_net;
- rdma_gid2ip(&sgid_addr._sockaddr, &rec->sgid);
- rdma_gid2ip(&dgid_addr._sockaddr, &rec->dgid);
+ rdma_gid2ip((struct sockaddr *)&sgid_addr, &rec->sgid);
+ rdma_gid2ip((struct sockaddr *)&dgid_addr, &rec->dgid);
/* validate the route */
- ret = rdma_resolve_ip_route(&sgid_addr._sockaddr,
- &dgid_addr._sockaddr, &dev_addr);
+ ret = rdma_resolve_ip_route((struct sockaddr *)&sgid_addr,
+ (struct sockaddr *)&dgid_addr, &dev_addr);
if (ret)
return ret;
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index c34a685..a18f3f8 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -49,6 +49,7 @@
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
@@ -868,11 +869,14 @@ static int ib_umad_unreg_agent(struct ib_umad_file *file, u32 __user *arg)
if (get_user(id, arg))
return -EFAULT;
+ if (id >= IB_UMAD_MAX_AGENTS)
+ return -EINVAL;
mutex_lock(&file->port->file_mutex);
mutex_lock(&file->mutex);
- if (id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
+ id = array_index_nospec(id, IB_UMAD_MAX_AGENTS);
+ if (!__get_agent(file, id)) {
ret = -EINVAL;
goto out;
}
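
The user_mad change is the canonical Spectre-v1 hardening pattern: bounds-check the user-supplied index first, then clamp it with array_index_nospec() so a mispredicted branch cannot speculatively index past the array. The same pattern appears in the hfi1 hunk below. A minimal sketch with a hypothetical table:

#include <linux/nospec.h>
#include <linux/types.h>

#define DEMO_MAX_AGENTS 32
static void *demo_agents[DEMO_MAX_AGENTS];

static void *demo_get_agent(u32 id)
{
	if (id >= DEMO_MAX_AGENTS)
		return NULL;
	/* Clamp under speculation; a no-op on the architectural path. */
	id = array_index_nospec(id, DEMO_MAX_AGENTS);
	return demo_agents[id];
}
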
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index d8eb4dc..6aa5a8a 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -14586,7 +14586,7 @@ void hfi1_deinit_vnic_rsm(struct hfi1_devdata *dd)
clear_rcvctrl(dd, RCV_CTRL_RCV_RSM_ENABLE_SMASK);
}
-static void init_rxe(struct hfi1_devdata *dd)
+static int init_rxe(struct hfi1_devdata *dd)
{
struct rsm_map_table *rmt;
u64 val;
@@ -14595,6 +14595,9 @@ static void init_rxe(struct hfi1_devdata *dd)
write_csr(dd, RCV_ERR_MASK, ~0ull);
rmt = alloc_rsm_map_table(dd);
+ if (!rmt)
+ return -ENOMEM;
+
/* set up QOS, including the QPN map table */
init_qos(dd, rmt);
init_user_fecn_handling(dd, rmt);
@@ -14621,6 +14624,7 @@ static void init_rxe(struct hfi1_devdata *dd)
val |= ((4ull & RCV_BYPASS_HDR_SIZE_MASK) <<
RCV_BYPASS_HDR_SIZE_SHIFT);
write_csr(dd, RCV_BYPASS, val);
+ return 0;
}
static void init_other(struct hfi1_devdata *dd)
@@ -15163,7 +15167,10 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
goto bail_cleanup;
/* set initial RXE CSRs */
- init_rxe(dd);
+ ret = init_rxe(dd);
+ if (ret)
+ goto bail_cleanup;
+
/* set initial TXE CSRs */
init_txe(dd);
/* set initial non-RXE, non-TXE CSRs */
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 27d9c4c..1ad38c8 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -54,6 +54,7 @@
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <rdma/opa_addr.h>
+#include <linux/nospec.h>
#include "hfi.h"
#include "common.h"
@@ -1596,6 +1597,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
sl = rdma_ah_get_sl(ah_attr);
if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
return -EINVAL;
+ sl = array_index_nospec(sl, ARRAY_SIZE(ibp->sl_to_sc));
sc5 = ibp->sl_to_sc[sl];
if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index e2e6c74..a5e3349 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -806,6 +806,8 @@ static int i40iw_query_qp(struct ib_qp *ibqp,
struct i40iw_qp *iwqp = to_iwqp(ibqp);
struct i40iw_sc_qp *qp = &iwqp->sc_qp;
+ attr->qp_state = iwqp->ibqp_state;
+ attr->cur_qp_state = attr->qp_state;
attr->qp_access_flags = 0;
attr->cap.max_send_wr = qp->qp_uk.sq_size;
attr->cap.max_recv_wr = qp->qp_uk.rq_size;
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 32a9e92..cdf6e26 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -197,19 +197,33 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
vl_15_dropped);
}
-static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
+static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
- int err;
+ struct mlx5_core_dev *mdev;
+ bool native_port = true;
+ u8 mdev_port_num;
void *out_cnt;
+ int err;
+ mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
+ if (!mdev) {
+ /* Fail to get the native port, likely due to 2nd port is still
+ * unaffiliated. In such case default to 1st port and attached
+ * PF device.
+ */
+ native_port = false;
+ mdev = dev->mdev;
+ mdev_port_num = 1;
+ }
/* Declaring support of extended counters */
if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
struct ib_class_port_info cpi = {};
cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
memcpy((out_mad->data + 40), &cpi, sizeof(cpi));
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+ err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+ goto done;
}
if (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT) {
@@ -218,11 +232,13 @@ static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
int sz = MLX5_ST_SZ_BYTES(query_vport_counter_out);
out_cnt = kvzalloc(sz, GFP_KERNEL);
- if (!out_cnt)
- return IB_MAD_RESULT_FAILURE;
+ if (!out_cnt) {
+ err = IB_MAD_RESULT_FAILURE;
+ goto done;
+ }
err = mlx5_core_query_vport_counter(mdev, 0, 0,
- port_num, out_cnt, sz);
+ mdev_port_num, out_cnt, sz);
if (!err)
pma_cnt_ext_assign(pma_cnt_ext, out_cnt);
} else {
@@ -231,20 +247,23 @@ static int process_pma_cmd(struct mlx5_core_dev *mdev, u8 port_num,
int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
out_cnt = kvzalloc(sz, GFP_KERNEL);
- if (!out_cnt)
- return IB_MAD_RESULT_FAILURE;
+ if (!out_cnt) {
+ err = IB_MAD_RESULT_FAILURE;
+ goto done;
+ }
- err = mlx5_core_query_ib_ppcnt(mdev, port_num,
+ err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num,
out_cnt, sz);
if (!err)
pma_cnt_assign(pma_cnt, out_cnt);
- }
-
+ }
kvfree(out_cnt);
- if (err)
- return IB_MAD_RESULT_FAILURE;
-
- return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+ err = err ? IB_MAD_RESULT_FAILURE :
+ IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+done:
+ if (native_port)
+ mlx5_ib_put_native_port_mdev(dev, port_num);
+ return err;
}
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -256,8 +275,6 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
const struct ib_mad *in_mad = (const struct ib_mad *)in;
struct ib_mad *out_mad = (struct ib_mad *)out;
- struct mlx5_core_dev *mdev;
- u8 mdev_port_num;
int ret;
if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
@@ -266,19 +283,14 @@ int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
memset(out_mad->data, 0, sizeof(out_mad->data));
- mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
- if (!mdev)
- return IB_MAD_RESULT_FAILURE;
-
- if (MLX5_CAP_GEN(mdev, vport_counters) &&
+ if (MLX5_CAP_GEN(dev->mdev, vport_counters) &&
in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
in_mad->mad_hdr.method == IB_MGMT_METHOD_GET) {
- ret = process_pma_cmd(mdev, mdev_port_num, in_mad, out_mad);
+ ret = process_pma_cmd(dev, port_num, in_mad, out_mad);
} else {
ret = process_mad(ibdev, mad_flags, port_num, in_wc, in_grh,
in_mad, out_mad);
}
- mlx5_ib_put_native_port_mdev(dev, port_num);
return ret;
}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 320d4df..941d1df 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -467,6 +467,7 @@ struct mlx5_umr_wr {
u64 length;
int access_flags;
u32 mkey;
+ u8 ignore_free_state:1;
};
static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 7df4a4f..bd1fdad 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -51,22 +51,12 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
-static bool umr_can_modify_entity_size(struct mlx5_ib_dev *dev)
-{
- return !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled);
-}
static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}
-static bool use_umr(struct mlx5_ib_dev *dev, int order)
-{
- return order <= mr_cache_max_order(dev) &&
- umr_can_modify_entity_size(dev);
-}
-
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
@@ -548,14 +538,17 @@ void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return;
c = order2idx(dev, mr->order);
- if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
- mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
+ WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);
+
+ if (unreg_umr(dev, mr)) {
+ mr->allocated_from_cache = false;
+ destroy_mkey(dev, mr);
+ ent = &cache->ent[c];
+ if (ent->cur < ent->limit)
+ queue_work(cache->wq, &ent->work);
return;
}
- if (unreg_umr(dev, mr))
- return;
-
ent = &cache->ent[c];
spin_lock_irq(&ent->lock);
list_add_tail(&mr->list, &ent->head);
@@ -1302,7 +1295,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr = NULL;
- bool populate_mtts = false;
+ bool use_umr;
struct ib_umem *umem;
int page_shift;
int npages;
@@ -1335,29 +1328,30 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
if (err < 0)
return ERR_PTR(err);
- if (use_umr(dev, order)) {
+ use_umr = !MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled) &&
+ (!MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled) ||
+ !MLX5_CAP_GEN(dev->mdev, atomic));
+
+ if (order <= mr_cache_max_order(dev) && use_umr) {
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
page_shift, order, access_flags);
if (PTR_ERR(mr) == -EAGAIN) {
mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
mr = NULL;
}
- populate_mtts = false;
} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
if (access_flags & IB_ACCESS_ON_DEMAND) {
err = -EINVAL;
pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
goto error;
}
- populate_mtts = true;
+ use_umr = false;
}
if (!mr) {
- if (!umr_can_modify_entity_size(dev))
- populate_mtts = true;
mutex_lock(&dev->slow_path_mutex);
mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
- page_shift, access_flags, populate_mtts);
+ page_shift, access_flags, !use_umr);
mutex_unlock(&dev->slow_path_mutex);
}
@@ -1375,7 +1369,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
update_odp_mr(mr);
#endif
- if (!populate_mtts) {
+ if (use_umr) {
int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
if (access_flags & IB_ACCESS_ON_DEMAND)
@@ -1408,9 +1402,11 @@ static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
return 0;
umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
- MLX5_IB_SEND_UMR_FAIL_IF_FREE;
+ MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
umrwr.wr.opcode = MLX5_IB_WR_UMR;
+ umrwr.pd = dev->umrc.pd;
umrwr.mkey = mr->mmkey.key;
+ umrwr.ignore_free_state = 1;
return mlx5_ib_post_send_wait(dev, &umrwr);
}
@@ -1615,10 +1611,10 @@ static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
mr->sig = NULL;
}
- mlx5_free_priv_descs(mr);
-
- if (!allocated_from_cache)
+ if (!allocated_from_cache) {
destroy_mkey(dev, mr);
+ mlx5_free_priv_descs(mr);
+ }
}
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 183fe5c..77b1f3f 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1501,7 +1501,6 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
}
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
- MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
memcpy(rss_key, ucmd.rx_hash_key, len);
break;
}
@@ -3717,10 +3716,14 @@ static int set_reg_umr_segment(struct mlx5_ib_dev *dev,
memset(umr, 0, sizeof(*umr));
- if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
- umr->flags = MLX5_UMR_CHECK_FREE; /* fail if free */
- else
- umr->flags = MLX5_UMR_CHECK_NOT_FREE; /* fail if not free */
+ if (!umrwr->ignore_free_state) {
+ if (wr->send_flags & MLX5_IB_SEND_UMR_FAIL_IF_FREE)
+ /* fail if free */
+ umr->flags = MLX5_UMR_CHECK_FREE;
+ else
+ /* fail if not free */
+ umr->flags = MLX5_UMR_CHECK_NOT_FREE;
+ }
umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));
if (wr->send_flags & MLX5_IB_SEND_UMR_UPDATE_XLT) {
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 4111b79..681d8e0 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -435,6 +435,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
qp->resp.va = reth_va(pkt);
qp->resp.rkey = reth_rkey(pkt);
qp->resp.resid = reth_len(pkt);
+ qp->resp.length = reth_len(pkt);
}
access = (pkt->mask & RXE_READ_MASK) ? IB_ACCESS_REMOTE_READ
: IB_ACCESS_REMOTE_WRITE;
@@ -859,7 +860,9 @@ static enum resp_states do_complete(struct rxe_qp *qp,
pkt->mask & RXE_WRITE_MASK) ?
IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
wc->vendor_err = 0;
- wc->byte_len = wqe->dma.length - wqe->dma.resid;
+ wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
+ pkt->mask & RXE_WRITE_MASK) ?
+ qp->resp.length : wqe->dma.length - wqe->dma.resid;
/* fields after byte_len are different between kernel and user
* space
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index 332a16da..3b731c7 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -212,6 +212,7 @@ struct rxe_resp_info {
struct rxe_mem *mr;
u32 resid;
u32 rkey;
+ u32 length;
u64 atomic_orig;
/* SRQ only */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 0096154..78dd36d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1892,12 +1892,6 @@ static void ipoib_child_init(struct net_device *ndev)
struct ipoib_dev_priv *priv = ipoib_priv(ndev);
struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
- dev_hold(priv->parent);
-
- down_write(&ppriv->vlan_rwsem);
- list_add_tail(&priv->list, &ppriv->child_intfs);
- up_write(&ppriv->vlan_rwsem);
-
priv->max_ib_mtu = ppriv->max_ib_mtu;
set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
@@ -1940,6 +1934,17 @@ static int ipoib_ndo_init(struct net_device *ndev)
if (rc) {
pr_warn("%s: failed to initialize device: %s port %d (ret = %d)\n",
priv->ca->name, priv->dev->name, priv->port, rc);
+ return rc;
+ }
+
+ if (priv->parent) {
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ dev_hold(priv->parent);
+
+ down_write(&ppriv->vlan_rwsem);
+ list_add_tail(&priv->list, &ppriv->child_intfs);
+ up_write(&ppriv->vlan_rwsem);
}
return 0;
@@ -1957,6 +1962,14 @@ static void ipoib_ndo_uninit(struct net_device *dev)
*/
WARN_ON(!list_empty(&priv->child_intfs));
+ if (priv->parent) {
+ struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
+
+ down_write(&ppriv->vlan_rwsem);
+ list_del(&priv->list);
+ up_write(&ppriv->vlan_rwsem);
+ }
+
ipoib_neigh_hash_uninit(dev);
ipoib_ib_dev_cleanup(dev);
@@ -1968,15 +1981,8 @@ static void ipoib_ndo_uninit(struct net_device *dev)
priv->wq = NULL;
}
- if (priv->parent) {
- struct ipoib_dev_priv *ppriv = ipoib_priv(priv->parent);
-
- down_write(&ppriv->vlan_rwsem);
- list_del(&priv->list);
- up_write(&ppriv->vlan_rwsem);
-
+ if (priv->parent)
dev_put(priv->parent);
- }
}
static int ipoib_set_vf_link_state(struct net_device *dev, int vf, int link_state)
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index 7807325..c431df7 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -141,7 +141,12 @@ static int iforce_usb_probe(struct usb_interface *intf,
return -ENODEV;
epirq = &interface->endpoint[0].desc;
+ if (!usb_endpoint_is_int_in(epirq))
+ return -ENODEV;
+
epout = &interface->endpoint[1].desc;
+ if (!usb_endpoint_is_int_out(epout))
+ return -ENODEV;
if (!(iforce = kzalloc(sizeof(struct iforce) + 32, GFP_KERNEL)))
goto fail;
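
Both this hunk and the kbtab one further down plug the same hole: a malformed or malicious USB device can present fewer endpoints, or endpoints of the wrong type, than the driver assumes, so probe must validate them before wiring up URBs. The check in isolation (demo_* names are illustrative):

#include <linux/usb.h>

static int demo_validate_endpoints(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;

	if (alt->desc.bNumEndpoints < 2)
		return -ENODEV;

	/* Expect interrupt-IN then interrupt-OUT, as iforce does. */
	if (!usb_endpoint_is_int_in(&alt->endpoint[0].desc))
		return -ENODEV;
	if (!usb_endpoint_is_int_out(&alt->endpoint[1].desc))
		return -ENODEV;

	return 0;
}
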
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 530142b..eb9b9de 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1810,6 +1810,30 @@ static int elantech_create_smbus(struct psmouse *psmouse,
leave_breadcrumbs);
}
+static bool elantech_use_host_notify(struct psmouse *psmouse,
+ struct elantech_device_info *info)
+{
+ if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
+ return true;
+
+ switch (info->bus) {
+ case ETP_BUS_PS2_ONLY:
+ /* expected case */
+ break;
+ case ETP_BUS_SMB_HST_NTFY_ONLY:
+ case ETP_BUS_PS2_SMB_HST_NTFY:
+ /* SMbus implementation is stable since 2018 */
+ if (dmi_get_bios_year() >= 2018)
+ return true;
+ default:
+ psmouse_dbg(psmouse,
+ "Ignoring SMBus bus provider %d\n", info->bus);
+ break;
+ }
+
+ return false;
+}
+
/**
* elantech_setup_smbus - called once the PS/2 devices are enumerated
* and decides to instantiate an SMBus InterTouch device.
@@ -1829,7 +1853,7 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
* i2c_blacklist_pnp_ids.
* Old ICs are up to the user to decide.
*/
- if (!ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version) ||
+ if (!elantech_use_host_notify(psmouse, info) ||
psmouse_matches_pnp_id(psmouse, i2c_blacklist_pnp_ids))
return -ENXIO;
}
@@ -1849,34 +1873,6 @@ static int elantech_setup_smbus(struct psmouse *psmouse,
return 0;
}
-static bool elantech_use_host_notify(struct psmouse *psmouse,
- struct elantech_device_info *info)
-{
- if (ETP_NEW_IC_SMBUS_HOST_NOTIFY(info->fw_version))
- return true;
-
- switch (info->bus) {
- case ETP_BUS_PS2_ONLY:
- /* expected case */
- break;
- case ETP_BUS_SMB_ALERT_ONLY:
- /* fall-through */
- case ETP_BUS_PS2_SMB_ALERT:
- psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n");
- break;
- case ETP_BUS_SMB_HST_NTFY_ONLY:
- /* fall-through */
- case ETP_BUS_PS2_SMB_HST_NTFY:
- return true;
- default:
- psmouse_dbg(psmouse,
- "Ignoring SMBus bus provider %d.\n",
- info->bus);
- }
-
- return false;
-}
-
int elantech_init_smbus(struct psmouse *psmouse)
{
struct elantech_device_info info;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index af7d484..06cebde 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -185,6 +185,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN2055", /* E580 */
"SYN3052", /* HP EliteBook 840 G4 */
"SYN3221", /* HP 15-ay000 */
+ "SYN323d", /* HP Spectre X360 13-w013dx */
NULL
};
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 10a0391..538986e 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -161,7 +161,8 @@ struct trackpoint_data {
#ifdef CONFIG_MOUSE_PS2_TRACKPOINT
int trackpoint_detect(struct psmouse *psmouse, bool set_properties);
#else
-inline int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
+static inline int trackpoint_detect(struct psmouse *psmouse,
+ bool set_properties)
{
return -ENOSYS;
}
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 75b5006..b1cf0c9 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -116,6 +116,10 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
if (intf->cur_altsetting->desc.bNumEndpoints < 1)
return -ENODEV;
+ endpoint = &intf->cur_altsetting->endpoint[0].desc;
+ if (!usb_endpoint_is_int_in(endpoint))
+ return -ENODEV;
+
kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
input_dev = input_allocate_device();
if (!kbtab || !input_dev)
@@ -154,8 +158,6 @@ static int kbtab_probe(struct usb_interface *intf, const struct usb_device_id *i
input_set_abs_params(input_dev, ABS_Y, 0, 0x1750, 4, 0);
input_set_abs_params(input_dev, ABS_PRESSURE, 0, 0xff, 0, 0);
- endpoint = &intf->cur_altsetting->endpoint[0].desc;
-
usb_fill_int_urb(kbtab->irq, dev,
usb_rcvintpipe(dev, endpoint->bEndpointAddress),
kbtab->data, 8,
diff --git a/drivers/input/touchscreen/st/fts.c b/drivers/input/touchscreen/st/fts.c
index 6a3953d..be3f1ae 100644
--- a/drivers/input/touchscreen/st/fts.c
+++ b/drivers/input/touchscreen/st/fts.c
@@ -4537,7 +4537,36 @@ static int check_dt(struct device_node *np)
return -ENODEV;
}
-static int fts_probe(struct i2c_client *client,
+static int check_default_tp(struct device_node *dt, const char *prop)
+{
+ const char *active_tp;
+ const char *compatible;
+ char *start;
+ int ret;
+
+ ret = of_property_read_string(dt->parent, prop, &active_tp);
+ if (ret) {
+ pr_err(" %s:fail to read %s %d\n", __func__, prop, ret);
+ return -ENODEV;
+ }
+
+ ret = of_property_read_string(dt, "compatible", &compatible);
+ if (ret < 0) {
+ pr_err(" %s:fail to read %s %d\n", __func__, "compatible", ret);
+ return -ENODEV;
+ }
+
+ start = strnstr(active_tp, compatible, strlen(active_tp));
+ if (start == NULL) {
+ pr_err(" %s:no match compatible, %s, %s\n",
+ __func__, compatible, active_tp);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+static int fts_probe_internal(struct i2c_client *client,
const struct i2c_device_id *idp)
{
struct fts_ts_info *info = NULL;
@@ -4548,10 +4577,7 @@ static int fts_probe(struct i2c_client *client,
logError(0, "%s %s: driver probe begin!\n", tag, __func__);
- error = check_dt(dp);
-
- if (error != OK ||
- !i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
logError(1, "%s Unsupported I2C functionality\n", tag);
error = -EIO;
goto ProbeErrorExit_0;
@@ -4928,6 +4954,23 @@ static int fts_probe(struct i2c_client *client,
return error;
}
+static int fts_probe(struct i2c_client *client, const struct i2c_device_id *idp)
+{
+ int error = 0;
+ struct device_node *dp = client->dev.of_node;
+
+ if (check_dt(dp)) {
+ if (!check_default_tp(dp, "qcom,i2c-touch-active"))
+ error = -EPROBE_DEFER;
+ else
+ error = -ENODEV;
+
+ return error;
+ }
+
+ return fts_probe_internal(client, idp);
+}
+
static int fts_remove(struct i2c_client *client)
{
struct fts_ts_info *info = i2c_get_clientdata(client);
diff --git a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
old mode 100755
new mode 100644
index 900fc8e..00ea777
--- a/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
+++ b/drivers/input/touchscreen/synaptics_dsx/synaptics_dsx_i2c.c
@@ -451,7 +451,7 @@ static int synaptics_rmi4_i2c_write(struct synaptics_rmi4_data *rmi4_data,
return retval;
}
-int check_dt(struct device_node *np)
+static int check_dt(struct device_node *np)
{
int i;
int count;
@@ -475,6 +475,35 @@ int check_dt(struct device_node *np)
return -ENODEV;
}
+static int check_default_tp(struct device_node *dt, const char *prop)
+{
+ const char *active_tp;
+ const char *compatible;
+ char *start;
+ int ret;
+
+ ret = of_property_read_string(dt->parent, prop, &active_tp);
+ if (ret) {
+ pr_err(" %s:fail to read %s %d\n", __func__, prop, ret);
+ return -ENODEV;
+ }
+
+ ret = of_property_read_string(dt, "compatible", &compatible);
+ if (ret < 0) {
+ pr_err(" %s:fail to read %s %d\n", __func__, "compatible", ret);
+ return -ENODEV;
+ }
+
+ start = strnstr(active_tp, compatible, strlen(active_tp));
+ if (start == NULL) {
+ pr_err(" %s:no match compatible, %s, %s\n",
+ __func__, compatible, active_tp);
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
static struct synaptics_dsx_bus_access bus_access = {
.type = BUS_I2C,
.read = synaptics_rmi4_i2c_read,
@@ -490,9 +519,16 @@ static int synaptics_rmi4_i2c_probe(struct i2c_client *client,
const struct i2c_device_id *dev_id)
{
int retval;
+ struct device_node *dp = client->dev.of_node;
- if (check_dt(client->dev.of_node))
- return -ENODEV;
+ if (check_dt(dp)) {
+ if (!check_default_tp(dp, "qcom,i2c-touch-active"))
+ retval = -EPROBE_DEFER;
+ else
+ retval = -ENODEV;
+
+ return retval;
+ }
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA)) {
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index d61570d..48304e2 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -1672,6 +1672,8 @@ static int usbtouch_probe(struct usb_interface *intf,
if (!usbtouch || !input_dev)
goto out_free;
+ mutex_init(&usbtouch->pm_mutex);
+
type = &usbtouch_dev_info[id->driver_info];
usbtouch->type = type;
if (!type->process_pkt)
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 3a1d303..66b4800 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1710,7 +1710,7 @@ static const struct attribute_group *amd_iommu_groups[] = {
NULL,
};
-static int iommu_init_pci(struct amd_iommu *iommu)
+static int __init iommu_init_pci(struct amd_iommu *iommu)
{
int cap_ptr = iommu->cap_ptr;
u32 range, misc, low, high;
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8ced186..fb793de 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1147,21 +1147,44 @@ static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}
+static void arm_smmu_tlb_inv_context_s1(void *cookie);
+
static void arm_smmu_tlb_sync_context(void *cookie)
{
struct arm_smmu_domain *smmu_domain = cookie;
struct arm_smmu_device *smmu = smmu_domain->smmu;
+ struct device *dev = smmu_domain->dev;
+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
unsigned long flags;
+ int ret;
+ bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
+ ktime_t cur = ktime_get();
+
+ ret = arm_smmu_domain_power_on(&smmu_domain->domain,
+ smmu_domain->smmu);
+ if (ret)
+ return;
+
+ trace_tlbi_start(dev, 0);
+
+ if (!use_tlbiall)
+ writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
+ else
+ writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
spin_lock_irqsave(&smmu_domain->sync_lock, flags);
if (__arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
- base + ARM_SMMU_CB_TLBSTATUS))
+ base + ARM_SMMU_CB_TLBSTATUS))
dev_err_ratelimited(smmu->dev,
- "TLB sync on cb%d failed for device %s\n",
- smmu_domain->cfg.cbndx,
- dev_name(smmu_domain->dev));
+ "TLB sync on cb%d failed for device %s\n",
+ smmu_domain->cfg.cbndx,
+ dev_name(smmu_domain->dev));
spin_unlock_irqrestore(&smmu_domain->sync_lock, flags);
+
+ trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
+
+ arm_smmu_domain_power_off(&smmu_domain->domain, smmu_domain->smmu);
}
static void arm_smmu_tlb_sync_vmid(void *cookie)
@@ -1173,23 +1196,7 @@ static void arm_smmu_tlb_sync_vmid(void *cookie)
static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
- struct arm_smmu_domain *smmu_domain = cookie;
- struct device *dev = smmu_domain->dev;
- struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
- struct arm_smmu_device *smmu = smmu_domain->smmu;
- void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
- bool use_tlbiall = smmu->options & ARM_SMMU_OPT_NO_ASID_RETENTION;
- ktime_t cur = ktime_get();
-
- trace_tlbi_start(dev, 0);
-
- if (!use_tlbiall)
- writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
- else
- writel_relaxed(0, base + ARM_SMMU_CB_S1_TLBIALL);
-
- arm_smmu_tlb_sync_context(cookie);
- trace_tlbi_end(dev, ktime_us_delta(ktime_get(), cur));
+ return;
}
static void arm_smmu_tlb_inv_context_s2(void *cookie)
@@ -1483,6 +1490,7 @@ static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
phys = arm_smmu_iova_to_phys_hard(domain, iova);
smmu_domain->pgtbl_cfg.tlb->tlb_flush_all(smmu_domain);
+ smmu_domain->pgtbl_cfg.tlb->tlb_sync(smmu_domain);
phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
if (phys != phys_post_tlbiall) {
@@ -2539,6 +2547,7 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
/* Ensure there are no stale mappings for this context bank */
tlb->tlb_flush_all(smmu_domain);
+ tlb->tlb_sync(smmu_domain);
}
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
@@ -3025,17 +3034,12 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
if (arm_smmu_is_slave_side_secure(smmu_domain))
return msm_secure_smmu_unmap(domain, iova, size);
- ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
- if (ret)
- return ret;
-
arm_smmu_secure_domain_lock(smmu_domain);
spin_lock_irqsave(&smmu_domain->cb_lock, flags);
ret = ops->unmap(ops, iova, size);
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
- arm_smmu_domain_power_off(domain, smmu_domain->smmu);
/*
* While splitting up block mappings, we might allocate page table
* memory during unmap, so the vmids needs to be assigned to the
@@ -3194,6 +3198,14 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
return ret;
}
+static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+{
+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+ if (smmu_domain->tlb_ops)
+ smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+}
+
/*
* This function can sleep, and cannot be called from atomic context. Will
* power on register block if required. This restriction does not apply to the
@@ -3960,6 +3972,8 @@ static struct iommu_ops arm_smmu_ops = {
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.map_sg = arm_smmu_map_sg,
+ .flush_iotlb_all = arm_smmu_iotlb_sync,
+ .iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
.add_device = arm_smmu_add_device,
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 899d7c3e..ec88a51 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -153,6 +153,7 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
iommu_tlbiall(mapping->domain);
+ iommu_tlb_sync(mapping->domain);
mapping->have_stale_tlbs = false;
av8l_fast_clear_stale_ptes(mapping->pgtbl_ops, skip_sync);
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c143901..b9af241 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3721,7 +3721,7 @@ static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size)
freelist = domain_unmap(domain, start_pfn, last_pfn);
- if (intel_iommu_strict) {
+ if (intel_iommu_strict || !has_iova_flush_queue(&domain->iovad)) {
iommu_flush_iotlb_psi(iommu, domain, start_pfn,
nrpages, !freelist, 0);
/* free iova */
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 541abb2..688e037 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -86,6 +86,7 @@ void free_io_pgtable_ops(struct io_pgtable_ops *ops)
iop = container_of(ops, struct io_pgtable, ops);
io_pgtable_tlb_flush_all(iop);
+ io_pgtable_tlb_sync(iop);
io_pgtable_init_table[iop->fmt]->free(iop);
}
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index b7b3339..14651aa 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -65,9 +65,14 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
}
EXPORT_SYMBOL_GPL(init_iova_domain);
+bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+ return !!iovad->fq;
+}
+
static void free_iova_flush_queue(struct iova_domain *iovad)
{
- if (!iovad->fq)
+ if (!has_iova_flush_queue(iovad))
return;
if (timer_pending(&iovad->fq_timer))
@@ -85,13 +90,14 @@ static void free_iova_flush_queue(struct iova_domain *iovad)
int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb, iova_entry_dtor entry_dtor)
{
+ struct iova_fq __percpu *queue;
int cpu;
atomic64_set(&iovad->fq_flush_start_cnt, 0);
atomic64_set(&iovad->fq_flush_finish_cnt, 0);
- iovad->fq = alloc_percpu(struct iova_fq);
- if (!iovad->fq)
+ queue = alloc_percpu(struct iova_fq);
+ if (!queue)
return -ENOMEM;
iovad->flush_cb = flush_cb;
@@ -100,13 +106,17 @@ int init_iova_flush_queue(struct iova_domain *iovad,
for_each_possible_cpu(cpu) {
struct iova_fq *fq;
- fq = per_cpu_ptr(iovad->fq, cpu);
+ fq = per_cpu_ptr(queue, cpu);
fq->head = 0;
fq->tail = 0;
spin_lock_init(&fq->lock);
}
+ smp_wmb();
+
+ iovad->fq = queue;
+
timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
atomic_set(&iovad->fq_timer_on, 0);
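
The iova fix initializes every per-CPU queue entry through a local pointer and only then publishes it to iovad->fq, with an smp_wmb() between the two steps so that a CPU observing the non-NULL pointer (via has_iova_flush_queue()) is guaranteed to also see initialized head/tail/lock fields. The publication pattern on its own, with hypothetical demo_* names:

#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/cpumask.h>

struct demo_fq {
	unsigned int head, tail;
	spinlock_t lock;
};

static struct demo_fq __percpu *demo_fq_ptr;	/* readers test for NULL */

static int demo_publish_fq(void)
{
	struct demo_fq __percpu *q = alloc_percpu(struct demo_fq);
	int cpu;

	if (!q)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct demo_fq *fq = per_cpu_ptr(q, cpu);

		fq->head = 0;
		fq->tail = 0;
		spin_lock_init(&fq->lock);
	}

	smp_wmb();	/* order the init above before the pointer publish */
	demo_fq_ptr = q;
	return 0;
}
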
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index ee30e89..9ba73e1 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -2883,7 +2883,7 @@ static int its_vpe_init(struct its_vpe *vpe)
if (!its_alloc_vpe_table(vpe_id)) {
its_vpe_id_free(vpe_id);
- its_free_pending_table(vpe->vpt_page);
+ its_free_pending_table(vpt_page);
return -ENOMEM;
}
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 4760307..cef8f5e 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -131,6 +131,7 @@ static struct irq_chip gpcv2_irqchip_data_chip = {
.irq_unmask = imx_gpcv2_irq_unmask,
.irq_set_wake = imx_gpcv2_irq_set_wake,
.irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_type = irq_chip_set_type_parent,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
#endif
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 6d05946..c952002 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -1406,6 +1406,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
printk(KERN_DEBUG
"%s: %s: alloc urb for fifo %i failed",
hw->name, __func__, fifo->fifonum);
+ continue;
}
fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
fifo->iso[i].indx = i;
@@ -1704,13 +1705,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
static int
setup_hfcsusb(struct hfcsusb *hw)
{
+ void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
u_char b;
+ int ret;
if (debug & DBG_HFC_CALL_TRACE)
printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
+
+ memcpy(&b, dmabuf, sizeof(u_char));
+ kfree(dmabuf);
+
/* check the chip id */
- if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
+ if (ret != 1) {
printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
hw->name, __func__);
return 1;
@@ -1967,6 +1978,9 @@ hfcsusb_probe(struct usb_interface *intf, const struct usb_device_id *id)
/* get endpoint base */
idx = ((ep_addr & 0x7f) - 1) * 2;
+ if (idx > 15)
+ return -EIO;
+
if (ep_addr & 0x80)
idx++;
attr = ep->desc.bmAttributes;
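
The setup_hfcsusb() change swaps an on-stack byte for a kmalloc'd one because USB transfer buffers must be DMA-capable; with VMAP_STACK the stack may not even be physically contiguous. The general shape of a DMA-safe register read, sketched with a hypothetical vendor request:

#include <linux/slab.h>
#include <linux/usb.h>

static int demo_read_byte(struct usb_device *udev, u8 request, u8 *val)
{
	u8 *dmabuf = kmalloc(1, GFP_KERNEL);	/* heap memory is DMA-able */
	int ret;

	if (!dmabuf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, 0, dmabuf, 1, 1000 /* ms */);
	if (ret == 1)
		*val = *dmabuf;

	kfree(dmabuf);
	return ret == 1 ? 0 : -EIO;
}
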
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 5a5c566..db5aa29 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -409,11 +409,13 @@ struct mbox_chan *mbox_request_channel_byname(struct mbox_client *cl,
of_property_for_each_string(np, "mbox-names", prop, mbox_name) {
if (!strncmp(name, mbox_name, strlen(name)))
- break;
+ return mbox_request_channel(cl, index);
index++;
}
- return mbox_request_channel(cl, index);
+ dev_err(cl->dev, "%s() could not locate channel named \"%s\"\n",
+ __func__, name);
+ return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL_GPL(mbox_request_channel_byname);
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index f6fd115..b3cb7fe 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -132,6 +132,7 @@ static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,sm8150-apcs-hmss-global", .data = (void *) 12 },
{ .compatible = "qcom,sm8150-spcs-global", .data = (void *)0 },
{ .compatible = "qcom,kona-spcs-global", .data = (void *)0 },
+ { .compatible = "qcom,bengal-apcs-hmss-global", .data = (void *)8 },
{}
};
MODULE_DEVICE_TABLE(of, qcom_apcs_ipc_of_match);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b1d0ae2..dc385b7 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1602,7 +1602,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
unsigned long freed;
c = container_of(shrink, struct dm_bufio_client, shrinker);
- if (!dm_bufio_trylock(c))
+ if (sc->gfp_mask & __GFP_FS)
+ dm_bufio_lock(c);
+ else if (!dm_bufio_trylock(c))
return SHRINK_STOP;
freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
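
The dm-bufio change encodes a reclaim-context rule: when the allocation that triggered the shrinker allows FS activity (__GFP_FS), it is safe to sleep on the client lock; otherwise the shrinker must trylock, since the lock may already be held further up the same call stack. The skeleton, with a hypothetical cache lock:

#include <linux/shrinker.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

static DEFINE_MUTEX(demo_cache_lock);

static unsigned long demo_scan(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	unsigned long freed = 0;

	if (sc->gfp_mask & __GFP_FS)
		mutex_lock(&demo_cache_lock);	/* sleeping is allowed */
	else if (!mutex_trylock(&demo_cache_lock))
		return SHRINK_STOP;		/* avoid self-deadlock */

	/* ... walk and free cache entries, counting into 'freed' ... */

	mutex_unlock(&demo_cache_lock);
	return freed;
}
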
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 7d480c9..7e426e4 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -130,6 +130,7 @@ struct mapped_device {
};
int md_in_flight(struct mapped_device *md);
+void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 562b621..e71aecc 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -1760,7 +1760,22 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
queue_work(ic->wait_wq, &dio->work);
return;
}
+ if (journal_read_pos != NOT_FOUND)
+ dio->range.n_sectors = ic->sectors_per_block;
wait_and_add_new_range(ic, &dio->range);
+ /*
+ * wait_and_add_new_range drops the spinlock, so the journal
+ * may have been changed arbitrarily. We need to recheck.
+ * To simplify the code, we restrict I/O size to just one block.
+ */
+ if (journal_read_pos != NOT_FOUND) {
+ sector_t next_sector;
+ unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
+ if (unlikely(new_pos != journal_read_pos)) {
+ remove_range_unlocked(ic, &dio->range);
+ goto retry;
+ }
+ }
}
spin_unlock_irq(&ic->endio_wait.lock);
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 671c243..3f694d9 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -548,8 +548,10 @@ static int run_io_job(struct kcopyd_job *job)
* no point in continuing.
*/
if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
- job->master_job->write_err)
+ job->master_job->write_err) {
+ job->write_err = job->master_job->write_err;
return -EIO;
+ }
io_job_start(job->kc->throttle);
@@ -601,6 +603,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
else
job->read_err = 1;
push(&kc->complete_jobs, job);
+ wake(kc);
break;
}
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c44925e..b78a8a4 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3199,7 +3199,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
*/
r = rs_prepare_reshape(rs);
if (r)
- return r;
+ goto bad;
/* Reshaping ain't recovery, so disable recovery */
rs_setup_recovery(rs, MaxSector);
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index 6e547b8..264b84e 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -295,11 +295,14 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped)
}
if (unlikely(error == BLK_STS_TARGET)) {
- if (req_op(clone) == REQ_OP_WRITE_SAME &&
- !clone->q->limits.max_write_same_sectors)
+ if (req_op(clone) == REQ_OP_DISCARD &&
+ !clone->q->limits.max_discard_sectors)
+ disable_discard(tio->md);
+ else if (req_op(clone) == REQ_OP_WRITE_SAME &&
+ !clone->q->limits.max_write_same_sectors)
disable_write_same(tio->md);
- if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
- !clone->q->limits.max_write_zeroes_sectors)
+ else if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
+ !clone->q->limits.max_write_zeroes_sectors)
disable_write_zeroes(tio->md);
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 59d17c4..96343c7 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1350,7 +1350,7 @@ void dm_table_event(struct dm_table *t)
}
EXPORT_SYMBOL(dm_table_event);
-sector_t dm_table_get_size(struct dm_table *t)
+inline sector_t dm_table_get_size(struct dm_table *t)
{
return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
}
@@ -1375,6 +1375,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
unsigned int l, n = 0, k = 0;
sector_t *node;
+ if (unlikely(sector >= dm_table_get_size(t)))
+ return &t->targets[t->num_targets];
+
for (l = 0; l < t->depth; l++) {
n = get_child(n, k);
node = get_node(t, l, n);
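Rather than walking the index for an out-of-range sector, dm_table_find_target() now returns the one-past-the-end slot, which callers can test against as a sentinel. Reduced to its essentials (my_find() and my_target_ok() are illustrative):

struct my_tgt { unsigned long begin, len; };

struct my_table {
	unsigned int num_targets;
	struct my_tgt *targets;
	unsigned long size;
};

/* Valid slot on success, one-past-the-end sentinel on overflow. */
static struct my_tgt *my_find(struct my_table *t, unsigned long sector)
{
	if (sector >= t->size)
		return &t->targets[t->num_targets];
	/* ... normal index walk ... */
	return &t->targets[0];
}

static int my_target_ok(struct my_table *t, struct my_tgt *ti)
{
	return ti != &t->targets[t->num_targets];
}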
diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
index 4cdde7a..7e8d7fc 100644
--- a/drivers/md/dm-zoned-metadata.c
+++ b/drivers/md/dm-zoned-metadata.c
@@ -401,15 +401,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
struct bio *bio;
+ if (dmz_bdev_is_dying(zmd->dev))
+ return ERR_PTR(-EIO);
+
/* Get a new block and a BIO to read it */
mblk = dmz_alloc_mblock(zmd, mblk_no);
if (!mblk)
- return NULL;
+ return ERR_PTR(-ENOMEM);
bio = bio_alloc(GFP_NOIO, 1);
if (!bio) {
dmz_free_mblock(zmd, mblk);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
spin_lock(&zmd->mblk_lock);
@@ -540,8 +543,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
if (!mblk) {
/* Cache miss: read the block from disk */
mblk = dmz_get_mblock_slow(zmd, mblk_no);
- if (!mblk)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(mblk))
+ return mblk;
}
/* Wait for on-going read I/O and check for error */
@@ -569,16 +572,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
/*
* Issue a metadata block write BIO.
*/
-static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
- unsigned int set)
+static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
+ unsigned int set)
{
sector_t block = zmd->sb[set].block + mblk->no;
struct bio *bio;
+ if (dmz_bdev_is_dying(zmd->dev))
+ return -EIO;
+
bio = bio_alloc(GFP_NOIO, 1);
if (!bio) {
set_bit(DMZ_META_ERROR, &mblk->state);
- return;
+ return -ENOMEM;
}
set_bit(DMZ_META_WRITING, &mblk->state);
@@ -590,6 +596,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
submit_bio(bio);
+
+ return 0;
}
/*
@@ -601,6 +609,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
struct bio *bio;
int ret;
+ if (dmz_bdev_is_dying(zmd->dev))
+ return -EIO;
+
bio = bio_alloc(GFP_NOIO, 1);
if (!bio)
return -ENOMEM;
@@ -658,22 +669,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
{
struct dmz_mblock *mblk;
struct blk_plug plug;
- int ret = 0;
+ int ret = 0, nr_mblks_submitted = 0;
/* Issue writes */
blk_start_plug(&plug);
- list_for_each_entry(mblk, write_list, link)
- dmz_write_mblock(zmd, mblk, set);
+ list_for_each_entry(mblk, write_list, link) {
+ ret = dmz_write_mblock(zmd, mblk, set);
+ if (ret)
+ break;
+ nr_mblks_submitted++;
+ }
blk_finish_plug(&plug);
/* Wait for completion */
list_for_each_entry(mblk, write_list, link) {
+ if (!nr_mblks_submitted)
+ break;
wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
TASK_UNINTERRUPTIBLE);
if (test_bit(DMZ_META_ERROR, &mblk->state)) {
clear_bit(DMZ_META_ERROR, &mblk->state);
ret = -EIO;
}
+ nr_mblks_submitted--;
}
/* Flush drive cache (this will also sync data) */
@@ -735,6 +753,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
*/
dmz_lock_flush(zmd);
+ if (dmz_bdev_is_dying(zmd->dev)) {
+ ret = -EIO;
+ goto out;
+ }
+
/* Get dirty blocks */
spin_lock(&zmd->mblk_lock);
list_splice_init(&zmd->mblk_dirty_list, &write_list);
@@ -1534,7 +1557,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
struct dm_zone *zone;
if (list_empty(&zmd->map_rnd_list))
- return NULL;
+ return ERR_PTR(-EBUSY);
list_for_each_entry(zone, &zmd->map_rnd_list, link) {
if (dmz_is_buf(zone))
@@ -1545,7 +1568,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
return dzone;
}
- return NULL;
+ return ERR_PTR(-EBUSY);
}
/*
@@ -1556,7 +1579,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
struct dm_zone *zone;
if (list_empty(&zmd->map_seq_list))
- return NULL;
+ return ERR_PTR(-EBUSY);
list_for_each_entry(zone, &zmd->map_seq_list, link) {
if (!zone->bzone)
@@ -1565,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
return zone;
}
- return NULL;
+ return ERR_PTR(-EBUSY);
}
/*
@@ -1623,6 +1646,10 @@ struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd, unsigned int chu
/* Allocate a random zone */
dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
if (!dzone) {
+ if (dmz_bdev_is_dying(zmd->dev)) {
+ dzone = ERR_PTR(-EIO);
+ goto out;
+ }
dmz_wait_for_free_zones(zmd);
goto again;
}
@@ -1720,6 +1747,10 @@ struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
/* Allocate a random zone */
bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
if (!bzone) {
+ if (dmz_bdev_is_dying(zmd->dev)) {
+ bzone = ERR_PTR(-EIO);
+ goto out;
+ }
dmz_wait_for_free_zones(zmd);
goto again;
}
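Several dm-zoned helpers above stop returning bare NULL and instead encode the failure cause in the pointer itself, so callers can distinguish -EIO (device dying) from -ENOMEM or -EBUSY. The ERR_PTR()/IS_ERR()/PTR_ERR() triple is the standard kernel idiom; a sketch with an illustrative allocator:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_zone { int id; };

static struct my_zone *my_get_zone(bool dying, bool busy)
{
	struct my_zone *z;

	if (dying)
		return ERR_PTR(-EIO);	/* backing device is going away */
	if (busy)
		return ERR_PTR(-EBUSY);	/* nothing reclaimable right now */

	z = kzalloc(sizeof(*z), GFP_NOIO);
	if (!z)
		return ERR_PTR(-ENOMEM);
	return z;
}

static int my_caller(void)
{
	struct my_zone *z = my_get_zone(false, false);

	if (IS_ERR(z))
		return PTR_ERR(z);	/* propagate the precise errno */
	/* ... use z ... */
	kfree(z);
	return 0;
}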
diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
index edf4b95..9470b8f 100644
--- a/drivers/md/dm-zoned-reclaim.c
+++ b/drivers/md/dm-zoned-reclaim.c
@@ -37,7 +37,7 @@ enum {
/*
* Number of seconds of target BIO inactivity to consider the target idle.
*/
-#define DMZ_IDLE_PERIOD (10UL * HZ)
+#define DMZ_IDLE_PERIOD (10UL * HZ)
/*
* Percentage of unmapped (free) random zones below which reclaim starts
@@ -134,6 +134,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
while (block < end_block) {
+ if (dev->flags & DMZ_BDEV_DYING)
+ return -EIO;
+
/* Get a valid region from the source zone */
ret = dmz_first_valid_block(zmd, src_zone, &block);
if (ret <= 0)
@@ -215,7 +218,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
dmz_unlock_flush(zmd);
- return 0;
+ return ret;
}
/*
@@ -259,7 +262,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
dmz_unlock_flush(zmd);
- return 0;
+ return ret;
}
/*
@@ -312,7 +315,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
dmz_unlock_flush(zmd);
- return 0;
+ return ret;
}
/*
@@ -334,7 +337,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
/*
* Find a candidate zone for reclaim and process it.
*/
-static void dmz_reclaim(struct dmz_reclaim *zrc)
+static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
struct dmz_metadata *zmd = zrc->metadata;
struct dm_zone *dzone;
@@ -344,8 +347,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
/* Get a data zone */
dzone = dmz_get_zone_for_reclaim(zmd);
- if (!dzone)
- return;
+ if (IS_ERR(dzone))
+ return PTR_ERR(dzone);
start = jiffies;
@@ -391,13 +394,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
out:
if (ret) {
dmz_unlock_zone_reclaim(dzone);
- return;
+ return ret;
}
- (void) dmz_flush_metadata(zrc->metadata);
+ ret = dmz_flush_metadata(zrc->metadata);
+ if (ret) {
+ dmz_dev_debug(zrc->dev,
+ "Metadata flush for zone %u failed, err %d\n",
+ dmz_id(zmd, rzone), ret);
+ return ret;
+ }
dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
+ return 0;
}
/*
@@ -442,6 +452,10 @@ static void dmz_reclaim_work(struct work_struct *work)
struct dmz_metadata *zmd = zrc->metadata;
unsigned int nr_rnd, nr_unmap_rnd;
unsigned int p_unmap_rnd;
+ int ret;
+
+ if (dmz_bdev_is_dying(zrc->dev))
+ return;
if (!dmz_should_reclaim(zrc)) {
mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
@@ -471,7 +485,17 @@ static void dmz_reclaim_work(struct work_struct *work)
(dmz_target_idle(zrc) ? "Idle" : "Busy"),
p_unmap_rnd, nr_unmap_rnd, nr_rnd);
- dmz_reclaim(zrc);
+ ret = dmz_do_reclaim(zrc);
+ if (ret) {
+ dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
+ if (ret == -EIO)
+ /*
+ * LLD might be performing some error handling sequence
+ * at the underlying device. To not interfere, do not
+ * attempt to schedule the next reclaim run immediately.
+ */
+ return;
+ }
dmz_schedule_reclaim(zrc);
}
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 85fb2ba..1030c42a 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -133,6 +133,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
atomic_inc(&bioctx->ref);
generic_make_request(clone);
+ if (clone->bi_status == BLK_STS_IOERR)
+ return -EIO;
if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
zone->wp_block += nr_blocks;
@@ -277,8 +279,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
/* Get the buffer zone. One will be allocated if needed */
bzone = dmz_get_chunk_buffer(zmd, zone);
- if (!bzone)
- return -ENOSPC;
+ if (IS_ERR(bzone))
+ return PTR_ERR(bzone);
if (dmz_is_readonly(bzone))
return -EROFS;
@@ -389,6 +391,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
dmz_lock_metadata(zmd);
+ if (dmz->dev->flags & DMZ_BDEV_DYING) {
+ ret = -EIO;
+ goto out;
+ }
+
/*
* Get the data zone mapping the chunk. There may be no
* mapping for read and discard. If a mapping is obtained,
@@ -493,6 +500,8 @@ static void dmz_flush_work(struct work_struct *work)
/* Flush dirty metadata blocks */
ret = dmz_flush_metadata(dmz->metadata);
+ if (ret)
+ dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
/* Process queued flush requests */
while (1) {
@@ -513,22 +522,24 @@ static void dmz_flush_work(struct work_struct *work)
* Get a chunk work and start it to process a new BIO.
* If the BIO chunk has no work yet, create one.
*/
-static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
+static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
{
unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
struct dm_chunk_work *cw;
+ int ret = 0;
mutex_lock(&dmz->chunk_lock);
/* Get the BIO chunk work. If one is not active yet, create one */
cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
if (!cw) {
- int ret;
/* Create a new chunk work */
cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
- if (!cw)
+ if (unlikely(!cw)) {
+ ret = -ENOMEM;
goto out;
+ }
INIT_WORK(&cw->work, dmz_chunk_work);
atomic_set(&cw->refcount, 0);
@@ -539,7 +550,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
if (unlikely(ret)) {
kfree(cw);
- cw = NULL;
goto out;
}
}
@@ -547,10 +557,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
bio_list_add(&cw->bio_list, bio);
dmz_get_chunk_work(cw);
+ dmz_reclaim_bio_acc(dmz->reclaim);
if (queue_work(dmz->chunk_wq, &cw->work))
dmz_get_chunk_work(cw);
out:
mutex_unlock(&dmz->chunk_lock);
+ return ret;
+}
+
+/*
+ * Check the backing device availability. If it's on the way out,
+ * start failing I/O. Reclaim and metadata components also call this
+ * function to cleanly abort operation in the event of such failure.
+ */
+bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
+{
+ struct gendisk *disk;
+
+ if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
+ disk = dmz_dev->bdev->bd_disk;
+ if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
+ dmz_dev_warn(dmz_dev, "Backing device queue dying");
+ dmz_dev->flags |= DMZ_BDEV_DYING;
+ } else if (disk->fops->check_events) {
+ if (disk->fops->check_events(disk, 0) &
+ DISK_EVENT_MEDIA_CHANGE) {
+ dmz_dev_warn(dmz_dev, "Backing device offline");
+ dmz_dev->flags |= DMZ_BDEV_DYING;
+ }
+ }
+ }
+
+ return dmz_dev->flags & DMZ_BDEV_DYING;
}
/*
@@ -564,6 +602,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
sector_t sector = bio->bi_iter.bi_sector;
unsigned int nr_sectors = bio_sectors(bio);
sector_t chunk_sector;
+ int ret;
+
+ if (dmz_bdev_is_dying(dmz->dev))
+ return DM_MAPIO_KILL;
dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
bio_op(bio), (unsigned long long)sector, nr_sectors,
@@ -601,8 +643,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
/* Now ready to handle this BIO */
- dmz_reclaim_bio_acc(dmz->reclaim);
- dmz_queue_chunk_work(dmz, bio);
+ ret = dmz_queue_chunk_work(dmz, bio);
+ if (ret) {
+ dmz_dev_debug(dmz->dev,
+ "BIO op %d, can't process chunk %llu, err %i\n",
+ bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
+ ret);
+ return DM_MAPIO_REQUEUE;
+ }
return DM_MAPIO_SUBMITTED;
}
@@ -856,6 +904,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
{
struct dmz_target *dmz = ti->private;
+ if (dmz_bdev_is_dying(dmz->dev))
+ return -ENODEV;
+
*bdev = dmz->dev->bdev;
return 0;
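dmz_map() now distinguishes two failure modes: DM_MAPIO_KILL completes the BIO with an error when the backing device is dying (no point retrying), while DM_MAPIO_REQUEUE asks the DM core to resubmit after a transient failure such as a chunk-work allocation error. A sketch of that decision, with illustrative target state and helpers:

#include <linux/device-mapper.h>

struct my_tgt_state { bool dying; };

static bool my_dev_is_dying(struct my_tgt_state *s) { return s->dying; }
static int my_queue_bio(struct my_tgt_state *s, struct bio *bio) { return 0; }

static int my_map(struct dm_target *ti, struct bio *bio)
{
	struct my_tgt_state *s = ti->private;

	if (my_dev_is_dying(s))
		return DM_MAPIO_KILL;		/* permanent: fail the BIO */

	if (my_queue_bio(s, bio))
		return DM_MAPIO_REQUEUE;	/* transient: retry later */

	return DM_MAPIO_SUBMITTED;
}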
diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
index ed8de49..93a6452 100644
--- a/drivers/md/dm-zoned.h
+++ b/drivers/md/dm-zoned.h
@@ -56,6 +56,8 @@ struct dmz_dev {
unsigned int nr_zones;
+ unsigned int flags;
+
sector_t zone_nr_sectors;
unsigned int zone_nr_sectors_shift;
@@ -67,6 +69,9 @@ struct dmz_dev {
(dev)->zone_nr_sectors_shift)
#define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
+/* Device flags. */
+#define DMZ_BDEV_DYING (1 << 0)
+
/*
* Zone descriptor.
*/
@@ -245,4 +250,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
+/*
+ * Functions defined in dm-zoned-target.c
+ */
+bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
+
#endif /* DM_ZONED_H */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 42768fe..c9860e3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -910,6 +910,15 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
}
}
+void disable_discard(struct mapped_device *md)
+{
+ struct queue_limits *limits = dm_get_queue_limits(md);
+
+ /* device doesn't really support DISCARD, disable it */
+ limits->max_discard_sectors = 0;
+ blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
+}
+
void disable_write_same(struct mapped_device *md)
{
struct queue_limits *limits = dm_get_queue_limits(md);
@@ -935,11 +944,14 @@ static void clone_endio(struct bio *bio)
dm_endio_fn endio = tio->ti->type->end_io;
if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
- if (bio_op(bio) == REQ_OP_WRITE_SAME &&
- !bio->bi_disk->queue->limits.max_write_same_sectors)
+ if (bio_op(bio) == REQ_OP_DISCARD &&
+ !bio->bi_disk->queue->limits.max_discard_sectors)
+ disable_discard(md);
+ else if (bio_op(bio) == REQ_OP_WRITE_SAME &&
+ !bio->bi_disk->queue->limits.max_write_same_sectors)
disable_write_same(md);
- if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
- !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
+ else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
+ !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
disable_write_zeroes(md);
}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 58b3197..8aae062 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
new_parent = shadow_current(s);
+ pn = dm_block_data(new_parent);
+ size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
+ sizeof(__le64) : s->info->value_type.size;
+
+ /* create & init the left block */
r = new_block(s->info, &left);
if (r < 0)
return r;
+ ln = dm_block_data(left);
+ nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
+
+ ln->header.flags = pn->header.flags;
+ ln->header.nr_entries = cpu_to_le32(nr_left);
+ ln->header.max_entries = pn->header.max_entries;
+ ln->header.value_size = pn->header.value_size;
+ memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
+ memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
+
+ /* create & init the right block */
r = new_block(s->info, &right);
if (r < 0) {
unlock_block(s->info, left);
return r;
}
- pn = dm_block_data(new_parent);
- ln = dm_block_data(left);
rn = dm_block_data(right);
-
- nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
- ln->header.flags = pn->header.flags;
- ln->header.nr_entries = cpu_to_le32(nr_left);
- ln->header.max_entries = pn->header.max_entries;
- ln->header.value_size = pn->header.value_size;
-
rn->header.flags = pn->header.flags;
rn->header.nr_entries = cpu_to_le32(nr_right);
rn->header.max_entries = pn->header.max_entries;
rn->header.value_size = pn->header.value_size;
-
- memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
-
- size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
- sizeof(__le64) : s->info->value_type.size;
- memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
nr_right * size);
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index aec44924..2532858 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm)
}
if (smm->recursion_count == 1)
- apply_bops(smm);
+ r = apply_bops(smm);
smm->recursion_count--;
diff --git a/drivers/media/platform/msm/cvp/cvp_core_hfi.h b/drivers/media/platform/msm/cvp/cvp_core_hfi.h
index 59b67b8..5481ea2 100644
--- a/drivers/media/platform/msm/cvp/cvp_core_hfi.h
+++ b/drivers/media/platform/msm/cvp/cvp_core_hfi.h
@@ -266,6 +266,7 @@ struct iris_hfi_device {
unsigned int skip_pc_count;
struct msm_cvp_capability *sys_init_capabilities;
struct iris_hfi_vpu_ops *vpu_ops;
+ struct delayed_work dsp_init_work;
};
void cvp_iris_hfi_delete_device(void *device);
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.c b/drivers/media/platform/msm/cvp/cvp_hfi.c
index 4700cae..9b94868 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.c
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.c
@@ -266,6 +266,7 @@ const int cvp_max_packets = 32;
static void iris_hfi_pm_handler(struct work_struct *work);
static DECLARE_DELAYED_WORK(iris_hfi_pm_work, iris_hfi_pm_handler);
+static void dsp_init_work_handler(struct work_struct *work);
static inline int __resume(struct iris_hfi_device *device);
static inline int __suspend(struct iris_hfi_device *device);
static int __disable_regulators(struct iris_hfi_device *device);
@@ -504,9 +505,9 @@ static int __dsp_send_hfi_queue(struct iris_hfi_device *device)
device->dsp_iface_q_table.mem_data.size);
rc = cvp_dsp_send_cmd_hfi_queue(
(phys_addr_t *)device->dsp_iface_q_table.mem_data.dma_handle,
- device->dsp_iface_q_table.mem_data.size);
+ device->dsp_iface_q_table.mem_data.size, device);
if (rc) {
- dprintk(CVP_ERR, "%s: dsp init failed\n", __func__);
+ dprintk(CVP_ERR, "%s: dsp hfi queue init failed\n", __func__);
return rc;
}
@@ -1386,6 +1387,8 @@ static void cvp_dump_csr(struct iris_hfi_device *dev)
if (!dev)
return;
+ if (!dev->power_enabled)
+ return;
reg = __read_register(dev, CVP_WRAPPER_CPU_STATUS);
dprintk(CVP_ERR, "CVP_WRAPPER_CPU_STATUS: %x\n", reg);
reg = __read_register(dev, CVP_CPU_CS_SCIACMDARG0);
@@ -2059,7 +2062,8 @@ static int __interface_queues_init(struct iris_hfi_device *dev)
}
vsfr = (struct cvp_hfi_sfr_struct *) dev->sfr.align_virtual_addr;
- vsfr->bufSize = ALIGNED_SFR_SIZE;
+ if (vsfr)
+ vsfr->bufSize = ALIGNED_SFR_SIZE;
rc = __interface_dsp_queues_init(dev);
if (rc) {
@@ -2154,6 +2158,43 @@ static int __sys_set_power_control(struct iris_hfi_device *device,
return 0;
}
+static void dsp_init_work_handler(struct work_struct *work)
+{
+ int rc = 0;
+ static int retry_count;
+ struct iris_hfi_device *device;
+
+ if (!work) {
+ dprintk(CVP_ERR, "%s: NULL device\n", __func__);
+ return;
+ }
+
+ device = container_of(work, struct iris_hfi_device, dsp_init_work.work);
+ if (!device) {
+ dprintk(CVP_ERR, "%s: NULL device\n", __func__);
+ return;
+ }
+
+ dprintk(CVP_PROF, "Entering %s\n", __func__);
+
+ mutex_lock(&device->lock);
+ rc = __dsp_send_hfi_queue(device);
+ mutex_unlock(&device->lock);
+
+ if (rc) {
+ if (retry_count > MAX_DSP_INIT_ATTEMPTS) {
+ dprintk(CVP_ERR, "%s: max trials exceeded\n", __func__);
+ return;
+ }
+ dprintk(CVP_PROF, "%s: Attempt to init DSP %d\n",
+ __func__, retry_count);
+
+ schedule_delayed_work(&device->dsp_init_work,
+ msecs_to_jiffies(CVP_MAX_WAIT_TIME));
+ ++retry_count;
+ }
+}
+
static int iris_hfi_core_init(void *device)
{
int rc = 0;
@@ -2231,7 +2272,6 @@ static int iris_hfi_core_init(void *device)
__enable_subcaches(device);
__set_subcaches(device);
- __dsp_send_hfi_queue(device);
__set_ubwc_config(device);
__sys_set_idle_indicator(device, true);
@@ -2244,9 +2284,15 @@ static int iris_hfi_core_init(void *device)
pm_qos_add_request(&dev->qos, PM_QOS_CPU_DMA_LATENCY,
dev->res->pm_qos_latency_us);
}
+
+ rc = __dsp_send_hfi_queue(device);
+ if (rc)
+ schedule_delayed_work(&dev->dsp_init_work,
+ msecs_to_jiffies(CVP_MAX_WAIT_TIME));
+
dprintk(CVP_DBG, "Core inited successfully\n");
mutex_unlock(&dev->lock);
- return rc;
+ return 0;
err_core_init:
__set_state(dev, IRIS_STATE_DEINIT);
__unload_fw(dev);
@@ -4626,7 +4672,7 @@ static void __unload_fw(struct iris_hfi_device *device)
device->resources.fw.cookie = NULL;
__deinit_resources(device);
- dprintk(CVP_DBG, "Firmware unloaded successfully\n");
+ dprintk(CVP_WARN, "Firmware unloaded\n");
}
static int iris_hfi_get_fw_info(void *dev, struct cvp_hal_fw_info *fw_info)
@@ -4852,6 +4898,8 @@ static struct iris_hfi_device *__add_device(u32 device_id,
mutex_init(&hdevice->lock);
INIT_LIST_HEAD(&hdevice->sess_head);
+ INIT_DELAYED_WORK(&hdevice->dsp_init_work, dsp_init_work_handler);
+
return hdevice;
err_cleanup:
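Instead of failing core init when the DSP queue handoff fails, the driver now re-arms a delayed work item until MAX_DSP_INIT_ATTEMPTS is exceeded. The bounded-retry shape, sketched with illustrative names (and a per-device counter, which avoids the pitfalls of the static one above):

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define MY_MAX_ATTEMPTS	16
#define MY_RETRY_MS	2000

struct my_dev {
	struct delayed_work init_work;
	int attempts;
};

static int my_try_init(struct my_dev *d) { return -EAGAIN; /* stub */ }

static void my_init_work(struct work_struct *work)
{
	struct my_dev *d = container_of(work, struct my_dev, init_work.work);

	if (!my_try_init(d))
		return;				/* success: stop retrying */

	if (++d->attempts > MY_MAX_ATTEMPTS)
		return;				/* give up after the cap */

	schedule_delayed_work(&d->init_work, msecs_to_jiffies(MY_RETRY_MS));
}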
diff --git a/drivers/media/platform/msm/cvp/hfi_response_handler.c b/drivers/media/platform/msm/cvp/hfi_response_handler.c
index 393ff3c..00f2d7c 100644
--- a/drivers/media/platform/msm/cvp/hfi_response_handler.c
+++ b/drivers/media/platform/msm/cvp/hfi_response_handler.c
@@ -480,10 +480,18 @@ static int hfi_process_session_cvp_msg(u32 device_id,
if (pkt->packet_type == HFI_MSG_SESSION_CVP_DFS
|| pkt->packet_type == HFI_MSG_SESSION_CVP_DME
|| pkt->packet_type == HFI_MSG_SESSION_CVP_ICA
- || pkt->packet_type == HFI_MSG_SESSION_CVP_FD)
+ || pkt->packet_type == HFI_MSG_SESSION_CVP_FD) {
+ u64 ktid;
+ u32 kdata1, kdata2;
+
+ kdata1 = pkt->client_data.kdata1;
+ kdata2 = pkt->client_data.kdata2;
+ ktid = ((u64)kdata2 << 32) | kdata1;
+ msm_cvp_unmap_buf_cpu(inst, ktid);
+
return _deprecated_hfi_msg_process(device_id,
pkt, info, inst);
-
+ }
dprintk(CVP_ERR, "Invalid deprecate_bitmask %#x\n",
inst->deprecate_bitmask);
}
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.c b/drivers/media/platform/msm/cvp/msm_cvp.c
index 44aa779..8cb781d 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp.c
@@ -369,6 +369,75 @@ static void __msm_cvp_cache_operations(struct msm_cvp_internal_buffer *cbuf)
cbuf->buf.offset, cbuf->buf.size);
}
+static int msm_cvp_map_buf_user_persist(struct msm_cvp_inst *inst,
+ struct cvp_buf_type *in_buf,
+ u32 *iova)
+{
+ int rc = 0;
+ struct cvp_internal_buf *cbuf;
+ struct dma_buf *dma_buf;
+
+ if (!inst || !iova) {
+ dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ if (in_buf->fd > 0) {
+ dma_buf = msm_cvp_smem_get_dma_buf(in_buf->fd);
+ if (!dma_buf) {
+ dprintk(CVP_ERR, "%s: Invalid fd=%d", __func__,
+ in_buf->fd);
+ return -EINVAL;
+ }
+ in_buf->dbuf = dma_buf;
+ msm_cvp_smem_put_dma_buf(dma_buf);
+ }
+
+ rc = msm_cvp_session_get_iova_addr(inst, in_buf, iova);
+ if (!rc && *iova != 0)
+ return 0;
+ cbuf = kzalloc(sizeof(*cbuf), GFP_KERNEL);
+ if (!cbuf)
+ return -ENOMEM;
+
+ cbuf->smem.buffer_type = in_buf->flags;
+ cbuf->smem.fd = in_buf->fd;
+ cbuf->smem.size = in_buf->size;
+ cbuf->smem.flags = 0;
+ cbuf->smem.offset = 0;
+ cbuf->smem.dma_buf = in_buf->dbuf;
+ cbuf->buffer_ownership = CLIENT;
+
+ rc = msm_cvp_smem_map_dma_buf(inst, &cbuf->smem);
+ if (rc) {
+ dprintk(CVP_ERR,
+ "%s: %x : fd %d %s size %d",
+ "map persist failed", hash32_ptr(inst->session), cbuf->smem.fd,
+ cbuf->smem.dma_buf->name, cbuf->smem.size);
+ goto exit;
+ }
+
+ /* Assign mapped dma_buf back because it could be zero previously */
+ in_buf->dbuf = cbuf->smem.dma_buf;
+
+ mutex_lock(&inst->persistbufs.lock);
+ list_add_tail(&cbuf->list, &inst->persistbufs.list);
+ mutex_unlock(&inst->persistbufs.lock);
+
+ *iova = cbuf->smem.device_addr;
+
+ dprintk(CVP_DBG,
+ "%s: %x : fd %d %s size %d", "map persist", hash32_ptr(inst->session),
+ cbuf->smem.fd, cbuf->smem.dma_buf->name, cbuf->smem.size);
+ return rc;
+
+exit:
+ kfree(cbuf);
+ cbuf = NULL;
+
+ return rc;
+}
+
static int msm_cvp_map_buf_cpu(struct msm_cvp_inst *inst,
struct cvp_buf_type *in_buf,
u32 *iova,
@@ -625,6 +694,56 @@ static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
return rc;
}
+static int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
+ struct cvp_kmd_hfi_packet *in_pkt,
+ unsigned int offset, unsigned int buf_num)
+{
+ struct cvp_buf_desc *buf_ptr;
+ struct cvp_buf_type *new_buf;
+ int i, rc = 0;
+ unsigned int iova;
+
+ if (!offset || !buf_num)
+ return 0;
+
+ for (i = 0; i < buf_num; i++) {
+ buf_ptr = (struct cvp_buf_desc *)
+ &in_pkt->pkt_data[offset];
+
+ offset += sizeof(*new_buf) >> 2;
+ new_buf = (struct cvp_buf_type *)buf_ptr;
+
+ /*
+ * Make sure fd or dma_buf field doesn't have any
+ * garbage value.
+ */
+ if (inst->session_type == MSM_CVP_USER) {
+ new_buf->dbuf = 0;
+ } else if (inst->session_type == MSM_CVP_KERNEL) {
+ new_buf->fd = -1;
+ } else if (inst->session_type >= MSM_CVP_UNKNOWN) {
+ dprintk(CVP_ERR,
+ "%s: unknown session type %d\n",
+ __func__, inst->session_type);
+ return -EINVAL;
+ }
+
+ if (new_buf->fd <= 0 && !new_buf->dbuf)
+ continue;
+
+ rc = msm_cvp_map_buf_user_persist(inst, new_buf, &iova);
+ if (rc) {
+ dprintk(CVP_ERR,
+ "%s: buf %d register failed.\n",
+ __func__, i);
+
+ return rc;
+ }
+ new_buf->fd = iova;
+ }
+ return rc;
+}
+
static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
struct cvp_kmd_hfi_packet *in_pkt,
unsigned int offset, unsigned int buf_num)
@@ -818,7 +937,11 @@ static int msm_cvp_session_process_hfi(
buf_num = in_buf_num;
}
- rc = msm_cvp_map_buf(inst, in_pkt, offset, buf_num);
+ if (in_pkt->pkt_data[1] == HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS)
+ rc = msm_cvp_map_user_persist(inst, in_pkt, offset, buf_num);
+ else
+ rc = msm_cvp_map_buf(inst, in_pkt, offset, buf_num);
+
if (rc)
goto exit;
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.c b/drivers/media/platform/msm/cvp/msm_cvp_common.c
index e2bd3f8..2fb9850 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.c
@@ -638,15 +638,17 @@ static void handle_sys_error(enum hal_command_response cmd, void *data)
call_hfi_op(hdev, flush_debug_queue, hdev->hfi_device_data);
list_for_each_entry(inst, &core->instances, list) {
dprintk(CVP_WARN,
- "%s: sys error for inst %#x kref %x, cmd %x\n",
+ "%s: sys error inst %#x kref %x, cmd %x state %x\n",
__func__, inst, kref_read(&inst->kref),
- inst->cur_cmd_type);
- change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
-
- spin_lock_irqsave(&inst->event_handler.lock, flags);
- inst->event_handler.event = CVP_SSR_EVENT;
- spin_unlock_irqrestore(&inst->event_handler.lock, flags);
- wake_up_all(&inst->event_handler.wq);
+ inst->cur_cmd_type, inst->state);
+ if (inst->state != MSM_CVP_CORE_INVALID) {
+ change_cvp_inst_state(inst, MSM_CVP_CORE_INVALID);
+ spin_lock_irqsave(&inst->event_handler.lock, flags);
+ inst->event_handler.event = CVP_SSR_EVENT;
+ spin_unlock_irqrestore(
+ &inst->event_handler.lock, flags);
+ wake_up_all(&inst->event_handler.wq);
+ }
if (!core->trigger_ssr)
msm_cvp_comm_print_inst_info(inst);
@@ -1477,7 +1479,7 @@ int msm_cvp_comm_kill_session(struct msm_cvp_inst *inst)
}
}
- if (inst->state == MSM_CVP_CORE_UNINIT) {
+ if (inst->state >= MSM_CVP_CORE_UNINIT) {
spin_lock_irqsave(&inst->event_handler.lock, flags);
inst->event_handler.event = CVP_SSR_EVENT;
spin_unlock_irqrestore(&inst->event_handler.lock, flags);
@@ -1655,6 +1657,7 @@ static int allocate_and_set_internal_bufs(struct msm_cvp_inst *inst,
}
binfo->buffer_type = HFI_BUFFER_INTERNAL_PERSIST_1;
+ binfo->buffer_ownership = DRIVER;
rc = set_internal_buf_on_fw(inst, &binfo->smem, false);
if (rc)
@@ -1711,6 +1714,7 @@ int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst)
int rc = 0;
struct msm_cvp_core *core;
struct cvp_hfi_device *hdev;
+ int all_released;
if (!inst) {
dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
@@ -1729,6 +1733,8 @@ int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst)
}
dprintk(CVP_DBG, "release persist buffer!\n");
+ all_released = 0;
+
mutex_lock(&inst->persistbufs.lock);
list_for_each_safe(ptr, next, &inst->persistbufs.list) {
buf = list_entry(ptr, struct cvp_internal_buf, list);
@@ -1738,36 +1744,49 @@ int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst)
mutex_unlock(&inst->persistbufs.lock);
return -EINVAL;
}
- if (inst->state > MSM_CVP_CLOSE_DONE) {
- list_del(&buf->list);
- msm_cvp_smem_free(handle);
- kfree(buf);
- continue;
- }
- buffer_info.buffer_size = handle->size;
- buffer_info.buffer_type = buf->buffer_type;
- buffer_info.num_buffers = 1;
- buffer_info.align_device_addr = handle->device_addr;
- buffer_info.response_required = true;
- rc = call_hfi_op(hdev, session_release_buffers,
- (void *)inst->session, &buffer_info);
- if (!rc) {
- mutex_unlock(&inst->persistbufs.lock);
- rc = wait_for_sess_signal_receipt(inst,
+
+ /* Workaround for FW: release buffer means release all */
+ if (inst->state <= MSM_CVP_CLOSE_DONE && !all_released) {
+ buffer_info.buffer_size = handle->size;
+ buffer_info.buffer_type = buf->buffer_type;
+ buffer_info.num_buffers = 1;
+ buffer_info.align_device_addr = handle->device_addr;
+ buffer_info.response_required = true;
+ rc = call_hfi_op(hdev, session_release_buffers,
+ (void *)inst->session, &buffer_info);
+ if (!rc) {
+ mutex_unlock(&inst->persistbufs.lock);
+ rc = wait_for_sess_signal_receipt(inst,
HAL_SESSION_RELEASE_BUFFER_DONE);
- if (rc)
- dprintk(CVP_WARN,
+ if (rc)
+ dprintk(CVP_WARN,
"%s: wait for signal failed, rc %d\n",
- __func__, rc);
- mutex_lock(&inst->persistbufs.lock);
- } else {
- dprintk(CVP_WARN,
- "Rel prst buf fail:%x, %d\n",
- buffer_info.align_device_addr,
- buffer_info.buffer_size);
+ __func__, rc);
+ mutex_lock(&inst->persistbufs.lock);
+ } else {
+ dprintk(CVP_WARN,
+ "Rel prst buf fail:%x, %d\n",
+ buffer_info.align_device_addr,
+ buffer_info.buffer_size);
+ }
+ all_released = 1;
}
list_del(&buf->list);
- msm_cvp_smem_free(handle);
+
+ if (buf->buffer_ownership == DRIVER) {
+ dprintk(CVP_DBG,
+ "%s: %x : fd %d %s size %d",
+ "free arp", hash32_ptr(inst->session), buf->smem.fd,
+ buf->smem.dma_buf->name, buf->smem.size);
+ msm_cvp_smem_free(handle);
+ } else if (buf->buffer_ownership == CLIENT) {
+ dprintk(CVP_DBG,
+ "%s: %x : fd %d %s size %d",
+ "unmap persist", hash32_ptr(inst->session),
+ buf->smem.fd, buf->smem.dma_buf->name, buf->smem.size);
+ msm_cvp_smem_unmap_dma_buf(inst, &buf->smem);
+ }
+
kfree(buf);
}
mutex_unlock(&inst->persistbufs.lock);
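The release loop above now branches on who owns each persistent buffer: DRIVER-owned memory was allocated in the kernel and must be freed, while CLIENT-owned memory was only mapped in and must be unmapped. The split, reduced to a sketch (enum values and helpers are illustrative):

enum my_ownership { MY_DRIVER, MY_CLIENT };

struct my_buf { enum my_ownership owner; /* plus mapping handles */ };

static void my_smem_free(struct my_buf *b) { /* free backing memory */ }
static void my_smem_unmap(struct my_buf *b) { /* drop our mapping only */ }

static void my_release_buf(struct my_buf *b)
{
	switch (b->owner) {
	case MY_DRIVER:
		my_smem_free(b);	/* we allocated it */
		break;
	case MY_CLIENT:
		my_smem_unmap(b);	/* client still owns the memory */
		break;
	}
}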
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.h b/drivers/media/platform/msm/cvp/msm_cvp_common.h
index aac667d..99dd3fd 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.h
@@ -45,4 +45,5 @@ int cvp_comm_set_arp_buffers(struct msm_cvp_inst *inst);
int cvp_comm_release_persist_buffers(struct msm_cvp_inst *inst);
void print_client_buffer(u32 tag, const char *str,
struct msm_cvp_inst *inst, struct cvp_kmd_buffer *cbuf);
+void msm_cvp_unmap_buf_cpu(struct msm_cvp_inst *inst, u64 ktid);
#endif
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_core.c b/drivers/media/platform/msm/cvp/msm_cvp_core.c
index 365f0a8..cde1eaa 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_core.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_core.c
@@ -213,8 +213,9 @@ static bool msm_cvp_check_for_inst_overload(struct msm_cvp_core *core)
/* Instance count includes current instance as well. */
- if ((instance_count > core->resources.max_inst_count) ||
- (secure_instance_count > core->resources.max_secure_inst_count))
+ if ((instance_count >= core->resources.max_inst_count) ||
+ (secure_instance_count >=
+ core->resources.max_secure_inst_count))
overload = true;
return overload;
}
@@ -273,6 +274,19 @@ void *msm_cvp_open(int core_id, int session_type)
goto err_invalid_core;
}
+ core->resources.max_inst_count = MAX_SUPPORTED_INSTANCES;
+ if (msm_cvp_check_for_inst_overload(core)) {
+ dprintk(CVP_ERR, "Instance num reached Max, rejecting session");
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list)
+ dprintk(CVP_ERR, "inst %pK, cmd %d id %d\n",
+ inst, inst->cur_cmd_type,
+ hash32_ptr(inst->session));
+ mutex_unlock(&core->lock);
+
+ return NULL;
+ }
+
inst = kzalloc(sizeof(*inst), GFP_KERNEL);
if (!inst) {
dprintk(CVP_ERR, "Failed to allocate memory\n");
@@ -331,19 +345,6 @@ void *msm_cvp_open(int core_id, int session_type)
goto fail_init;
}
- core->resources.max_inst_count = MAX_SUPPORTED_INSTANCES;
- if (msm_cvp_check_for_inst_overload(core)) {
- dprintk(CVP_ERR, "Instance num reached Max, rejecting session");
- mutex_lock(&core->lock);
- list_for_each_entry(inst, &core->instances, list)
- dprintk(CVP_ERR, "inst %pK, cmd %d id %d\n",
- inst, inst->cur_cmd_type,
- hash32_ptr(inst->session));
- mutex_unlock(&core->lock);
-
- goto fail_init;
- }
-
inst->debugfs_root =
msm_cvp_debugfs_init_inst(inst, core->debugfs_root);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
index 8dabdae..4b6d92e 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
@@ -63,6 +63,8 @@ struct cvp_dsp_apps {
struct completion dereg_buffer_work;
struct completion shutdown_work;
struct completion cmdqueue_send_work;
+ struct work_struct ssr_work;
+ struct iris_hfi_device *device;
};
@@ -78,6 +80,7 @@ static int cvp_dsp_send_cmd(void *msg, uint32_t len)
int err = 0;
if (IS_ERR_OR_NULL(me->chan)) {
+ dprintk(CVP_ERR, "%s: DSP GLink is not ready\n", __func__);
err = -EINVAL;
goto bail;
}
@@ -87,6 +90,40 @@ static int cvp_dsp_send_cmd(void *msg, uint32_t len)
return err;
}
+void msm_cvp_cdsp_ssr_handler(struct work_struct *work)
+{
+ struct cvp_dsp_apps *me;
+ uint64_t msg_ptr;
+ uint32_t msg_ptr_len;
+ int err;
+
+ me = container_of(work, struct cvp_dsp_apps, ssr_work);
+ if (!me) {
+ dprintk(CVP_ERR, "%s: Invalid params\n", __func__);
+ return;
+ }
+
+ msg_ptr = cmd_msg.msg_ptr;
+ msg_ptr_len = cmd_msg.msg_ptr_len;
+
+ err = cvp_dsp_send_cmd_hfi_queue((phys_addr_t *)msg_ptr,
+ msg_ptr_len,
+ (void *)NULL);
+ if (err) {
+ dprintk(CVP_ERR,
+ "%s: Failed to send HFI Queue address. err=%d\n",
+ __func__, err);
+ return;
+ }
+
+ if (me->device) {
+ mutex_lock(&me->device->lock);
+ me->device->dsp_flags |= DSP_INIT;
+ mutex_unlock(&me->device->lock);
+ }
+ dprintk(CVP_DBG, "%s: dsp recover from SSR successfully\n", __func__);
+}
+
static int cvp_dsp_rpmsg_probe(struct rpmsg_device *rpdev)
{
int err = 0;
@@ -123,14 +160,7 @@ static int cvp_dsp_rpmsg_probe(struct rpmsg_device *rpdev)
__func__, err);
return err;
}
- err = cvp_dsp_send_cmd_hfi_queue(
- (phys_addr_t *)msg_ptr, msg_ptr_len);
- if (err) {
- dprintk(CVP_ERR,
- "%s: Failed to send HFI Queue address. err=%d\n",
- __func__, err);
- goto bail;
- }
+ schedule_work(&me->ssr_work);
mutex_lock(&me->smd_mutex);
cdsp_state = me->cdsp_state;
mutex_unlock(&me->smd_mutex);
@@ -147,9 +177,15 @@ static void cvp_dsp_rpmsg_remove(struct rpmsg_device *rpdev)
{
struct cvp_dsp_apps *me = &gfa_cv;
+ cancel_work_sync(&me->ssr_work);
mutex_lock(&me->smd_mutex);
me->chan = NULL;
me->cdsp_state = STATUS_SSR;
+ if (me->device) {
+ mutex_lock(&me->device->lock);
+ me->device->dsp_flags &= ~DSP_INIT;
+ mutex_unlock(&me->device->lock);
+ }
mutex_unlock(&me->smd_mutex);
dprintk(CVP_INFO,
"%s: CDSP SSR triggered\n", __func__);
@@ -192,7 +228,8 @@ static int cvp_dsp_rpmsg_callback(struct rpmsg_device *rpdev,
}
int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
- uint32_t size_in_bytes)
+ uint32_t size_in_bytes,
+ struct iris_hfi_device *device)
{
int err, timeout;
struct msm_cvp_core *core;
@@ -211,15 +248,18 @@ int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
dprintk(CVP_ERR,
"%s: Incorrect DDR type value %d\n",
__func__, local_cmd_msg.ddr_type);
+ err = -EINVAL;
+ goto exit;
}
mutex_lock(&me->smd_mutex);
cmd_msg.msg_ptr = (uint64_t)phys_addr;
cmd_msg.msg_ptr_len = (size_in_bytes);
+ me->device = device;
mutex_unlock(&me->smd_mutex);
dprintk(CVP_DBG,
- "%s :: address of buffer, PA=0x%pK size_buff=%d ddr_type=%d\n",
+ "%s: address of buffer, PA=0x%pK size_buff=%d ddr_type=%d\n",
__func__, phys_addr, size_in_bytes, local_cmd_msg.ddr_type);
err = hyp_assign_phys((uint64_t)local_cmd_msg.msg_ptr,
@@ -229,33 +269,34 @@ int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
dprintk(CVP_ERR,
"%s: Failed in hyp_assign. err=%d\n",
__func__, err);
- return err;
+ goto exit;
}
err = cvp_dsp_send_cmd
(&local_cmd_msg, sizeof(struct cvp_dsp_cmd_msg));
- if (err != 0)
+ if (err) {
dprintk(CVP_ERR,
- "%s: cvp_dsp_send_cmd failed with err=%d\n",
+ "%s: cvp_dsp_send_cmd faidmesgled with err=%d\n",
__func__, err);
- else {
- core = list_first_entry(&cvp_driver->cores,
- struct msm_cvp_core, list);
- timeout = msecs_to_jiffies(
- core->resources.msm_cvp_dsp_rsp_timeout);
- err = wait_for_completion_timeout(
- &me->cmdqueue_send_work, timeout);
- if (!err) {
- dprintk(CVP_ERR, "failed to send cmdqueue\n");
- return -ETIMEDOUT;
- }
-
- mutex_lock(&me->smd_mutex);
- me->cvp_shutdown = STATUS_OK;
- me->cdsp_state = STATUS_OK;
- mutex_unlock(&me->smd_mutex);
+ goto exit;
}
+ core = list_first_entry(&cvp_driver->cores,
+ struct msm_cvp_core, list);
+ timeout = msecs_to_jiffies(
+ core->resources.msm_cvp_dsp_rsp_timeout);
+ if (!wait_for_completion_timeout(&me->cmdqueue_send_work, timeout)) {
+ dprintk(CVP_ERR, "failed to send cmdqueue\n");
+ err = -ETIMEDOUT;
+ goto exit;
+ }
+
+ mutex_lock(&me->smd_mutex);
+ me->cvp_shutdown = STATUS_OK;
+ me->cdsp_state = STATUS_OK;
+ mutex_unlock(&me->smd_mutex);
+
+exit:
return err;
}
@@ -485,6 +526,7 @@ static int __init cvp_dsp_device_init(void)
init_completion(&me->cmdqueue_send_work);
me->cvp_shutdown = STATUS_INIT;
me->cdsp_state = STATUS_INIT;
+ INIT_WORK(&me->ssr_work, msm_cvp_cdsp_ssr_handler);
err = register_rpmsg_driver(&cvp_dsp_rpmsg_client);
if (err) {
dprintk(CVP_ERR,
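cvp_dsp_send_cmd_hfi_queue() above is reworked from nested if/else returns into the kernel's single-exit style: each step either succeeds or jumps to one label, and the function has exactly one return. A sketch of the shape with stub steps:

static int my_validate(void)      { return 0; }
static int my_assign_memory(void) { return 0; }
static int my_wait_for_ack(void)  { return 0; }
static void my_mark_ready(void)   { }

static int my_send(void)
{
	int err;

	err = my_validate();
	if (err)
		goto exit;

	err = my_assign_memory();
	if (err)
		goto exit;

	err = my_wait_for_ack();
	if (err)
		goto exit;

	my_mark_ready();		/* success-only bookkeeping */
exit:
	return err;
}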
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_dsp.h b/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
index 0fb3567..ee7a847 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_dsp.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include "msm_cvp_debug.h"
+#include "cvp_core_hfi.h"
#define CVP_APPS_DSP_GLINK_GUID "cvp-glink-apps-dsp"
#define CVP_APPS_DSP_SMD_GUID "cvp-smd-apps-dsp"
@@ -23,7 +24,7 @@
* Size in bytes of command message queue
*/
int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
- uint32_t size_in_bytes);
+ uint32_t size_in_bytes, struct iris_hfi_device *device);
/*
* API for CVP driver to suspend CVP session during
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_internal.h b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
index 0d5b18c..139b322 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_internal.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
@@ -28,7 +28,7 @@
#define MAX_SUPPORTED_INSTANCES 16
#define MAX_NAME_LENGTH 64
#define MAX_DEBUGFS_NAME 50
-#define DCVS_FTB_WINDOW 16
+#define MAX_DSP_INIT_ATTEMPTS 16
#define SYS_MSG_START HAL_SYS_INIT_DONE
#define SYS_MSG_END HAL_SYS_ERROR
diff --git a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
index 3226e9b..d08e998 100644
--- a/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
+++ b/drivers/media/platform/msm/dvb/demux/mpq_dmx_plugin_sw.c
@@ -77,7 +77,7 @@ static int mpq_sw_dmx_write_to_decoder(struct dvb_demux_feed *feed,
}
static int mpq_sw_dmx_set_source(struct dmx_demux *demux,
- const dmx_source_t *src)
+ const enum dmx_source_t *src)
{
int ret = -EINVAL;
diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h
index 6bea45e..bafab3f 100644
--- a/drivers/media/platform/msm/npu/npu_common.h
+++ b/drivers/media/platform/msm/npu/npu_common.h
@@ -45,6 +45,7 @@
#define NPU_MAX_DT_NAME_LEN 21
#define NPU_MAX_PWRLEVELS 8
#define NPU_MAX_STATS_BUF_SIZE 16384
+#define NPU_MAX_BW_DEVS 4
enum npu_power_level {
NPU_PWRLEVEL_MINSVS = 0,
@@ -174,7 +175,9 @@ struct npu_pwrctrl {
uint32_t min_pwrlevel;
uint32_t num_pwrlevels;
- struct device *devbw;
+ struct device *devbw[NPU_MAX_BW_DEVS];
+ uint32_t devbw_num;
+ uint32_t bwmon_enabled;
uint32_t uc_pwrlevel;
uint32_t cdsprm_pwrlevel;
uint32_t fmax_pwrlevel;
diff --git a/drivers/media/platform/msm/npu/npu_dbg.c b/drivers/media/platform/msm/npu/npu_dbg.c
index f69efe5..988d177 100644
--- a/drivers/media/platform/msm/npu/npu_dbg.c
+++ b/drivers/media/platform/msm/npu/npu_dbg.c
@@ -17,18 +17,6 @@
* Function Definitions - Debug
* -------------------------------------------------------------------------
*/
-static void npu_dump_debug_timeout_stats(struct npu_device *npu_dev)
-{
- uint32_t reg_val;
-
- reg_val = REGR(npu_dev, REG_FW_JOB_CNT_START);
- NPU_INFO("fw jobs execute started count = %d\n", reg_val);
- reg_val = REGR(npu_dev, REG_FW_JOB_CNT_END);
- NPU_INFO("fw jobs execute finished count = %d\n", reg_val);
- reg_val = REGR(npu_dev, REG_NPU_FW_DEBUG_DATA);
- NPU_INFO("fw jobs aco parser debug = %d\n", reg_val);
-}
-
void npu_dump_ipc_packet(struct npu_device *npu_dev, void *cmd_ptr)
{
int32_t *ptr = (int32_t *)cmd_ptr;
@@ -50,7 +38,7 @@ static void npu_dump_ipc_queue(struct npu_device *npu_dev, uint32_t target_que)
target_que * sizeof(struct hfi_queue_header);
int32_t *ptr = (int32_t *)&queue;
size_t content_off;
- uint32_t *content;
+ uint32_t *content, content_size;
int i;
MEMR(npu_dev, (void *)((size_t)offset), (uint8_t *)&queue,
@@ -58,21 +46,42 @@ static void npu_dump_ipc_queue(struct npu_device *npu_dev, uint32_t target_que)
NPU_ERR("DUMP IPC queue %d:\n", target_que);
NPU_ERR("Header size %d:\n", HFI_QUEUE_HEADER_SIZE);
- NPU_ERR("Content size %d:\n", queue.qhdr_q_size);
NPU_ERR("============QUEUE HEADER=============\n");
for (i = 0; i < HFI_QUEUE_HEADER_SIZE/4; i++)
NPU_ERR("%x\n", ptr[i]);
- content_off = (size_t)IPC_ADDR + queue.qhdr_start_offset;
- content = kzalloc(queue.qhdr_q_size, GFP_KERNEL);
+ content_off = (size_t)(IPC_ADDR + queue.qhdr_start_offset +
+ queue.qhdr_read_idx);
+ if (queue.qhdr_write_idx >= queue.qhdr_read_idx)
+ content_size = queue.qhdr_write_idx - queue.qhdr_read_idx;
+ else
+ content_size = queue.qhdr_q_size - queue.qhdr_read_idx +
+ queue.qhdr_write_idx;
+
+ NPU_ERR("Content size %d:\n", content_size);
+ if (content_size == 0)
+ return;
+
+ content = kzalloc(content_size, GFP_KERNEL);
if (!content) {
NPU_ERR("failed to allocate IPC queue content buffer\n");
return;
}
- MEMR(npu_dev, (void *)content_off, content, queue.qhdr_q_size);
+ if (queue.qhdr_write_idx >= queue.qhdr_read_idx) {
+ MEMR(npu_dev, (void *)content_off, content, content_size);
+ } else {
+ MEMR(npu_dev, (void *)content_off, content,
+ queue.qhdr_q_size - queue.qhdr_read_idx);
+
+ MEMR(npu_dev, (void *)((size_t)IPC_ADDR +
+ queue.qhdr_start_offset),
+ (void *)((size_t)content + queue.qhdr_q_size -
+ queue.qhdr_read_idx), queue.qhdr_write_idx);
+ }
+
NPU_ERR("============QUEUE CONTENT=============\n");
- for (i = 0; i < queue.qhdr_q_size/4; i++)
+ for (i = 0; i < content_size/4; i++)
NPU_ERR("%x\n", content[i]);
NPU_ERR("DUMP IPC queue %d END\n", target_que);
@@ -103,7 +112,13 @@ static void npu_dump_all_ipc_queue(struct npu_device *npu_dev)
void npu_dump_debug_info(struct npu_device *npu_dev)
{
- npu_dump_debug_timeout_stats(npu_dev);
+ struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+
+ if (host_ctx->fw_state != FW_ENABLED) {
+ NPU_WARN("NPU is disabled\n");
+ return;
+ }
+
npu_dump_dbg_registers(npu_dev);
npu_dump_all_ipc_queue(npu_dev);
}
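npu_dump_ipc_queue() now dumps only the live span between qhdr_read_idx and qhdr_write_idx, performing a two-part copy when the span wraps past the end of the queue, instead of dumping the whole queue area. The index arithmetic, isolated (byte-granular indices assumed, as in the hunk):

#include <linux/string.h>
#include <linux/types.h>

/* Bytes currently queued between the read and write indices. */
static u32 my_ring_used(u32 rd, u32 wr, u32 size)
{
	return (wr >= rd) ? wr - rd : size - rd + wr;
}

/* Copy the live span out of a wrapping ring buffer. */
static void my_ring_read(const u8 *ring, u32 rd, u32 wr, u32 size, u8 *out)
{
	if (wr >= rd) {
		memcpy(out, ring + rd, wr - rd);
	} else {
		memcpy(out, ring + rd, size - rd);	/* tail segment */
		memcpy(out + size - rd, ring, wr);	/* wrapped head */
	}
}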
diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c
index 8653f38..4c46f77 100644
--- a/drivers/media/platform/msm/npu/npu_debugfs.c
+++ b/drivers/media/platform/msm/npu/npu_debugfs.c
@@ -25,6 +25,7 @@
*/
static int npu_debug_open(struct inode *inode, struct file *file);
static int npu_debug_release(struct inode *inode, struct file *file);
+static int npu_debug_reg_release(struct inode *inode, struct file *file);
static ssize_t npu_debug_reg_write(struct file *file,
const char __user *user_buf, size_t count, loff_t *ppos);
static ssize_t npu_debug_reg_read(struct file *file,
@@ -46,7 +47,7 @@ struct npu_device *g_npu_dev;
static const struct file_operations npu_reg_fops = {
.open = npu_debug_open,
- .release = npu_debug_release,
+ .release = npu_debug_reg_release,
.read = npu_debug_reg_read,
.write = npu_debug_reg_write,
};
@@ -86,6 +87,11 @@ static int npu_debug_open(struct inode *inode, struct file *file)
static int npu_debug_release(struct inode *inode, struct file *file)
{
+ return 0;
+}
+
+static int npu_debug_reg_release(struct inode *inode, struct file *file)
+{
struct npu_device *npu_dev = file->private_data;
struct npu_debugfs_ctx *debugfs;
diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c
index 732a9df..a7851bd 100644
--- a/drivers/media/platform/msm/npu/npu_dev.c
+++ b/drivers/media/platform/msm/npu/npu_dev.c
@@ -16,6 +16,7 @@
#include <linux/regulator/consumer.h>
#include <linux/thermal.h>
#include <linux/soc/qcom/llcc-qcom.h>
+#include <soc/qcom/devfreq_devbw.h>
#include "npu_common.h"
#include "npu_hw.h"
@@ -60,6 +61,8 @@ static ssize_t perf_mode_override_store(struct device *dev,
static ssize_t boot_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count);
+static void npu_suspend_devbw(struct npu_device *npu_dev);
+static void npu_resume_devbw(struct npu_device *npu_dev);
static bool npu_is_post_clock(const char *clk_name);
static bool npu_is_exclude_rate_clock(const char *clk_name);
static int npu_get_max_state(struct thermal_cooling_device *cdev,
@@ -74,14 +77,10 @@ static int npu_get_info(struct npu_client *client, unsigned long arg);
static int npu_map_buf(struct npu_client *client, unsigned long arg);
static int npu_unmap_buf(struct npu_client *client,
unsigned long arg);
-static int npu_load_network(struct npu_client *client,
- unsigned long arg);
static int npu_load_network_v2(struct npu_client *client,
unsigned long arg);
static int npu_unload_network(struct npu_client *client,
unsigned long arg);
-static int npu_exec_network(struct npu_client *client,
- unsigned long arg);
static int npu_exec_network_v2(struct npu_client *client,
unsigned long arg);
static int npu_receive_event(struct npu_client *client,
@@ -126,8 +125,6 @@ static const char * const npu_exclude_rate_clocks[] = {
"axi_clk",
"ahb_clk",
"dma_clk",
- "llm_temp_clk",
- "llm_curr_clk",
"atb_clk",
"s2p_clk",
};
@@ -341,26 +338,30 @@ int npu_enable_core_power(struct npu_device *npu_dev)
struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
int ret = 0;
+ mutex_lock(&npu_dev->dev_lock);
+ NPU_DBG("Enable core power %d\n", pwr->pwr_vote_num);
if (!pwr->pwr_vote_num) {
ret = npu_enable_regulators(npu_dev);
if (ret)
- return ret;
+ goto fail;
ret = npu_set_bw(npu_dev, 100, 100);
if (ret) {
npu_disable_regulators(npu_dev);
- return ret;
+ goto fail;
}
ret = npu_enable_core_clocks(npu_dev);
if (ret) {
npu_set_bw(npu_dev, 0, 0);
npu_disable_regulators(npu_dev);
- pwr->pwr_vote_num = 0;
- return ret;
+ goto fail;
}
+ npu_resume_devbw(npu_dev);
}
pwr->pwr_vote_num++;
+fail:
+ mutex_unlock(&npu_dev->dev_lock);
return ret;
}
@@ -369,10 +370,16 @@ void npu_disable_core_power(struct npu_device *npu_dev)
{
struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
- if (!pwr->pwr_vote_num)
+ mutex_lock(&npu_dev->dev_lock);
+ NPU_DBG("Disable core power %d\n", pwr->pwr_vote_num);
+ if (!pwr->pwr_vote_num) {
+ mutex_unlock(&npu_dev->dev_lock);
return;
+ }
+
pwr->pwr_vote_num--;
if (!pwr->pwr_vote_num) {
+ npu_suspend_devbw(npu_dev);
npu_disable_core_clocks(npu_dev);
npu_set_bw(npu_dev, 0, 0);
npu_disable_regulators(npu_dev);
@@ -382,6 +389,7 @@ void npu_disable_core_power(struct npu_device *npu_dev)
NPU_DBG("setting back to power level=%d\n",
pwr->active_pwrlevel);
}
+ mutex_unlock(&npu_dev->dev_lock);
}
static int npu_enable_core_clocks(struct npu_device *npu_dev)
@@ -565,6 +573,42 @@ int npu_set_uc_power_level(struct npu_device *npu_dev,
}
/* -------------------------------------------------------------------------
+ * Bandwidth Monitor Related
+ * -------------------------------------------------------------------------
+ */
+static void npu_suspend_devbw(struct npu_device *npu_dev)
+{
+ struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
+ int ret, i;
+
+ if (pwr->bwmon_enabled && (pwr->devbw_num > 0)) {
+ for (i = 0; i < pwr->devbw_num; i++) {
+ ret = devfreq_suspend_devbw(pwr->devbw[i]);
+ if (ret)
+ NPU_ERR("devfreq_suspend_devbw failed rc:%d\n",
+ ret);
+ }
+ pwr->bwmon_enabled = 0;
+ }
+}
+
+static void npu_resume_devbw(struct npu_device *npu_dev)
+{
+ struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
+ int ret, i;
+
+ if (!pwr->bwmon_enabled && (pwr->devbw_num > 0)) {
+ for (i = 0; i < pwr->devbw_num; i++) {
+ ret = devfreq_resume_devbw(pwr->devbw[i]);
+ if (ret)
+ NPU_ERR("devfreq_resume_devbw failed rc:%d\n",
+ ret);
+ }
+ pwr->bwmon_enabled = 1;
+ }
+}
+
+/* -------------------------------------------------------------------------
* Clocks Related
* -------------------------------------------------------------------------
*/
@@ -631,10 +675,7 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
continue;
}
- NPU_DBG("enabling clock %s\n", core_clks[i].clk_name);
-
if (core_clks[i].reset) {
- NPU_DBG("Deassert %s\n", core_clks[i].clk_name);
rc = reset_control_deassert(core_clks[i].reset);
if (rc)
NPU_WARN("deassert %s reset failed\n",
@@ -651,9 +692,6 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
if (npu_is_exclude_rate_clock(core_clks[i].clk_name))
continue;
- NPU_DBG("setting rate of clock %s to %ld\n",
- core_clks[i].clk_name, pwrlevel->clk_freq[i]);
-
rc = clk_set_rate(core_clks[i].clk,
pwrlevel->clk_freq[i]);
/* not fatal error, keep using previous clk rate */
@@ -674,11 +712,9 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
if (npu_is_post_clock(core_clks[i].clk_name))
continue;
}
- NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
clk_disable_unprepare(core_clks[i].clk);
if (core_clks[i].reset) {
- NPU_DBG("Assert %s\n", core_clks[i].clk_name);
rc = reset_control_assert(core_clks[i].reset);
if (rc)
NPU_WARN("assert %s reset failed\n",
@@ -706,9 +742,6 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)
/* set clock rate to 0 before disabling it */
if (!npu_is_exclude_rate_clock(core_clks[i].clk_name)) {
- NPU_DBG("setting rate of clock %s to 0\n",
- core_clks[i].clk_name);
-
rc = clk_set_rate(core_clks[i].clk, 0);
if (rc) {
NPU_ERR("clk_set_rate %s to 0 failed\n",
@@ -716,11 +749,9 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)
}
}
- NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
clk_disable_unprepare(core_clks[i].clk);
if (core_clks[i].reset) {
- NPU_DBG("Assert %s\n", core_clks[i].clk_name);
rc = reset_control_assert(core_clks[i].reset);
if (rc)
NPU_WARN("assert %s reset failed\n",
@@ -794,11 +825,15 @@ static int npu_enable_regulators(struct npu_device *npu_dev)
regulators[i].regulator_name);
break;
}
- NPU_DBG("regulator %s enabled\n",
- regulators[i].regulator_name);
}
}
- host_ctx->power_vote_num++;
+
+ if (rc) {
+ for (i--; i >= 0; i--)
+ regulator_disable(regulators[i].regulator);
+ } else {
+ host_ctx->power_vote_num++;
+ }
return rc;
}
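npu_enable_regulators() now unwinds on partial failure: if regulator i fails to enable, regulators 0..i-1 are disabled in reverse order before the error is returned, so the function is all-or-nothing. The rollback loop against the regulator API, sketched:

#include <linux/regulator/consumer.h>

static int my_enable_all(struct regulator **regs, int n)
{
	int i, rc = 0;

	for (i = 0; i < n; i++) {
		rc = regulator_enable(regs[i]);
		if (rc)
			break;
	}

	if (rc)
		for (i--; i >= 0; i--)	/* undo what succeeded */
			regulator_disable(regs[i]);

	return rc;
}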
@@ -809,11 +844,9 @@ static void npu_disable_regulators(struct npu_device *npu_dev)
struct npu_regulator *regulators = npu_dev->regulators;
if (host_ctx->power_vote_num > 0) {
- for (i = 0; i < npu_dev->regulator_num; i++) {
+ for (i = 0; i < npu_dev->regulator_num; i++)
regulator_disable(regulators[i].regulator);
- NPU_DBG("regulator %s disabled\n",
- regulators[i].regulator_name);
- }
+
host_ctx->power_vote_num--;
}
}
@@ -845,13 +878,12 @@ int npu_enable_irq(struct npu_device *npu_dev)
reg_val |= RSC_SHUTDOWN_REQ_IRQ_ENABLE | RSC_BRINGUP_REQ_IRQ_ENABLE;
npu_cc_reg_write(npu_dev, NPU_CC_NPU_MASTERn_GENERAL_IRQ_ENABLE(0),
reg_val);
- for (i = 0; i < NPU_MAX_IRQ; i++) {
- if (npu_dev->irq[i].irq != 0) {
+ for (i = 0; i < NPU_MAX_IRQ; i++)
+ if (npu_dev->irq[i].irq != 0)
enable_irq(npu_dev->irq[i].irq);
- NPU_DBG("enable irq %d\n", npu_dev->irq[i].irq);
- }
- }
+
npu_dev->irq_enabled = true;
+ NPU_DBG("irq enabled\n");
return 0;
}
@@ -866,12 +898,9 @@ void npu_disable_irq(struct npu_device *npu_dev)
return;
}
- for (i = 0; i < NPU_MAX_IRQ; i++) {
- if (npu_dev->irq[i].irq != 0) {
+ for (i = 0; i < NPU_MAX_IRQ; i++)
+ if (npu_dev->irq[i].irq != 0)
disable_irq(npu_dev->irq[i].irq);
- NPU_DBG("disable irq %d\n", npu_dev->irq[i].irq);
- }
- }
reg_val = npu_cc_reg_read(npu_dev,
NPU_CC_NPU_MASTERn_GENERAL_IRQ_OWNER(0));
@@ -886,6 +915,7 @@ void npu_disable_irq(struct npu_device *npu_dev)
npu_cc_reg_write(npu_dev, NPU_CC_NPU_MASTERn_GENERAL_IRQ_CLEAR(0),
RSC_SHUTDOWN_REQ_IRQ_ENABLE | RSC_BRINGUP_REQ_IRQ_ENABLE);
npu_dev->irq_enabled = false;
+ NPU_DBG("irq disabled\n");
}
/* -------------------------------------------------------------------------
@@ -931,12 +961,13 @@ int npu_enable_sys_cache(struct npu_device *npu_dev)
REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(3), reg_val);
REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(4), reg_val);
- NPU_DBG("prior to activate sys cache\n");
rc = llcc_slice_activate(npu_dev->sys_cache);
- if (rc)
+ if (rc) {
NPU_ERR("failed to activate sys cache\n");
- else
- NPU_DBG("sys cache activated\n");
+ llcc_slice_putd(npu_dev->sys_cache);
+ npu_dev->sys_cache = NULL;
+ rc = 0;
+ }
}
return rc;
@@ -1095,38 +1126,6 @@ static int npu_unmap_buf(struct npu_client *client, unsigned long arg)
return 0;
}
-static int npu_load_network(struct npu_client *client,
- unsigned long arg)
-{
- struct msm_npu_load_network_ioctl req;
- struct msm_npu_unload_network_ioctl unload_req;
- void __user *argp = (void __user *)arg;
- int ret = 0;
-
- ret = copy_from_user(&req, argp, sizeof(req));
-
- if (ret) {
- NPU_ERR("fail to copy from user\n");
- return -EFAULT;
- }
-
- NPU_DBG("network load with perf request %d\n", req.perf_mode);
-
- ret = npu_host_load_network(client, &req);
- if (ret) {
- NPU_ERR("npu_host_load_network failed %d\n", ret);
- return ret;
- }
-
- ret = copy_to_user(argp, &req, sizeof(req));
- if (ret) {
- NPU_ERR("fail to copy to user\n");
- ret = -EFAULT;
- unload_req.network_hdl = req.network_hdl;
- npu_host_unload_network(client, &unload_req);
- }
- return ret;
-}
static int npu_load_network_v2(struct npu_client *client,
unsigned long arg)
@@ -1216,44 +1215,6 @@ static int npu_unload_network(struct npu_client *client,
return 0;
}
-static int npu_exec_network(struct npu_client *client,
- unsigned long arg)
-{
- struct msm_npu_exec_network_ioctl req;
- void __user *argp = (void __user *)arg;
- int ret = 0;
-
- ret = copy_from_user(&req, argp, sizeof(req));
-
- if (ret) {
- NPU_ERR("fail to copy from user\n");
- return -EFAULT;
- }
-
- if ((req.input_layer_num > MSM_NPU_MAX_INPUT_LAYER_NUM) ||
- (req.output_layer_num > MSM_NPU_MAX_OUTPUT_LAYER_NUM)) {
- NPU_ERR("Invalid input/out layer num %d[max:%d] %d[max:%d]\n",
- req.input_layer_num, MSM_NPU_MAX_INPUT_LAYER_NUM,
- req.output_layer_num, MSM_NPU_MAX_OUTPUT_LAYER_NUM);
- return -EINVAL;
- }
-
- ret = npu_host_exec_network(client, &req);
-
- if (ret) {
- NPU_ERR("npu_host_exec_network failed %d\n", ret);
- return ret;
- }
-
- ret = copy_to_user(argp, &req, sizeof(req));
-
- if (ret) {
- NPU_ERR("fail to copy to user\n");
- return -EFAULT;
- }
- return 0;
-}
-
static int npu_exec_network_v2(struct npu_client *client,
unsigned long arg)
{
@@ -1446,7 +1407,8 @@ static long npu_ioctl(struct file *file, unsigned int cmd,
ret = npu_unmap_buf(client, arg);
break;
case MSM_NPU_LOAD_NETWORK:
- ret = npu_load_network(client, arg);
+ NPU_ERR("npu_load_network_v1 is no longer supported\n");
+ ret = -ENOTTY;
break;
case MSM_NPU_LOAD_NETWORK_V2:
ret = npu_load_network_v2(client, arg);
@@ -1455,7 +1417,8 @@ static long npu_ioctl(struct file *file, unsigned int cmd,
ret = npu_unload_network(client, arg);
break;
case MSM_NPU_EXEC_NETWORK:
- ret = npu_exec_network(client, arg);
+ NPU_ERR("npu_exec_network_v1 is no longer supported\n");
+ ret = -ENOTTY;
break;
case MSM_NPU_EXEC_NETWORK_V2:
ret = npu_exec_network_v2(client, arg);
@@ -1658,8 +1621,6 @@ int npu_set_bw(struct npu_device *npu_dev, int new_ib, int new_ab)
bwctrl->bw_levels[i].vectors[1].ib = new_ib * MBYTE;
bwctrl->bw_levels[i].vectors[1].ab = new_ab / bwctrl->num_paths * MBYTE;
- NPU_INFO("BW MBps: AB: %d IB: %d\n", new_ab, new_ib);
-
ret = msm_bus_scale_client_update_request(bwctrl->bus_client, i);
if (ret) {
NPU_ERR("bandwidth request failed (%d)\n", ret);
@@ -1790,7 +1751,9 @@ static int npu_pwrctrl_init(struct npu_device *npu_dev)
{
struct platform_device *pdev = npu_dev->pdev;
struct device_node *node;
- int ret = 0;
+ int ret = 0, i;
+ struct platform_device *p2dev;
+ struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
/* Power levels */
node = of_find_node_by_name(pdev->dev.of_node, "qcom,npu-pwrlevels");
@@ -1804,6 +1767,47 @@ static int npu_pwrctrl_init(struct npu_device *npu_dev)
if (ret)
return ret;
+ /* Parse Bandwidth Monitor */
+ pwr->devbw_num = of_property_count_strings(pdev->dev.of_node,
+ "qcom,npubw-dev-names");
+ if (pwr->devbw_num <= 0) {
+ NPU_INFO("npubw-dev-names are not defined\n");
+ return 0;
+ } else if (pwr->devbw_num > NPU_MAX_BW_DEVS) {
+ NPU_ERR("number of devbw %d exceeds limit\n", pwr->devbw_num);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < pwr->devbw_num; i++) {
+ node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,npubw-devs", i);
+
+ if (node) {
+ p2dev = of_find_device_by_node(node);
+ of_node_put(node);
+ if (p2dev) {
+ pwr->devbw[i] = &p2dev->dev;
+ } else {
+ NPU_ERR("can't find devbw%d\n", i);
+ ret = -EINVAL;
+ break;
+ }
+ } else {
+ NPU_ERR("can't find devbw node\n");
+ ret = -EINVAL;
+ break;
+ }
+ }
+
+ if (ret) {
+ /* Allow npu to work without bwmon */
+ pwr->devbw_num = 0;
+ ret = 0;
+ } else {
+ /* Set to 1 initially - we assume bwmon is on */
+ pwr->bwmon_enabled = 1;
+ }
+
return ret;
}
@@ -2109,6 +2113,7 @@ static int npu_probe(struct platform_device *pdev)
return -EFAULT;
npu_dev->pdev = pdev;
+ mutex_init(&npu_dev->dev_lock);
platform_set_drvdata(pdev, npu_dev);
res = platform_get_resource_byname(pdev,
@@ -2298,8 +2303,6 @@ static int npu_probe(struct platform_device *pdev)
if (rc)
goto error_driver_init;
- mutex_init(&npu_dev->dev_lock);
-
rc = npu_host_init(npu_dev);
if (rc) {
NPU_ERR("unable to init host\n");
diff --git a/drivers/media/platform/msm/npu/npu_firmware.h b/drivers/media/platform/msm/npu/npu_firmware.h
index 8c0385d..3d8537b 100644
--- a/drivers/media/platform/msm/npu/npu_firmware.h
+++ b/drivers/media/platform/msm/npu/npu_firmware.h
@@ -29,11 +29,6 @@
/* Data value for debug */
#define REG_NPU_FW_DEBUG_DATA NPU_GPR13
-/* Started job count */
-#define REG_FW_JOB_CNT_START NPU_GPR14
-/* Finished job count */
-#define REG_FW_JOB_CNT_END NPU_GPR15
-
/* NPU FW Control/Status Register */
/* bit fields definitions in CTRL STATUS REG */
#define FW_CTRL_STATUS_IPC_READY_BIT 0
diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c
index 85e8187..62feb8c 100644
--- a/drivers/media/platform/msm/npu/npu_host_ipc.c
+++ b/drivers/media/platform/msm/npu/npu_host_ipc.c
@@ -166,9 +166,7 @@ static int npu_host_ipc_send_cmd_hfi(struct npu_device *npu_dev,
status = INTERRUPT_RAISE_NPU(npu_dev);
}
- if (status == 0)
- NPU_DBG("Cmd Msg put on Command Queue - SUCCESSS\n");
- else
+ if (status)
NPU_ERR("Cmd Msg put on Command Queue - FAILURE\n");
return status;
@@ -238,6 +236,13 @@ static int ipc_queue_read(struct npu_device *npu_dev,
status = -EPERM;
goto exit;
}
+
+ if (packet_size > NPU_IPC_BUF_LENGTH) {
+ NPU_ERR("Invalid packet size %d\n", packet_size);
+ status = -EINVAL;
+ goto exit;
+ }
+
new_read_idx = queue.qhdr_read_idx + packet_size;
if (new_read_idx < (queue.qhdr_q_size)) {
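
The ipc_queue_read() hunk adds a bounds check on packet_size before the read index is advanced. The size field lives in a queue shared with firmware, so it must be validated against the local buffer before any copy. A sketch of the guard, assuming a destination buffer of NPU_IPC_BUF_LENGTH bytes (names are illustrative):

    static int copy_ipc_packet(u8 *dst, const u8 *queue_base,
                               u32 read_idx, u32 packet_size)
    {
            /* never trust a length written by the remote side */
            if (packet_size > NPU_IPC_BUF_LENGTH)
                    return -EINVAL;

            memcpy(dst, queue_base + read_idx, packet_size);
            return 0;
    }
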
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index 243d7cd..b738a8b 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -99,7 +99,6 @@ static int load_fw_nolock(struct npu_device *npu_dev, bool enable)
}
/* Keep reading ctrl status until NPU is ready */
- NPU_DBG("waiting for status ready from fw\n");
if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
ret = -EPERM;
@@ -194,7 +193,6 @@ int load_fw(struct npu_device *npu_dev)
int unload_fw(struct npu_device *npu_dev)
{
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
- int ret = 0;
if (host_ctx->auto_pil_disable) {
NPU_WARN("auto pil is disabled\n");
@@ -212,16 +210,7 @@ int unload_fw(struct npu_device *npu_dev)
return -EBUSY;
}
- /* vote minimum bandwidth before unload npu fw via PIL */
- ret = npu_set_bw(npu_dev, 100, 100);
- if (ret) {
- NPU_ERR("Can't update bandwidth\n");
- mutex_unlock(&host_ctx->lock);
- return ret;
- }
-
subsystem_put_local(host_ctx->subsystem_handle);
- npu_set_bw(npu_dev, 0, 0);
host_ctx->fw_state = FW_UNLOADED;
NPU_DBG("fw is unloaded\n");
mutex_unlock(&host_ctx->lock);
@@ -531,9 +520,18 @@ static int npu_notifier_cb(struct notifier_block *this, unsigned long code,
npu_disable_core_power(npu_dev);
npu_notify_aop(npu_dev, false);
}
+
+ /* vote minimum bandwidth before unload npu fw via PIL */
+ ret = npu_set_bw(npu_dev, 100, 100);
+ if (ret)
+ NPU_WARN("Can't update bandwidth\n");
+
break;
}
case SUBSYS_AFTER_SHUTDOWN:
+ ret = npu_set_bw(npu_dev, 0, 0);
+ if (ret)
+ NPU_WARN("Can't update bandwidth\n");
break;
default:
NPU_DBG("Ignoring event\n");
@@ -592,12 +590,14 @@ int npu_host_init(struct npu_device *npu_dev)
if (IS_ERR(host_ctx->notif_hdle)) {
NPU_ERR("register event notification failed\n");
sts = PTR_ERR(host_ctx->notif_hdle);
- return sts;
+ host_ctx->notif_hdle = NULL;
+ goto fail;
}
host_ctx->wq = create_workqueue("npu_irq_hdl");
if (!host_ctx->wq) {
sts = -EPERM;
+ goto fail;
} else {
INIT_WORK(&host_ctx->ipc_irq_work, npu_ipc_irq_work);
INIT_WORK(&host_ctx->wdg_err_irq_work, npu_wdg_err_irq_work);
@@ -608,16 +608,33 @@ int npu_host_init(struct npu_device *npu_dev)
npu_disable_fw_work);
}
+ host_ctx->ipc_msg_buf = kzalloc(NPU_IPC_BUF_LENGTH, GFP_KERNEL);
+ if (!host_ctx->ipc_msg_buf) {
+ NPU_ERR("Failed to allocate ipc buffer\n");
+ sts = -ENOMEM;
+ goto fail;
+ }
+
host_ctx->auto_pil_disable = false;
return sts;
+fail:
+ if (host_ctx->wq)
+ destroy_workqueue(host_ctx->wq);
+ if (host_ctx->notif_hdle)
+ subsys_notif_unregister_notifier(host_ctx->notif_hdle,
+ &host_ctx->nb);
+ mutex_destroy(&host_ctx->lock);
+ return sts;
}
void npu_host_deinit(struct npu_device *npu_dev)
{
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
+ kfree(host_ctx->ipc_msg_buf);
destroy_workqueue(host_ctx->wq);
+ subsys_notif_unregister_notifier(host_ctx->notif_hdle, &host_ctx->nb);
mutex_destroy(&host_ctx->lock);
}
@@ -630,7 +647,6 @@ irqreturn_t npu_ipc_intr_hdlr(int irq, void *ptr)
struct npu_device *npu_dev = (struct npu_device *)ptr;
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
- NPU_DBG("NPU ipc irq %d\n", irq);
INTERRUPT_ACK(npu_dev, irq);
/* Check that the event thread currently is running */
@@ -646,23 +662,17 @@ irqreturn_t npu_general_intr_hdlr(int irq, void *ptr)
struct npu_device *npu_dev = (struct npu_device *)ptr;
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
- NPU_DBG("NPU general irq %d\n", irq);
-
reg_val = npu_cc_reg_read(npu_dev,
NPU_CC_NPU_MASTERn_GENERAL_IRQ_STATUS(0));
NPU_DBG("GENERAL_IRQ_STATUS %x\n", reg_val);
reg_val &= (RSC_SHUTDOWN_REQ_IRQ_STATUS | RSC_BRINGUP_REQ_IRQ_STATUS);
ack_val = npu_cc_reg_read(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL);
- if (reg_val & RSC_SHUTDOWN_REQ_IRQ_STATUS) {
- NPU_DBG("Send SHUTDOWN ACK\n");
+ if (reg_val & RSC_SHUTDOWN_REQ_IRQ_STATUS)
ack_val |= Q6SS_RSC_SHUTDOWN_ACK_EN;
- }
- if (reg_val & RSC_BRINGUP_REQ_IRQ_STATUS) {
- NPU_DBG("Send BRINGUP ACK\n");
+ if (reg_val & RSC_BRINGUP_REQ_IRQ_STATUS)
ack_val |= Q6SS_RSC_BRINGUP_ACK_EN;
- }
npu_cc_reg_write(npu_dev, NPU_CC_NPU_CPC_RSC_CTRL, ack_val);
npu_cc_reg_write(npu_dev,
@@ -732,6 +742,7 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
if (host_ctx->wdg_irq_sts) {
NPU_INFO("watchdog irq triggered\n");
+ npu_dump_debug_info(npu_dev);
fw_alive = false;
}
@@ -773,7 +784,6 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
}
/* Keep reading ctrl status until NPU is ready */
- NPU_DBG("waiting for status ready from fw\n");
if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
NPU_ERR("wait for fw status ready timedout\n");
@@ -899,6 +909,12 @@ static void npu_bridge_mbox_work(struct work_struct *work)
return;
}
+ if ((host_ctx->wdg_irq_sts != 0) || (host_ctx->err_irq_sts != 0)) {
+ NPU_WARN("SSR is triggered, skip this time\n");
+ mutex_unlock(&host_ctx->lock);
+ return;
+ }
+
/* queue or modify delayed work to disable fw */
mod_delayed_work(host_ctx->wq, &host_ctx->disable_fw_work,
NPU_MBOX_IDLE_TIMEOUT);
@@ -1174,14 +1190,6 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
NPU_DBG("NPU_IPC_MSG_EXECUTE_DONE status: %d\n",
exe_rsp_pkt->header.status);
NPU_DBG("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
- NPU_DBG("e2e_IPC_time: %d (in tick count)\n",
- exe_rsp_pkt->stats.e2e_ipc_tick_count);
- NPU_DBG("aco_load_time: %d (in tick count)\n",
- exe_rsp_pkt->stats.aco_load_tick_count);
- NPU_DBG("aco_execute_time: %d (in tick count)\n",
- exe_rsp_pkt->stats.aco_execution_tick_count);
- NPU_DBG("total_num_layers: %d\n",
- exe_rsp_pkt->stats.exe_stats.total_num_layers);
network = get_network_by_hdl(host_ctx, NULL,
exe_rsp_pkt->network_hdl);
@@ -1503,13 +1511,14 @@ static int npu_send_network_cmd(struct npu_device *npu_dev,
NPU_ERR("Another cmd is pending\n");
ret = -EBUSY;
} else {
- NPU_DBG("Send cmd %d network id %lld\n",
- ((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
- network->id);
network->cmd_async = async;
network->cmd_ret_status = 0;
network->cmd_pending = true;
network->trans_id = atomic_read(&host_ctx->ipc_trans_id);
+ reinit_completion(&network->cmd_done);
+ NPU_DBG("Send cmd %d network id %llx trans id %d\n",
+ ((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
+ network->id, network->trans_id);
ret = npu_host_ipc_send_cmd(npu_dev,
IPC_QUEUE_APPS_EXEC, cmd_ptr);
if (ret)
@@ -1546,28 +1555,6 @@ static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx,
return ret;
}
-static void host_copy_patch_data(struct npu_patch_tuple *param, uint32_t value,
- struct msm_npu_layer *layer_info)
-{
- param->value = value;
- param->chunk_id = layer_info->patch_info.chunk_id;
- param->loc_offset = layer_info->patch_info.loc_offset;
- param->instruction_size_in_bytes =
- layer_info->patch_info.instruction_size_in_bytes;
- param->shift_value_in_bits =
- layer_info->patch_info.shift_value_in_bits;
- param->variable_size_in_bits =
- layer_info->patch_info.variable_size_in_bits;
-
- NPU_DBG("copy_patch_data: %x %d %x %x %x %x\n",
- param->value,
- param->chunk_id,
- param->loc_offset,
- param->instruction_size_in_bytes,
- param->shift_value_in_bits,
- param->variable_size_in_bits);
-}
-
static void host_copy_patch_data_v2(struct npu_patch_tuple_v2 *param,
struct msm_npu_patch_info_v2 *patch_info)
{
@@ -1607,7 +1594,7 @@ static uint32_t find_networks_perf_mode(struct npu_host_ctx *host_ctx)
network++;
}
}
- pr_debug("max perf mode for networks: %d\n", max_perf_mode);
+ NPU_DBG("max perf mode for networks: %d\n", max_perf_mode);
return max_perf_mode;
}
@@ -1627,110 +1614,6 @@ static int set_perf_mode(struct npu_device *npu_dev)
return ret;
}
-int32_t npu_host_load_network(struct npu_client *client,
- struct msm_npu_load_network_ioctl *load_ioctl)
-{
- int ret = 0;
- struct npu_device *npu_dev = client->npu_dev;
- struct npu_network *network;
- struct ipc_cmd_load_pkt load_packet;
- struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
-
- ret = enable_fw(npu_dev);
- if (ret)
- return ret;
-
- mutex_lock(&host_ctx->lock);
- network = alloc_network(host_ctx, client);
- if (!network) {
- ret = -ENOMEM;
- goto err_deinit_fw;
- }
-
- network_get(network);
- network->buf_hdl = load_ioctl->buf_ion_hdl;
- network->size = load_ioctl->buf_size;
- network->phy_add = load_ioctl->buf_phys_addr;
- network->first_block_size = load_ioctl->first_block_size;
- network->priority = load_ioctl->priority;
- network->perf_mode = load_ioctl->perf_mode;
-
- /* verify mapped physical address */
- if (!npu_mem_verify_addr(client, network->phy_add)) {
- ret = -EINVAL;
- goto error_free_network;
- }
-
- ret = set_perf_mode(npu_dev);
- if (ret) {
- NPU_ERR("set_perf_mode failed\n");
- goto error_free_network;
- }
-
- load_packet.header.cmd_type = NPU_IPC_CMD_LOAD;
- load_packet.header.size = sizeof(struct ipc_cmd_load_pkt);
- load_packet.header.trans_id =
- atomic_add_return(1, &host_ctx->ipc_trans_id);
- load_packet.header.flags = load_ioctl->flags;
-
- /* ACO Buffer. Use the npu mapped aco address */
- load_packet.buf_pkt.address = (uint64_t)network->phy_add;
- load_packet.buf_pkt.buf_size = network->first_block_size;
- load_packet.buf_pkt.network_id = network->id;
-
- /* NPU_IPC_CMD_LOAD will go onto IPC_QUEUE_APPS_EXEC */
- reinit_completion(&network->cmd_done);
- ret = npu_send_network_cmd(npu_dev, network, &load_packet, false,
- false);
- if (ret) {
- NPU_ERR("NPU_IPC_CMD_LOAD sent failed: %d\n", ret);
- goto error_free_network;
- }
-
- mutex_unlock(&host_ctx->lock);
-
- ret = wait_for_completion_interruptible_timeout(
- &network->cmd_done,
- (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
- NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
-
- mutex_lock(&host_ctx->lock);
- if (!ret) {
- NPU_ERR("NPU_IPC_CMD_LOAD time out\n");
- npu_dump_debug_info(npu_dev);
- ret = -ETIMEDOUT;
- goto error_free_network;
- } else if (ret < 0) {
- NPU_ERR("NPU_IPC_CMD_LOAD is interrupted by signal\n");
- goto error_free_network;
- }
-
- if (network->fw_error) {
- ret = -EIO;
- NPU_ERR("fw is in error state during load network\n");
- goto error_free_network;
- }
-
- ret = network->cmd_ret_status;
- if (ret)
- goto error_free_network;
-
- load_ioctl->network_hdl = network->network_hdl;
- network->is_active = true;
- network_put(network);
- mutex_unlock(&host_ctx->lock);
-
- return ret;
-
-error_free_network:
- network_put(network);
- free_network(host_ctx, client, network->id);
-err_deinit_fw:
- mutex_unlock(&host_ctx->lock);
- disable_fw(npu_dev);
- return ret;
-}
-
int32_t npu_host_load_network_v2(struct npu_client *client,
struct msm_npu_load_network_ioctl_v2 *load_ioctl,
struct msm_npu_patch_info_v2 *patch_info)
@@ -1784,8 +1667,6 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
goto error_free_network;
}
- NPU_DBG("network address %llx\n", network->phy_add);
-
ret = set_perf_mode(npu_dev);
if (ret) {
NPU_ERR("set_perf_mode failed\n");
@@ -1805,11 +1686,9 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
load_packet->buf_pkt.num_layers = network->num_layers;
load_packet->num_patch_params = num_patch_params;
- /* NPU_IPC_CMD_LOAD_V2 will go onto IPC_QUEUE_APPS_EXEC */
- reinit_completion(&network->cmd_done);
ret = npu_send_network_cmd(npu_dev, network, load_packet, false, false);
if (ret) {
- NPU_DBG("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
+ NPU_ERR("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
goto error_free_network;
}
@@ -1822,19 +1701,20 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
mutex_lock(&host_ctx->lock);
- if (!ret) {
- NPU_ERR("npu: NPU_IPC_CMD_LOAD time out\n");
- npu_dump_debug_info(npu_dev);
- ret = -ETIMEDOUT;
- goto error_load_network;
- }
-
if (network->fw_error) {
ret = -EIO;
NPU_ERR("fw is in error state during load_v2 network\n");
goto error_free_network;
}
+ if (!ret) {
+ NPU_ERR("npu: NPU_IPC_CMD_LOAD time out %lld:%d\n",
+ network->id, network->trans_id);
+ npu_dump_debug_info(npu_dev);
+ ret = -ETIMEDOUT;
+ goto error_load_network;
+ }
+
ret = network->cmd_ret_status;
if (ret)
goto error_free_network;
@@ -1908,8 +1788,6 @@ int32_t npu_host_unload_network(struct npu_client *client,
unload_packet.header.flags = 0;
unload_packet.network_hdl = (uint32_t)network->network_hdl;
- /* NPU_IPC_CMD_UNLOAD will go onto IPC_QUEUE_APPS_EXEC */
- reinit_completion(&network->cmd_done);
ret = npu_send_network_cmd(npu_dev, network, &unload_packet, false,
false);
@@ -1937,21 +1815,23 @@ int32_t npu_host_unload_network(struct npu_client *client,
mutex_lock(&host_ctx->lock);
+ if (network->fw_error) {
+ ret = -EIO;
+ NPU_ERR("fw is in error state during unload network\n");
+ goto free_network;
+ }
+
if (!ret) {
- NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out\n");
+ NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out %llx:%d\n",
+ network->id, network->trans_id);
npu_dump_debug_info(npu_dev);
network->cmd_pending = false;
ret = -ETIMEDOUT;
goto free_network;
}
- if (network->fw_error) {
- ret = -EIO;
- NPU_ERR("fw is in error state during unload network\n");
- } else {
- ret = network->cmd_ret_status;
- NPU_DBG("unload network status %d\n", ret);
- }
+ ret = network->cmd_ret_status;
+ NPU_DBG("unload network status %d\n", ret);
free_network:
/*
@@ -1971,131 +1851,6 @@ int32_t npu_host_unload_network(struct npu_client *client,
return ret;
}
-int32_t npu_host_exec_network(struct npu_client *client,
- struct msm_npu_exec_network_ioctl *exec_ioctl)
-{
- struct npu_device *npu_dev = client->npu_dev;
- struct ipc_cmd_execute_pkt exec_packet;
- /* npu mapped addr */
- uint64_t input_off, output_off;
- int32_t ret;
- struct npu_network *network;
- struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
- bool async_ioctl = !!exec_ioctl->async;
-
- mutex_lock(&host_ctx->lock);
- network = get_network_by_hdl(host_ctx, client,
- exec_ioctl->network_hdl);
-
- if (!network) {
- mutex_unlock(&host_ctx->lock);
- return -EINVAL;
- }
-
- if (!network->is_active) {
- NPU_ERR("network is not active\n");
- ret = -EINVAL;
- goto exec_done;
- }
-
- if (network->fw_error) {
- NPU_ERR("fw is in error state\n");
- ret = -EIO;
- goto exec_done;
- }
-
- NPU_DBG("execute network %lld\n", network->id);
- memset(&exec_packet, 0, sizeof(exec_packet));
- if (exec_ioctl->patching_required) {
- if ((exec_ioctl->input_layer_num != 1) ||
- (exec_ioctl->output_layer_num != 1)) {
- NPU_ERR("Invalid input/output layer num\n");
- ret = -EINVAL;
- goto exec_done;
- }
-
- input_off = exec_ioctl->input_layers[0].buf_phys_addr;
- output_off = exec_ioctl->output_layers[0].buf_phys_addr;
- /* verify mapped physical address */
- if (!npu_mem_verify_addr(client, input_off) ||
- !npu_mem_verify_addr(client, output_off)) {
- NPU_ERR("Invalid patch buf address\n");
- ret = -EINVAL;
- goto exec_done;
- }
-
- exec_packet.patch_params.num_params = 2;
- host_copy_patch_data(&exec_packet.patch_params.param[0],
- (uint32_t)input_off, &exec_ioctl->input_layers[0]);
- host_copy_patch_data(&exec_packet.patch_params.param[1],
- (uint32_t)output_off, &exec_ioctl->output_layers[0]);
- } else {
- exec_packet.patch_params.num_params = 0;
- }
-
- exec_packet.header.cmd_type = NPU_IPC_CMD_EXECUTE;
- exec_packet.header.size = sizeof(struct ipc_cmd_execute_pkt);
- exec_packet.header.trans_id =
- atomic_add_return(1, &host_ctx->ipc_trans_id);
- exec_packet.header.flags = 0xF;
- exec_packet.network_hdl = network->network_hdl;
-
- /* Send it on the high priority queue */
- reinit_completion(&network->cmd_done);
- ret = npu_send_network_cmd(npu_dev, network, &exec_packet, async_ioctl,
- false);
-
- if (ret) {
- NPU_ERR("NPU_IPC_CMD_EXECUTE sent failed: %d\n", ret);
- goto exec_done;
- }
-
- if (async_ioctl) {
- NPU_DBG("Async ioctl, return now\n");
- goto exec_done;
- }
-
- mutex_unlock(&host_ctx->lock);
-
- ret = wait_for_completion_timeout(
- &network->cmd_done,
- (host_ctx->fw_dbg_mode & FW_DBG_MODE_INC_TIMEOUT) ?
- NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
-
- mutex_lock(&host_ctx->lock);
- if (!ret) {
- NPU_ERR("npu: %x NPU_IPC_CMD_EXECUTE time out\n",
- network->id);
- npu_dump_debug_info(npu_dev);
- network->cmd_pending = false;
- ret = -ETIMEDOUT;
- goto exec_done;
- }
-
- if (network->fw_error) {
- ret = -EIO;
- NPU_ERR("fw is in error state during execute network\n");
- } else {
- ret = network->cmd_ret_status;
- NPU_DBG("execution status %d\n", ret);
- }
-
-exec_done:
- network_put(network);
- mutex_unlock(&host_ctx->lock);
-
- /*
- * treat network execution timed out as error in order to
- * force npu fw to stop execution
- */
- if (ret == -ETIMEDOUT) {
- NPU_ERR("Error handling after execution failure\n");
- host_error_hdlr(npu_dev, true);
- }
-
- return ret;
-}
-
int32_t npu_host_exec_network_v2(struct npu_client *client,
struct msm_npu_exec_network_ioctl_v2 *exec_ioctl,
struct msm_npu_patch_buf_info *patch_buf_info)
@@ -2174,8 +1929,6 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
NPU_DBG("Execute_v2 flags %x stats_buf_size %d\n",
exec_packet->header.flags, exec_ioctl->stats_buf_size);
- /* Send it on the high priority queue */
- reinit_completion(&network->cmd_done);
ret = npu_send_network_cmd(npu_dev, network, exec_packet, async_ioctl,
false);
@@ -2197,21 +1950,21 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
mutex_lock(&host_ctx->lock);
- if (!ret) {
- NPU_ERR("npu: %x NPU_IPC_CMD_EXECUTE_V2 time out\n",
- network->id);
- npu_dump_debug_info(npu_dev);
- network->cmd_pending = false;
- ret = -ETIMEDOUT;
- goto free_exec_packet;
- }
-
if (network->fw_error) {
ret = -EIO;
NPU_ERR("fw is in error state during execute_v2 network\n");
goto free_exec_packet;
}
+ if (!ret) {
+ NPU_ERR("npu: %llx:%d NPU_IPC_CMD_EXECUTE_V2 time out\n",
+ network->id, network->trans_id);
+ npu_dump_debug_info(npu_dev);
+ network->cmd_pending = false;
+ ret = -ETIMEDOUT;
+ goto free_exec_packet;
+ }
+
ret = network->cmd_ret_status;
if (!ret) {
exec_ioctl->stats_buf_size = network->stats_buf_size;
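
A recurring change in npu_mgr.c is moving reinit_completion() out of the individual callers and into npu_send_network_cmd(), under host_ctx->lock and immediately before the send; reinitializing after the command is already queued can throw away a fast completion from firmware. A sketch of the safe ordering, assuming a completion paired with an IPC send (the send() callback is illustrative):

    static int send_cmd_and_wait(struct mutex *lock, struct completion *done,
                                 int (*send)(void *pkt), void *pkt,
                                 unsigned long timeout)
    {
            long left;
            int ret;

            mutex_lock(lock);
            reinit_completion(done);  /* before the send, under the lock */
            ret = send(pkt);
            mutex_unlock(lock);
            if (ret)
                    return ret;

            left = wait_for_completion_interruptible_timeout(done, timeout);
            if (left == 0)
                    return -ETIMEDOUT;
            if (left < 0)
                    return left;      /* interrupted by a signal */
            return 0;
    }
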
diff --git a/drivers/media/platform/msm/npu/npu_mgr.h b/drivers/media/platform/msm/npu/npu_mgr.h
index 36bcc08..72976cb 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.h
+++ b/drivers/media/platform/msm/npu/npu_mgr.h
@@ -26,7 +26,7 @@
#define NPU_MBOX_IDLE_TIMEOUT msecs_to_jiffies(NPU_MBOX_IDLE_TIMEOUT_MS)
#define FIRMWARE_VERSION 0x00001000
#define MAX_LOADED_NETWORK 32
-#define NPU_IPC_BUF_LENGTH 512
+#define NPU_IPC_BUF_LENGTH 4096
#define FW_DBG_MODE_PAUSE (1 << 0)
#define FW_DBG_MODE_INC_TIMEOUT (1 << 1)
@@ -105,6 +105,7 @@ struct npu_host_ctx {
void *notif_hdle;
spinlock_t bridge_mbox_lock;
bool bridge_mbox_pwr_on;
+ void *ipc_msg_buf;
};
struct npu_device;
@@ -131,15 +132,11 @@ int32_t npu_host_map_buf(struct npu_client *client,
struct msm_npu_map_buf_ioctl *map_ioctl);
int32_t npu_host_unmap_buf(struct npu_client *client,
struct msm_npu_unmap_buf_ioctl *unmap_ioctl);
-int32_t npu_host_load_network(struct npu_client *client,
- struct msm_npu_load_network_ioctl *load_ioctl);
int32_t npu_host_load_network_v2(struct npu_client *client,
struct msm_npu_load_network_ioctl_v2 *load_ioctl,
struct msm_npu_patch_info_v2 *patch_info);
int32_t npu_host_unload_network(struct npu_client *client,
struct msm_npu_unload_network_ioctl *unload);
-int32_t npu_host_exec_network(struct npu_client *client,
- struct msm_npu_exec_network_ioctl *exec_ioctl);
int32_t npu_host_exec_network_v2(struct npu_client *client,
struct msm_npu_exec_network_ioctl_v2 *exec_ioctl,
struct msm_npu_patch_buf_info *patch_buf_info);
diff --git a/drivers/media/radio/radio-raremono.c b/drivers/media/radio/radio-raremono.c
index 9a5079d..729600c 100644
--- a/drivers/media/radio/radio-raremono.c
+++ b/drivers/media/radio/radio-raremono.c
@@ -271,6 +271,14 @@ static int vidioc_g_frequency(struct file *file, void *priv,
return 0;
}
+static void raremono_device_release(struct v4l2_device *v4l2_dev)
+{
+ struct raremono_device *radio = to_raremono_dev(v4l2_dev);
+
+ kfree(radio->buffer);
+ kfree(radio);
+}
+
/* File system interface */
static const struct v4l2_file_operations usb_raremono_fops = {
.owner = THIS_MODULE,
@@ -295,12 +303,14 @@ static int usb_raremono_probe(struct usb_interface *intf,
struct raremono_device *radio;
int retval = 0;
- radio = devm_kzalloc(&intf->dev, sizeof(struct raremono_device), GFP_KERNEL);
- if (radio)
- radio->buffer = devm_kmalloc(&intf->dev, BUFFER_LENGTH, GFP_KERNEL);
-
- if (!radio || !radio->buffer)
+ radio = kzalloc(sizeof(*radio), GFP_KERNEL);
+ if (!radio)
return -ENOMEM;
+ radio->buffer = kmalloc(BUFFER_LENGTH, GFP_KERNEL);
+ if (!radio->buffer) {
+ kfree(radio);
+ return -ENOMEM;
+ }
radio->usbdev = interface_to_usbdev(intf);
radio->intf = intf;
@@ -324,7 +334,8 @@ static int usb_raremono_probe(struct usb_interface *intf,
if (retval != 3 ||
(get_unaligned_be16(&radio->buffer[1]) & 0xfff) == 0x0242) {
dev_info(&intf->dev, "this is not Thanko's Raremono.\n");
- return -ENODEV;
+ retval = -ENODEV;
+ goto free_mem;
}
dev_info(&intf->dev, "Thanko's Raremono connected: (%04X:%04X)\n",
@@ -333,7 +344,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
retval = v4l2_device_register(&intf->dev, &radio->v4l2_dev);
if (retval < 0) {
dev_err(&intf->dev, "couldn't register v4l2_device\n");
- return retval;
+ goto free_mem;
}
mutex_init(&radio->lock);
@@ -345,6 +356,7 @@ static int usb_raremono_probe(struct usb_interface *intf,
radio->vdev.ioctl_ops = &usb_raremono_ioctl_ops;
radio->vdev.lock = &radio->lock;
radio->vdev.release = video_device_release_empty;
+ radio->v4l2_dev.release = raremono_device_release;
usb_set_intfdata(intf, &radio->v4l2_dev);
@@ -360,6 +372,10 @@ static int usb_raremono_probe(struct usb_interface *intf,
}
dev_err(&intf->dev, "could not register video device\n");
v4l2_device_unregister(&radio->v4l2_dev);
+
+free_mem:
+ kfree(radio->buffer);
+ kfree(radio);
return retval;
}
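
The radio-raremono fix replaces devm allocations with plain kzalloc()/kmalloc() and frees them from a v4l2_device release callback. devm memory is freed as soon as the USB interface is unbound, but the V4L2 node can outlive the interface while userspace holds it open, so the memory must live until the last reference drops. The release-callback pattern, sketched with illustrative struct names:

    static void my_device_release(struct v4l2_device *v4l2_dev)
    {
            struct my_radio *radio = container_of(v4l2_dev, struct my_radio,
                                                  v4l2_dev);

            /* last reference is gone; now it is safe to free */
            kfree(radio->buffer);
            kfree(radio);
    }
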
diff --git a/drivers/media/radio/rtc6226/radio-rtc6226-common.c b/drivers/media/radio/rtc6226/radio-rtc6226-common.c
index 15cf8c8..7b8750a 100644
--- a/drivers/media/radio/rtc6226/radio-rtc6226-common.c
+++ b/drivers/media/radio/rtc6226/radio-rtc6226-common.c
@@ -1389,9 +1389,9 @@ int rtc6226_power_up(struct rtc6226_device *radio)
FMDBG("%s : after initialization\n", __func__);
/* mpxconfig */
- /* Disable Softmute / Disable Mute / De-emphasis / Volume 8 */
- radio->registers[MPXCFG] = 0x0008 |
- MPXCFG_CSR0_DIS_SMUTE | MPXCFG_CSR0_DIS_MUTE |
+ /* Disable Mute / De-emphasis / Volume 12 */
+ radio->registers[MPXCFG] = 0x000c |
+ MPXCFG_CSR0_DIS_MUTE |
((de << 12) & MPXCFG_CSR0_DEEM);
retval = rtc6226_set_register(radio, MPXCFG);
if (retval < 0)
diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c
index 257ae0d..e3f6329 100644
--- a/drivers/media/usb/au0828/au0828-core.c
+++ b/drivers/media/usb/au0828/au0828-core.c
@@ -623,6 +623,12 @@ static int au0828_usb_probe(struct usb_interface *interface,
/* Setup */
au0828_card_setup(dev);
+ /*
+ * Store the pointer to the au0828_dev so it can be accessed in
+ * au0828_usb_disconnect
+ */
+ usb_set_intfdata(interface, dev);
+
/* Analog TV */
retval = au0828_analog_register(dev, interface);
if (retval) {
@@ -641,12 +647,6 @@ static int au0828_usb_probe(struct usb_interface *interface,
/* Remote controller */
au0828_rc_register(dev);
- /*
- * Store the pointer to the au0828_dev so it can be accessed in
- * au0828_usb_disconnect
- */
- usb_set_intfdata(interface, dev);
-
pr_info("Registered device AU0828 [%s]\n",
dev->board.name == NULL ? "Unset" : dev->board.name);
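
The au0828 hunk publishes the interface data before any registration: once a subdevice is registered the device can be unplugged, au0828_usb_disconnect() can run, and it finds the device only through the interface data. Condensed shape of the ordering (the registration helper is hypothetical):

    static int probe_tail(struct usb_interface *interface, struct my_dev *dev)
    {
            /* publish the pointer before anything user-visible exists */
            usb_set_intfdata(interface, dev);

            /* from here on, disconnect() may run and must find dev */
            return register_analog(dev);
    }
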
diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
index a771e0a..f5b0459 100644
--- a/drivers/media/usb/cpia2/cpia2_usb.c
+++ b/drivers/media/usb/cpia2/cpia2_usb.c
@@ -902,7 +902,6 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
cpia2_unregister_camera(cam);
v4l2_device_disconnect(&cam->v4l2_dev);
mutex_unlock(&cam->v4l2_lock);
- v4l2_device_put(&cam->v4l2_dev);
if(cam->buffers) {
DBG("Wakeup waiting processes\n");
@@ -911,6 +910,8 @@ static void cpia2_usb_disconnect(struct usb_interface *intf)
wake_up_interruptible(&cam->wq_stream);
}
+ v4l2_device_put(&cam->v4l2_dev);
+
LOG("CPiA2 camera disconnected.\n");
}
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
index 673fdca..fcb201a 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
@@ -1680,7 +1680,7 @@ static int pvr2_decoder_enable(struct pvr2_hdw *hdw,int enablefl)
}
if (!hdw->flag_decoder_missed) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "WARNING: No decoder present");
+ "***WARNING*** No decoder present");
hdw->flag_decoder_missed = !0;
trace_stbit("flag_decoder_missed",
hdw->flag_decoder_missed);
@@ -2366,7 +2366,7 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf,
if (hdw_desc->flag_is_experimental) {
pvr2_trace(PVR2_TRACE_INFO, "**********");
pvr2_trace(PVR2_TRACE_INFO,
- "WARNING: Support for this device (%s) is experimental.",
+ "***WARNING*** Support for this device (%s) is experimental.",
hdw_desc->description);
pvr2_trace(PVR2_TRACE_INFO,
"Important functionality might not be entirely working.");
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
index f3003ca..922c062 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-i2c-core.c
@@ -343,11 +343,11 @@ static int i2c_hack_cx25840(struct pvr2_hdw *hdw,
if ((ret != 0) || (*rdata == 0x04) || (*rdata == 0x0a)) {
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "WARNING: Detected a wedged cx25840 chip; the device will not work.");
+ "***WARNING*** Detected a wedged cx25840 chip; the device will not work.");
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "WARNING: Try power cycling the pvrusb2 device.");
+ "***WARNING*** Try power cycling the pvrusb2 device.");
pvr2_trace(PVR2_TRACE_ERROR_LEGS,
- "WARNING: Disabling further access to the device to prevent other foul-ups.");
+ "***WARNING*** Disabling further access to the device to prevent other foul-ups.");
// This blocks all further communication with the part.
hdw->i2c_func[0x44] = NULL;
pvr2_hdw_render_useless(hdw);
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-std.c b/drivers/media/usb/pvrusb2/pvrusb2-std.c
index 6b651f8..37dc299 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-std.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-std.c
@@ -353,7 +353,7 @@ struct v4l2_standard *pvr2_std_create_enum(unsigned int *countptr,
bcnt = pvr2_std_id_to_str(buf,sizeof(buf),fmsk);
pvr2_trace(
PVR2_TRACE_ERROR_LEGS,
- "WARNING: Failed to classify the following standard(s): %.*s",
+ "***WARNING*** Failed to classify the following standard(s): %.*s",
bcnt,buf);
}
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 1246d69..b1564ca 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -629,13 +629,18 @@ static int __init memstick_init(void)
return -ENOMEM;
rc = bus_register(&memstick_bus_type);
- if (!rc)
- rc = class_register(&memstick_host_class);
+ if (rc)
+ goto error_destroy_workqueue;
- if (!rc)
- return 0;
+ rc = class_register(&memstick_host_class);
+ if (rc)
+ goto error_bus_unregister;
+ return 0;
+
+error_bus_unregister:
bus_unregister(&memstick_bus_type);
+error_destroy_workqueue:
destroy_workqueue(workqueue);
return rc;
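
memstick_init() is rewritten from chained if (!rc) checks into the kernel's usual goto-unwind style: each failure jumps to a label that tears down exactly what was acquired so far, in reverse order. The same skeleton reduced to its shape (names are illustrative; bus/class declarations omitted):

    static struct workqueue_struct *wq;

    static int __init my_init(void)
    {
            int rc;

            wq = create_workqueue("my_wq");
            if (!wq)
                    return -ENOMEM;

            rc = bus_register(&my_bus_type);
            if (rc)
                    goto error_destroy_workqueue;

            rc = class_register(&my_class);
            if (rc)
                    goto error_bus_unregister;

            return 0;

    error_bus_unregister:
            bus_unregister(&my_bus_type);
    error_destroy_workqueue:
            destroy_workqueue(wq);
            return rc;
    }
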
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index 5f1e37d..47d6d40 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -996,7 +996,7 @@ int arizona_dev_init(struct arizona *arizona)
unsigned int reg, val;
int (*apply_patch)(struct arizona *) = NULL;
const struct mfd_cell *subdevs = NULL;
- int n_subdevs, ret, i;
+ int n_subdevs = 0, ret, i;
dev_set_drvdata(arizona->dev, arizona);
mutex_init(&arizona->clk_lock);
diff --git a/drivers/mfd/hi655x-pmic.c b/drivers/mfd/hi655x-pmic.c
index 96c07fa..6693f74 100644
--- a/drivers/mfd/hi655x-pmic.c
+++ b/drivers/mfd/hi655x-pmic.c
@@ -112,6 +112,8 @@ static int hi655x_pmic_probe(struct platform_device *pdev)
pmic->regmap = devm_regmap_init_mmio_clk(dev, NULL, base,
&hi655x_regmap_config);
+ if (IS_ERR(pmic->regmap))
+ return PTR_ERR(pmic->regmap);
regmap_read(pmic->regmap, HI655X_BUS_ADDR(HI655X_VER_REG), &pmic->ver);
if ((pmic->ver < PMU_VER_START) || (pmic->ver > PMU_VER_END)) {
diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c
index 8cfea96..45c7d8b 100644
--- a/drivers/mfd/madera-core.c
+++ b/drivers/mfd/madera-core.c
@@ -278,6 +278,7 @@ const struct of_device_id madera_of_match[] = {
{ .compatible = "cirrus,wm1840", .data = (void *)WM1840 },
{}
};
+MODULE_DEVICE_TABLE(of, madera_of_match);
EXPORT_SYMBOL_GPL(madera_of_match);
static int madera_get_reset_gpio(struct madera *madera)
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 94e3f32c..182973d 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -179,6 +179,7 @@ static int mfd_add_device(struct device *parent, int id,
for_each_child_of_node(parent->of_node, np) {
if (of_device_is_compatible(np, cell->of_compatible)) {
pdev->dev.of_node = np;
+ pdev->dev.fwnode = &np->fwnode;
break;
}
}
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index ddfcf4a..dc35376 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -724,7 +724,7 @@ static int at24_probe(struct i2c_client *client)
nvmem_config.name = dev_name(dev);
nvmem_config.dev = dev;
nvmem_config.read_only = !writable;
- nvmem_config.root_only = true;
+ nvmem_config.root_only = !(pdata.flags & AT24_FLAG_IRUGO);
nvmem_config.owner = THIS_MODULE;
nvmem_config.compat = true;
nvmem_config.base_dev = dev;
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index bb1ee98..225373e 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -141,6 +141,9 @@
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
+#define MEI_DEV_ID_MCC 0x4B70 /* Mule Creek Canyon (EHL) */
+#define MEI_DEV_ID_MCC_4 0x4B75 /* Mule Creek Canyon 4 (EHL) */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 4299658..a66ebce 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -107,6 +107,9 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_MCC_4, MEI_ME_PCH8_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index cd8db2c..fc57b6b 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -310,6 +310,7 @@ struct qseecom_control {
bool smcinvoke_support;
uint64_t qseecom_bridge_handle;
uint64_t ta_bridge_handle;
+ uint64_t user_contig_bridge_handle;
struct list_head unregister_lsnr_pending_list_head;
wait_queue_head_t register_lsnr_pending_wq;
@@ -1778,20 +1779,17 @@ static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
else
qclk = &qseecom.ce_drv;
- if (qclk->clk_access_cnt > 2) {
+ if (qclk->clk_access_cnt > 0) {
+ qclk->clk_access_cnt--;
+ } else {
pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
ret = -EINVAL;
- goto err_dec_ref_cnt;
}
- if (qclk->clk_access_cnt == 2)
- qclk->clk_access_cnt--;
-err_dec_ref_cnt:
mutex_unlock(&clk_access_lock);
return ret;
}
-
static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
{
int32_t ret = 0;
@@ -7750,6 +7748,13 @@ static long qseecom_ioctl(struct file *file,
break;
}
case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+ if ((data->type != QSEECOM_GENERIC) &&
+ (data->type != QSEECOM_CLIENT_APP)) {
+ pr_err("app loaded query req: invalid handle (%d)\n",
+ data->type);
+ ret = -EINVAL;
+ break;
+ }
data->type = QSEECOM_CLIENT_APP;
mutex_lock(&app_access_lock);
atomic_inc(&data->ioctl_count);
@@ -9243,14 +9248,13 @@ static int qseecom_register_heap_shmbridge(uint32_t heapid, uint64_t *handle)
struct device_node *ion_node, *node;
struct platform_device *ion_pdev = NULL;
struct cma *cma = NULL;
- int ret = -1;
uint32_t ns_vmids[] = {VMID_HLOS};
uint32_t ns_vm_perms[] = {PERM_READ | PERM_WRITE};
ion_node = of_find_compatible_node(NULL, NULL, "qcom,msm-ion");
if (!ion_node) {
pr_err("Failed to get qcom,msm-ion node\n");
- return ret;
+ return -ENODEV;
}
for_each_available_child_of_node(ion_node, node) {
@@ -9259,23 +9263,22 @@ static int qseecom_register_heap_shmbridge(uint32_t heapid, uint64_t *handle)
ion_pdev = of_find_device_by_node(node);
if (!ion_pdev) {
pr_err("Failed to find node for heap %d\n", heapid);
- break;
+ return -ENODEV;
}
cma = dev_get_cma_area(&ion_pdev->dev);
- if (cma) {
- heap_pa = cma_get_base(cma);
- heap_size = (size_t)cma_get_size(cma);
- } else {
+ if (!cma) {
pr_err("Failed to get Heap %d info\n", heapid);
+ return -ENODEV;
}
- break;
- }
-
- if (heap_pa)
- ret = qtee_shmbridge_register(heap_pa,
+ heap_pa = cma_get_base(cma);
+ heap_size = (size_t)cma_get_size(cma);
+ return qtee_shmbridge_register(heap_pa,
heap_size, ns_vmids, ns_vm_perms, 1,
PERM_READ | PERM_WRITE, handle);
- return ret;
+ }
+
+ pr_warn("Could not get heap %d info: No shmbridge created\n", heapid);
+ return 0;
}
static int qseecom_register_shmbridge(void)
@@ -9283,22 +9286,31 @@ static int qseecom_register_shmbridge(void)
if (qseecom_register_heap_shmbridge(ION_QSECOM_TA_HEAP_ID,
&qseecom.ta_bridge_handle)) {
pr_err("Failed to register shmbridge for ta heap\n");
- return -ENOMEM;
+ return -EINVAL;
}
if (qseecom_register_heap_shmbridge(ION_QSECOM_HEAP_ID,
&qseecom.qseecom_bridge_handle)) {
pr_err("Failed to register shmbridge for qseecom heap\n");
qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
- return -ENOMEM;
+ return -EINVAL;
+ }
+
+ if (qseecom_register_heap_shmbridge(ION_USER_CONTIG_HEAP_ID,
+ &qseecom.user_contig_bridge_handle)) {
+ pr_err("Failed to register shmbridge for user contig heap\n");
+ qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle);
+ qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
+ return -EINVAL;
}
return 0;
}
static void qseecom_deregister_shmbridge(void)
{
- qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
+ qtee_shmbridge_deregister(qseecom.user_contig_bridge_handle);
qtee_shmbridge_deregister(qseecom.qseecom_bridge_handle);
+ qtee_shmbridge_deregister(qseecom.ta_bridge_handle);
}
static int qseecom_probe(struct platform_device *pdev)
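
The __qseecom_decrease_clk_ref_count() fix above drops the magic comparison against specific count values in favour of the general rule for a manual refcount: any positive count may be decremented, and only a put at zero is an error. Sketch (count assumed protected by the caller's lock):

    static int put_ref(unsigned int *cnt)
    {
            if (*cnt == 0)
                    return -EINVAL;  /* underflow: unbalanced put */
            (*cnt)--;
            return 0;
    }
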
diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c
index ed5cefb..89deb45 100644
--- a/drivers/mmc/host/cavium.c
+++ b/drivers/mmc/host/cavium.c
@@ -374,6 +374,7 @@ static int finish_dma_single(struct cvm_mmc_host *host, struct mmc_data *data)
{
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
+ dma_unmap_sg(host->dev, data->sg, data->sg_len, get_dma_dir(data));
return 1;
}
@@ -1046,7 +1047,8 @@ int cvm_mmc_of_slot_probe(struct device *dev, struct cvm_mmc_host *host)
mmc->max_segs = 1;
/* DMA size field can address up to 8 MB */
- mmc->max_seg_size = 8 * 1024 * 1024;
+ mmc->max_seg_size = min_t(unsigned int, 8 * 1024 * 1024,
+ dma_get_max_seg_size(host->dev));
mmc->max_req_size = mmc->max_seg_size;
/* External DMA is in 512 byte blocks */
mmc->max_blk_size = 512;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 80dc2fd..942da07 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -2038,8 +2038,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
* delayed. Allowing the transfer to take place
* avoids races and keeps things simple.
*/
- if ((err != -ETIMEDOUT) &&
- (cmd->opcode == MMC_SEND_TUNING_BLOCK)) {
+ if (err != -ETIMEDOUT) {
state = STATE_SENDING_DATA;
continue;
}
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 9841b44..f6c76be 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -76,7 +76,7 @@
#define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6)
#define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8)
#define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9)
- #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(10, 13)
+ #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(13, 10)
#define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15)
#define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30)
#define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31)
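
The meson-mx-sdio change swaps the GENMASK() arguments: the macro takes the high bit first, so GENMASK(13, 10) sets bits 13..10 (0x3c00), while GENMASK(10, 13) expands to a bogus mask. A compile-time check of the corrected value, assuming the usual kernel headers:

    #include <linux/bits.h>
    #include <linux/build_bug.h>

    /* GENMASK(high, low) covers bits high..low inclusive */
    static_assert(GENMASK(13, 10) == 0x3c00);
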
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 7fdac27..9c77bfe 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -788,7 +788,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
ret = mmc_of_parse(host->mmc);
if (ret) {
- dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "parsing dt failed (%d)\n", ret);
goto unreg_clk;
}
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index fa8d9da..e248d79 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -290,11 +290,21 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
{
struct sdhci_pci_chip *chip;
struct sdhci_host *host;
- u32 reg;
+ u32 reg, caps;
int ret;
chip = slot->chip;
host = slot->host;
+
+ caps = sdhci_readl(host, SDHCI_CAPABILITIES);
+
+ /*
+ * mmc_select_bus_width() will test the bus to determine the actual bus
+ * width.
+ */
+ if (caps & SDHCI_CAN_DO_8BIT)
+ host->mmc->caps |= MMC_CAP_8_BIT_DATA;
+
switch (chip->pdev->device) {
case PCI_DEVICE_ID_O2_SDS0:
case PCI_DEVICE_ID_O2_SEABIRD0:
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index f5dc0a7..fb401c2 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -400,6 +400,14 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
(chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2)
return MICRON_ON_DIE_UNSUPPORTED;
+ /*
+ * It seems that there are devices which do not officially support ECC.
+ * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices support
+ * enabling the ECC feature but don't reflect that in the READ_ID table.
+ * So we have to guarantee that we disable the ECC feature directly
+ * after the READ_ID command. Later we can evaluate the
+ * ECC_ENABLE support.
+ */
ret = micron_nand_on_die_ecc_setup(chip, true);
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
@@ -408,13 +416,13 @@ static int micron_supports_on_die_ecc(struct nand_chip *chip)
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
- if (!(id[4] & MICRON_ID_ECC_ENABLED))
- return MICRON_ON_DIE_UNSUPPORTED;
-
ret = micron_nand_on_die_ecc_setup(chip, false);
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
+ if (!(id[4] & MICRON_ID_ECC_ENABLED))
+ return MICRON_ON_DIE_UNSUPPORTED;
+
ret = nand_readid_op(chip, 0, id, sizeof(id));
if (ret)
return MICRON_ON_DIE_UNSUPPORTED;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index be0b785..0d2392c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1102,6 +1102,8 @@ static void bond_compute_features(struct bonding *bond)
done:
bond_dev->vlan_features = vlan_features;
bond_dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_GSO_UDP_L4;
bond_dev->gso_max_segs = gso_max_segs;
netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -2188,6 +2190,15 @@ static void bond_miimon_commit(struct bonding *bond)
bond_for_each_slave(bond, slave, iter) {
switch (slave->new_link) {
case BOND_LINK_NOCHANGE:
+ /* For 802.3ad mode, check current slave speed and
+ * duplex again in case its port was disabled after
+ * invalid speed/duplex reporting but recovered before
+ * link monitoring could make a decision on the actual
+ * link status
+ */
+ if (BOND_MODE(bond) == BOND_MODE_8023AD &&
+ slave->link == BOND_LINK_UP)
+ bond_3ad_adapter_speed_duplex_changed(slave);
continue;
case BOND_LINK_UP:
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c05e4d5..bd127ce 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -1260,6 +1260,8 @@ int register_candev(struct net_device *dev)
return -EINVAL;
dev->rtnl_link_ops = &can_link_ops;
+ netif_carrier_off(dev);
+
return register_netdev(dev);
}
EXPORT_SYMBOL_GPL(register_candev);
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 602c19e2..786d852 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1512,10 +1512,11 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
/* All packets processed */
if (num_pkts < quota) {
- napi_complete_done(napi, num_pkts);
- /* Enable Rx FIFO interrupts */
- rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
- RCANFD_RFCC_RFIE);
+ if (napi_complete_done(napi, num_pkts)) {
+ /* Enable Rx FIFO interrupts */
+ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
+ RCANFD_RFCC_RFIE);
+ }
}
return num_pkts;
}
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index b8c39ed..179bfcd 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
if (!netdev)
continue;
- strncpy(name, netdev->name, IFNAMSIZ);
+ strlcpy(name, netdev->name, IFNAMSIZ);
unregister_sja1000dev(netdev);
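
peak_pcmcia (and pcan_usb_core below) switch strncpy() to strlcpy() when saving the netdev name before unregistering it. strncpy() leaves the destination unterminated whenever the source fills the buffer; strlcpy() always NUL-terminates, truncating if needed, so the saved copy is safe to print afterwards. Minimal illustration (IFNAMSIZ buffer assumed):

    static void remove_and_log(struct net_device *netdev)
    {
            char name[IFNAMSIZ];

            strlcpy(name, netdev->name, IFNAMSIZ); /* always NUL-terminated */
            unregister_netdev(netdev);
            pr_info("removed %s\n", name);
    }
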
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index da64e71..fccb6bf 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -678,17 +678,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable)
return regulator_disable(reg);
}
-static void mcp251x_open_clean(struct net_device *net)
-{
- struct mcp251x_priv *priv = netdev_priv(net);
- struct spi_device *spi = priv->spi;
-
- free_irq(spi->irq, priv);
- mcp251x_hw_sleep(spi);
- mcp251x_power_enable(priv->transceiver, 0);
- close_candev(net);
-}
-
static int mcp251x_stop(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
@@ -954,37 +943,43 @@ static int mcp251x_open(struct net_device *net)
flags | IRQF_ONESHOT, DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
- mcp251x_power_enable(priv->transceiver, 0);
- close_candev(net);
- goto open_unlock;
+ goto out_close;
}
priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
0);
+ if (!priv->wq) {
+ ret = -ENOMEM;
+ goto out_clean;
+ }
INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
ret = mcp251x_hw_reset(spi);
- if (ret) {
- mcp251x_open_clean(net);
- goto open_unlock;
- }
+ if (ret)
+ goto out_free_wq;
ret = mcp251x_setup(net, spi);
- if (ret) {
- mcp251x_open_clean(net);
- goto open_unlock;
- }
+ if (ret)
+ goto out_free_wq;
ret = mcp251x_set_normal_mode(spi);
- if (ret) {
- mcp251x_open_clean(net);
- goto open_unlock;
- }
+ if (ret)
+ goto out_free_wq;
can_led_event(net, CAN_LED_EVENT_OPEN);
netif_wake_queue(net);
+ mutex_unlock(&priv->mcp_lock);
-open_unlock:
+ return 0;
+
+out_free_wq:
+ destroy_workqueue(priv->wq);
+out_clean:
+ free_irq(spi->irq, priv);
+ mcp251x_hw_sleep(spi);
+out_close:
+ mcp251x_power_enable(priv->transceiver, 0);
+ close_candev(net);
mutex_unlock(&priv->mcp_lock);
return ret;
}
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 611f9d3..43b0fa2b 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -576,16 +576,16 @@ static int peak_usb_ndo_stop(struct net_device *netdev)
dev->state &= ~PCAN_USB_STATE_STARTED;
netif_stop_queue(netdev);
+ close_candev(netdev);
+
+ dev->can.state = CAN_STATE_STOPPED;
+
/* unlink all pending urbs and free used memory */
peak_usb_unlink_all_urbs(dev);
if (dev->adapter->dev_stop)
dev->adapter->dev_stop(dev);
- close_candev(netdev);
-
- dev->can.state = CAN_STATE_STOPPED;
-
/* can set bus off now */
if (dev->adapter->dev_set_bus) {
int err = dev->adapter->dev_set_bus(dev, 0);
@@ -863,7 +863,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
dev_prev_siblings = dev->prev_siblings;
dev->state &= ~PCAN_USB_STATE_CONNECTED;
- strncpy(name, netdev->name, IFNAMSIZ);
+ strlcpy(name, netdev->name, IFNAMSIZ);
unregister_netdev(netdev);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index dd161c5..4198835 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -849,7 +849,7 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev)
goto err_out;
/* allocate command buffer once for all for the interface */
- pdev->cmd_buffer_addr = kmalloc(PCAN_UFD_CMD_BUFFER_SIZE,
+ pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE,
GFP_KERNEL);
if (!pdev->cmd_buffer_addr)
goto err_out_1;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index d516def..b304198 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -502,7 +502,7 @@ static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded)
u8 *buffer;
int err;
- buffer = kmalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
+ buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 3edb81a..cf01e73 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1936,8 +1936,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return fallback(dev, skb, NULL) %
- (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
+ return fallback(dev, skb, NULL) % (BNX2X_NUM_ETH_QUEUES(bp));
}
void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -3059,12 +3058,13 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
/* if VF indicate to PF this function is going down (PF will delete sp
* elements and clear initializations
*/
- if (IS_VF(bp))
+ if (IS_VF(bp)) {
+ bnx2x_clear_vlan_info(bp);
bnx2x_vfpf_close_vf(bp);
- else if (unload_mode != UNLOAD_RECOVERY)
+ } else if (unload_mode != UNLOAD_RECOVERY) {
/* if this is a normal/close unload need to clean up chip*/
bnx2x_chip_cleanup(bp, unload_mode, keep_link);
- else {
+ } else {
/* Send the UNLOAD_REQUEST to the MCP */
bnx2x_send_unload_req(bp, unload_mode);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 0e508e5..ee5159e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -425,6 +425,8 @@ void bnx2x_set_reset_global(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);
int bnx2x_init_hw_func_cnic(struct bnx2x *bp);
+void bnx2x_clear_vlan_info(struct bnx2x *bp);
+
/**
* bnx2x_sp_event - handle ramrods completion.
*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2c9af0f..68c62e3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8488,11 +8488,21 @@ int bnx2x_set_vlan_one(struct bnx2x *bp, u16 vlan,
return rc;
}
+void bnx2x_clear_vlan_info(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+
+ /* Mark that hw forgot all entries */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+
+ bp->vlan_cnt = 0;
+}
+
static int bnx2x_del_all_vlans(struct bnx2x *bp)
{
struct bnx2x_vlan_mac_obj *vlan_obj = &bp->sp_objs[0].vlan_obj;
unsigned long ramrod_flags = 0, vlan_flags = 0;
- struct bnx2x_vlan_entry *vlan;
int rc;
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8501,10 +8511,7 @@ static int bnx2x_del_all_vlans(struct bnx2x *bp)
if (rc)
return rc;
- /* Mark that hw forgot all entries */
- list_for_each_entry(vlan, &bp->vlan_reg, link)
- vlan->hw = false;
- bp->vlan_cnt = 0;
+ bnx2x_clear_vlan_info(bp);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index c34ea38..6be6de0 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3270,7 +3270,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!adapter->regs) {
dev_err(&pdev->dev, "cannot map device registers\n");
err = -ENOMEM;
- goto out_free_adapter;
+ goto out_free_adapter_nofail;
}
adapter->pdev = pdev;
@@ -3398,6 +3398,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (adapter->port[i])
free_netdev(adapter->port[i]);
+out_free_adapter_nofail:
+ kfree_skb(adapter->nofail_skb);
+
out_free_adapter:
kfree(adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index d97e0d7..b766362 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -1065,14 +1065,12 @@ static void cudbg_t4_fwcache(struct cudbg_init *pdbg_init,
}
}
-static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
- struct cudbg_buffer *dbg_buff,
- struct cudbg_error *cudbg_err,
- u8 mem_type)
+static unsigned long cudbg_mem_region_size(struct cudbg_init *pdbg_init,
+ struct cudbg_error *cudbg_err,
+ u8 mem_type)
{
struct adapter *padap = pdbg_init->adap;
struct cudbg_meminfo mem_info;
- unsigned long size;
u8 mc_idx;
int rc;
@@ -1086,7 +1084,16 @@ static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
if (rc)
return rc;
- size = mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+ return mem_info.avail[mc_idx].limit - mem_info.avail[mc_idx].base;
+}
+
+static int cudbg_collect_mem_region(struct cudbg_init *pdbg_init,
+ struct cudbg_buffer *dbg_buff,
+ struct cudbg_error *cudbg_err,
+ u8 mem_type)
+{
+ unsigned long size = cudbg_mem_region_size(pdbg_init, cudbg_err, mem_type);
+
return cudbg_read_fw_mem(pdbg_init, dbg_buff, mem_type, size,
cudbg_err);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index f2aba5b..d45c435 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
@@ -67,7 +67,8 @@ static struct ch_tc_pedit_fields pedits[] = {
static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);
- spin_lock_init(&new->lock);
+ if (new)
+ spin_lock_init(&new->lock);
return new;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index bff7475..3fe6a28 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4700,8 +4700,12 @@ int be_update_queues(struct be_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int status;
- if (netif_running(netdev))
+ if (netif_running(netdev)) {
+ /* device cannot transmit now, avoid dev_watchdog timeouts */
+ netif_carrier_off(netdev);
+
be_close(netdev);
+ }
be_cancel_worker(adapter);
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 6127697..a91d49d 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -157,6 +157,7 @@ struct hip04_priv {
unsigned int reg_inten;
struct napi_struct napi;
+ struct device *dev;
struct net_device *ndev;
struct tx_desc *tx_desc;
@@ -185,7 +186,7 @@ struct hip04_priv {
static inline unsigned int tx_count(unsigned int head, unsigned int tail)
{
- return (head - tail) % (TX_DESC_NUM - 1);
+ return (head - tail) % TX_DESC_NUM;
}
static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
@@ -387,7 +388,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
}
if (priv->tx_phys[tx_tail]) {
- dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
+ dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
priv->tx_skb[tx_tail]->len,
DMA_TO_DEVICE);
priv->tx_phys[tx_tail] = 0;
@@ -437,8 +438,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_BUSY;
}
- phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
- if (dma_mapping_error(&ndev->dev, phys)) {
+ phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, phys)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -497,6 +498,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
u16 len;
u32 err;
+ /* clean up tx descriptors */
+ tx_remaining = hip04_tx_reclaim(ndev, false);
+
while (cnt && !last) {
buf = priv->rx_buf[priv->rx_head];
skb = build_skb(buf, priv->rx_buf_size);
@@ -505,7 +509,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
goto refill;
}
- dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
+ dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
RX_BUF_SIZE, DMA_FROM_DEVICE);
priv->rx_phys[priv->rx_head] = 0;
@@ -534,9 +538,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
buf = netdev_alloc_frag(priv->rx_buf_size);
if (!buf)
goto done;
- phys = dma_map_single(&ndev->dev, buf,
+ phys = dma_map_single(priv->dev, buf,
RX_BUF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, phys))
+ if (dma_mapping_error(priv->dev, phys))
goto done;
priv->rx_buf[priv->rx_head] = buf;
priv->rx_phys[priv->rx_head] = phys;
@@ -557,8 +561,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
}
napi_complete_done(napi, rx);
done:
- /* clean up tx descriptors and start a new timer if necessary */
- tx_remaining = hip04_tx_reclaim(ndev, false);
+ /* start a new timer if necessary */
if (rx < budget && tx_remaining)
hip04_start_tx_timer(priv);
@@ -640,9 +643,9 @@ static int hip04_mac_open(struct net_device *ndev)
for (i = 0; i < RX_DESC_NUM; i++) {
dma_addr_t phys;
- phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
+ phys = dma_map_single(priv->dev, priv->rx_buf[i],
RX_BUF_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(&ndev->dev, phys))
+ if (dma_mapping_error(priv->dev, phys))
return -EIO;
priv->rx_phys[i] = phys;
@@ -676,7 +679,7 @@ static int hip04_mac_stop(struct net_device *ndev)
for (i = 0; i < RX_DESC_NUM; i++) {
if (priv->rx_phys[i]) {
- dma_unmap_single(&ndev->dev, priv->rx_phys[i],
+ dma_unmap_single(priv->dev, priv->rx_phys[i],
RX_BUF_SIZE, DMA_FROM_DEVICE);
priv->rx_phys[i] = 0;
}
@@ -820,6 +823,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
return -ENOMEM;
priv = netdev_priv(ndev);
+ priv->dev = d;
priv->ndev = ndev;
platform_set_drvdata(pdev, ndev);
SET_NETDEV_DEV(ndev, &pdev->dev);
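/* Editor's worked example (not from the patch): assuming TX_DESC_NUM is a
 * power of two and head/tail are pre-wrapped into [0, TX_DESC_NUM), the
 * unsigned wraparound of (head - tail) is harmless under "% TX_DESC_NUM",
 * since 2^32 is a multiple of the ring size. The old "% (TX_DESC_NUM - 1)"
 * aliased a full ring to zero: with head = 0 and tail = 1, 255 descriptors
 * are in flight, yet 0xffffffff % 255 == 0, so reclaim saw an empty ring.
 * A standalone C demo of both formulas:
 */
#include <stdio.h>

#define TX_DESC_NUM 256u	/* assumed power-of-two ring size */

static unsigned int count_old(unsigned int head, unsigned int tail)
{
	return (head - tail) % (TX_DESC_NUM - 1);	/* buggy */
}

static unsigned int count_new(unsigned int head, unsigned int tail)
{
	return (head - tail) % TX_DESC_NUM;		/* fixed */
}

int main(void)
{
	printf("old: %u\n", count_old(0, 1));	/* prints 0: ring looks empty */
	printf("new: %u\n", count_new(0, 1));	/* prints 255: correct count */
	return 0;
}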
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index df5b74f..9b608d2 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -3501,6 +3501,7 @@ static int mvpp2_set_mac_address(struct net_device *dev, void *p)
static int mvpp2_change_mtu(struct net_device *dev, int mtu)
{
struct mvpp2_port *port = netdev_priv(dev);
+ bool running = netif_running(dev);
int err;
if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
@@ -3509,40 +3510,24 @@ static int mvpp2_change_mtu(struct net_device *dev, int mtu)
mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
}
- if (!netif_running(dev)) {
- err = mvpp2_bm_update_mtu(dev, mtu);
- if (!err) {
- port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
- return 0;
- }
-
- /* Reconfigure BM to the original MTU */
- err = mvpp2_bm_update_mtu(dev, dev->mtu);
- if (err)
- goto log_error;
- }
-
- mvpp2_stop_dev(port);
+ if (running)
+ mvpp2_stop_dev(port);
err = mvpp2_bm_update_mtu(dev, mtu);
- if (!err) {
+ if (err) {
+ netdev_err(dev, "failed to change MTU\n");
+ /* Reconfigure BM to the original MTU */
+ mvpp2_bm_update_mtu(dev, dev->mtu);
+ } else {
port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
- goto out_start;
}
- /* Reconfigure BM to the original MTU */
- err = mvpp2_bm_update_mtu(dev, dev->mtu);
- if (err)
- goto log_error;
+ if (running) {
+ mvpp2_start_dev(port);
+ mvpp2_egress_enable(port);
+ mvpp2_ingress_enable(port);
+ }
-out_start:
- mvpp2_start_dev(port);
- mvpp2_egress_enable(port);
- mvpp2_ingress_enable(port);
-
- return 0;
-log_error:
- netdev_err(dev, "failed to change MTU\n");
return err;
}
@@ -4427,9 +4412,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
if (state->pause & MLO_PAUSE_RX)
ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
- ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
- ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
- MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
+ ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
+ MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
+ ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
@@ -5358,9 +5343,6 @@ static int mvpp2_remove(struct platform_device *pdev)
mvpp2_dbgfs_cleanup(priv);
- flush_workqueue(priv->stats_queue);
- destroy_workqueue(priv->stats_queue);
-
fwnode_for_each_available_child_node(fwnode, port_fwnode) {
if (priv->port_list[i]) {
mutex_destroy(&priv->port_list[i]->gather_stats_lock);
@@ -5369,6 +5351,8 @@ static int mvpp2_remove(struct platform_device *pdev)
i++;
}
+ destroy_workqueue(priv->stats_queue);
+
for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
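/* Editor's sketch (hypothetical helper names): the change_mtu rework above
 * collapses the running and non-running paths into one sequence - stop the
 * port only if it was up, try the new MTU, roll the buffer manager back to
 * the old MTU on failure, and restart only what was stopped:
 */
#include <linux/netdevice.h>

extern void example_stop(struct net_device *dev);	/* assumed helpers */
extern void example_start(struct net_device *dev);
extern int example_apply_mtu(struct net_device *dev, int mtu);

static int example_change_mtu(struct net_device *dev, int mtu)
{
	bool running = netif_running(dev);
	int err;

	if (running)
		example_stop(dev);

	err = example_apply_mtu(dev, mtu);
	if (err)
		example_apply_mtu(dev, dev->mtu);	/* best-effort rollback */

	if (running)
		example_start(dev);

	return err;
}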
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index f5cd953..45d9a5f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -1190,7 +1190,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
if (err) {
en_err(priv, "Failed to allocate RSS indirection QP\n");
- goto rss_err;
+ goto qp_alloc_err;
}
rss_map->indir_qp->event = mlx4_en_sqp_event;
@@ -1244,6 +1244,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
mlx4_qp_free(mdev->dev, rss_map->indir_qp);
+qp_alloc_err:
kfree(rss_map->indir_qp);
rss_map->indir_qp = NULL;
rss_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 1c225be..3692d6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -307,7 +307,7 @@ void mlx5_unregister_device(struct mlx5_core_dev *dev)
struct mlx5_interface *intf;
mutex_lock(&mlx5_intf_mutex);
- list_for_each_entry(intf, &intf_list, list)
+ list_for_each_entry_reverse(intf, &intf_list, list)
mlx5_remove_device(intf, priv);
list_del(&priv->dev_list);
mutex_unlock(&mlx5_intf_mutex);
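/* Editor's note: interfaces registered later may depend on those registered
 * earlier, so teardown walks the list backwards. A minimal sketch of the
 * reverse-iteration idiom (types and callee are illustrative):
 */
#include <linux/list.h>

struct example_intf {
	struct list_head list;
};

extern void example_remove_one(struct example_intf *intf);

static void example_unregister_all(struct list_head *intf_list)
{
	struct example_intf *intf;

	/* last registered is removed first */
	list_for_each_entry_reverse(intf, intf_list, list)
		example_remove_one(intf);
}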
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 45cdde6..a4be04d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -437,12 +437,6 @@ arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
return &arfs_t->rules_hash[bucket_idx];
}
-static u8 arfs_get_ip_proto(const struct sk_buff *skb)
-{
- return (skb->protocol == htons(ETH_P_IP)) ?
- ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
-}
-
static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
u8 ip_proto, __be16 etype)
{
@@ -599,31 +593,9 @@ static void arfs_handle_work(struct work_struct *work)
arfs_may_expire_flow(priv);
}
-/* return L4 destination port from ip4/6 packets */
-static __be16 arfs_get_dst_port(const struct sk_buff *skb)
-{
- char *transport_header;
-
- transport_header = skb_transport_header(skb);
- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
- return ((struct tcphdr *)transport_header)->dest;
- return ((struct udphdr *)transport_header)->dest;
-}
-
-/* return L4 source port from ip4/6 packets */
-static __be16 arfs_get_src_port(const struct sk_buff *skb)
-{
- char *transport_header;
-
- transport_header = skb_transport_header(skb);
- if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
- return ((struct tcphdr *)transport_header)->source;
- return ((struct udphdr *)transport_header)->source;
-}
-
static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
struct arfs_table *arfs_t,
- const struct sk_buff *skb,
+ const struct flow_keys *fk,
u16 rxq, u32 flow_id)
{
struct arfs_rule *rule;
@@ -638,19 +610,19 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
INIT_WORK(&rule->arfs_work, arfs_handle_work);
tuple = &rule->tuple;
- tuple->etype = skb->protocol;
+ tuple->etype = fk->basic.n_proto;
+ tuple->ip_proto = fk->basic.ip_proto;
if (tuple->etype == htons(ETH_P_IP)) {
- tuple->src_ipv4 = ip_hdr(skb)->saddr;
- tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+ tuple->src_ipv4 = fk->addrs.v4addrs.src;
+ tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
} else {
- memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+ memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
sizeof(struct in6_addr));
- memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+ memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
sizeof(struct in6_addr));
}
- tuple->ip_proto = arfs_get_ip_proto(skb);
- tuple->src_port = arfs_get_src_port(skb);
- tuple->dst_port = arfs_get_dst_port(skb);
+ tuple->src_port = fk->ports.src;
+ tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
@@ -661,37 +633,33 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
return rule;
}
-static bool arfs_cmp_ips(struct arfs_tuple *tuple,
- const struct sk_buff *skb)
+static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
{
- if (tuple->etype == htons(ETH_P_IP) &&
- tuple->src_ipv4 == ip_hdr(skb)->saddr &&
- tuple->dst_ipv4 == ip_hdr(skb)->daddr)
- return true;
- if (tuple->etype == htons(ETH_P_IPV6) &&
- (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
- sizeof(struct in6_addr))) &&
- (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
- sizeof(struct in6_addr))))
- return true;
+ if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
+ return false;
+ if (tuple->etype != fk->basic.n_proto)
+ return false;
+ if (tuple->etype == htons(ETH_P_IP))
+ return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
+ tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
+ if (tuple->etype == htons(ETH_P_IPV6))
+ return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
+ sizeof(struct in6_addr)) &&
+ !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
+ sizeof(struct in6_addr));
return false;
}
static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
- const struct sk_buff *skb)
+ const struct flow_keys *fk)
{
struct arfs_rule *arfs_rule;
struct hlist_head *head;
- __be16 src_port = arfs_get_src_port(skb);
- __be16 dst_port = arfs_get_dst_port(skb);
- head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+ head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
hlist_for_each_entry(arfs_rule, head, hlist) {
- if (arfs_rule->tuple.src_port == src_port &&
- arfs_rule->tuple.dst_port == dst_port &&
- arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+ if (arfs_cmp(&arfs_rule->tuple, fk))
return arfs_rule;
- }
}
return NULL;
@@ -704,20 +672,24 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_table *arfs_t;
struct arfs_rule *arfs_rule;
+ struct flow_keys fk;
- if (skb->protocol != htons(ETH_P_IP) &&
- skb->protocol != htons(ETH_P_IPV6))
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return -EPROTONOSUPPORT;
+
+ if (fk.basic.n_proto != htons(ETH_P_IP) &&
+ fk.basic.n_proto != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
if (skb->encapsulation)
return -EPROTONOSUPPORT;
- arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+ arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
if (!arfs_t)
return -EPROTONOSUPPORT;
spin_lock_bh(&arfs->arfs_lock);
- arfs_rule = arfs_find_rule(arfs_t, skb);
+ arfs_rule = arfs_find_rule(arfs_t, &fk);
if (arfs_rule) {
if (arfs_rule->rxq == rxq_index) {
spin_unlock_bh(&arfs->arfs_lock);
@@ -725,8 +697,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
}
arfs_rule->rxq = rxq_index;
} else {
- arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
- rxq_index, flow_id);
+ arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
if (!arfs_rule) {
spin_unlock_bh(&arfs->arfs_lock);
return -ENOMEM;
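/* Editor's sketch: the rework above stops parsing IP/TCP/UDP headers by
 * hand and lets the flow dissector produce the 4-tuple, which also copes
 * with VLAN tags and non-trivial header layouts. A hedged, minimal use of
 * the same API:
 */
#include <linux/skbuff.h>
#include <net/flow_dissector.h>

static int example_get_tuple(const struct sk_buff *skb)
{
	struct flow_keys fk;

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	/* n_proto and the ports are network-endian, as the tuple copies
	 * in arfs_alloc_rule() assume.
	 */
	pr_debug("etype %#x proto %u sport %u dport %u\n",
		 ntohs(fk.basic.n_proto), fk.basic.ip_proto,
		 ntohs(fk.ports.src), ntohs(fk.ports.dst));
	return 0;
}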
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 792bb8b..2b9350f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1083,6 +1083,9 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
struct mlx5_core_dev *mdev = priv->mdev;
int err;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EOPNOTSUPP;
+
if (pauseparam->autoneg)
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 0f1c296..83ab2c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -420,12 +420,11 @@ static inline u64 mlx5e_get_mpwqe_offset(struct mlx5e_rq *rq, u16 wqe_ix)
static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
{
- struct mlx5e_wqe_frag_info next_frag, *prev;
+ struct mlx5e_wqe_frag_info next_frag = {};
+ struct mlx5e_wqe_frag_info *prev = NULL;
int i;
next_frag.di = &rq->wqe.di[0];
- next_frag.offset = 0;
- prev = NULL;
for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9f7f842..c8928ce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -992,13 +992,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
- u64 bytes, packets, lastuse = 0;
struct mlx5e_tc_flow *flow;
struct mlx5e_encap_entry *e;
struct mlx5_fc *counter;
struct neigh_table *tbl;
bool neigh_used = false;
struct neighbour *n;
+ u64 lastuse;
if (m_neigh->family == AF_INET)
tbl = &arp_tbl;
@@ -1015,7 +1015,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
list_for_each_entry(flow, &e->flows, encap) {
if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
counter = mlx5_flow_rule_counter(flow->rule[0]);
- mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+ lastuse = mlx5_fc_query_lastuse(counter);
if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
neigh_used = true;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 58af6be..808ddd7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -321,6 +321,11 @@ int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
}
EXPORT_SYMBOL(mlx5_fc_query);
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter)
+{
+ return counter->cache.lastuse;
+}
+
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 0cab060..ee126bc 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -5032,7 +5032,7 @@ static int __init mlxsw_sp_module_init(void)
return 0;
err_sp2_pci_driver_register:
- mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
+ mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
err_sp1_pci_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index b25048c..21296fa 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -408,14 +408,6 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
have_dscp = mlxsw_sp_port_dcb_app_prio_dscp_map(mlxsw_sp_port,
&prio_map);
- if (!have_dscp) {
- err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
- MLXSW_REG_QPTS_TRUST_STATE_PCP);
- if (err)
- netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
- return err;
- }
-
mlxsw_sp_port_dcb_app_dscp_prio_map(mlxsw_sp_port, default_prio,
&dscp_map);
err = mlxsw_sp_port_dcb_app_update_qpdpm(mlxsw_sp_port,
@@ -432,6 +424,14 @@ static int mlxsw_sp_port_dcb_app_update(struct mlxsw_sp_port *mlxsw_sp_port)
return err;
}
+ if (!have_dscp) {
+ err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
+ MLXSW_REG_QPTS_TRUST_STATE_PCP);
+ if (err)
+ netdev_err(mlxsw_sp_port->dev, "Couldn't switch to trust L2\n");
+ return err;
+ }
+
err = mlxsw_sp_port_dcb_toggle_trust(mlxsw_sp_port,
MLXSW_REG_QPTS_TRUST_STATE_DSCP);
if (err) {
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 1029119..732ba21 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1767,6 +1767,7 @@ EXPORT_SYMBOL(ocelot_init);
void ocelot_deinit(struct ocelot *ocelot)
{
+ cancel_delayed_work(&ocelot->stats_work);
destroy_workqueue(ocelot->stats_queue);
mutex_destroy(&ocelot->stats_lock);
}
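/* Editor's note: ocelot's stats_work re-arms itself, so the pending timer
 * must be cancelled before the workqueue it targets is destroyed, or a late
 * firing queues work onto freed memory. The general teardown order as a
 * sketch (the _sync variant is the stricter choice when the work must not
 * still be running either):
 */
#include <linux/workqueue.h>

static void example_teardown(struct delayed_work *dw,
			     struct workqueue_struct *wq)
{
	cancel_delayed_work_sync(dw);	/* stop the timer and any running work */
	destroy_workqueue(wq);		/* only then tear down the queue */
}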
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index b22f464..f9e4750 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -939,7 +939,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
snprintf(bit_name, 30,
p_aeu->bit_name, num);
else
- strncpy(bit_name,
+ strlcpy(bit_name,
p_aeu->bit_name, 30);
/* We now need to pass bitmask in its
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 13802b8..909422d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -442,7 +442,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
/* Vendor specific information */
dev->vendor_id = cdev->vendor_id;
dev->vendor_part_id = cdev->device_id;
- dev->hw_ver = 0;
+ dev->hw_ver = cdev->chip_rev;
dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
(FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index aaad5e4..72f3e4b 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -389,14 +389,13 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
}
if (data[IFLA_RMNET_UL_AGG_PARAMS]) {
- void *agg_params;
- unsigned long irq_flags;
+ struct rmnet_egress_agg_params *agg_params;
agg_params = nla_data(data[IFLA_RMNET_UL_AGG_PARAMS]);
- spin_lock_irqsave(&port->agg_lock, irq_flags);
- memcpy(&port->egress_agg_params, agg_params,
- sizeof(port->egress_agg_params));
- spin_unlock_irqrestore(&port->agg_lock, irq_flags);
+ rmnet_map_update_ul_agg_config(port, agg_params->agg_size,
+ agg_params->agg_count,
+ agg_params->agg_features,
+ agg_params->agg_time);
}
return 0;
@@ -700,6 +699,16 @@ int rmnet_get_powersave_notif(void *port)
return ((struct rmnet_port *)port)->data_format & RMNET_FORMAT_PS_NOTIF;
}
EXPORT_SYMBOL(rmnet_get_powersave_notif);
+
+struct net_device *rmnet_get_real_dev(void *port)
+{
+ if (port)
+ return ((struct rmnet_port *)port)->dev;
+
+ return NULL;
+}
+EXPORT_SYMBOL(rmnet_get_real_dev);
+
#endif
/* Startup/Shutdown */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 07b1154..2359401 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -20,6 +20,11 @@ struct rmnet_endpoint {
struct hlist_node hlnode;
};
+struct rmnet_agg_stats {
+ u64 ul_agg_reuse;
+ u64 ul_agg_alloc;
+};
+
struct rmnet_port_priv_stats {
u64 dl_hdr_last_qmap_vers;
u64 dl_hdr_last_ep_id;
@@ -33,14 +38,21 @@ struct rmnet_port_priv_stats {
u64 dl_hdr_total_pkts;
u64 dl_trl_last_seq;
u64 dl_trl_count;
+ struct rmnet_agg_stats agg;
};
struct rmnet_egress_agg_params {
u16 agg_size;
- u16 agg_count;
+ u8 agg_count;
+ u8 agg_features;
u32 agg_time;
};
+struct rmnet_agg_page {
+ struct list_head list;
+ struct page *page;
+};
+
/* One instance of this structure is instantiated for each real_dev associated
* with rmnet.
*/
@@ -65,6 +77,9 @@ struct rmnet_port {
struct timespec agg_last;
struct hrtimer hrtimer;
struct work_struct agg_wq;
+ u8 agg_size_order;
+ struct list_head agg_list;
+ struct rmnet_agg_page *agg_head;
void *qmi_info;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c
index a2252c5..40238e6 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_descriptor.c
@@ -375,12 +375,28 @@ static void rmnet_frag_gso_stamp(struct sk_buff *skb,
struct rmnet_frag_descriptor *frag_desc)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ if (frag_desc->trans_proto == IPPROTO_TCP)
+ shinfo->gso_type = (frag_desc->ip_proto == 4) ?
+ SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+ else
+ shinfo->gso_type = SKB_GSO_UDP_L4;
+
+ shinfo->gso_size = frag_desc->gso_size;
+ shinfo->gso_segs = frag_desc->gso_segs;
+}
+
+/* Set the partial checksum information. Sets the transport checksum to the
+ * pseudoheader checksum and sets the offload metadata.
+ */
+static void rmnet_frag_partial_csum(struct sk_buff *skb,
+ struct rmnet_frag_descriptor *frag_desc)
+{
struct iphdr *iph = (struct iphdr *)skb->data;
__sum16 pseudo;
u16 pkt_len = skb->len - frag_desc->ip_len;
- bool ipv4 = frag_desc->ip_proto == 4;
- if (ipv4) {
+ if (frag_desc->ip_proto == 4) {
iph->tot_len = htons(skb->len);
iph->check = 0;
iph->check = ip_fast_csum(iph, iph->ihl);
@@ -401,7 +417,6 @@ static void rmnet_frag_gso_stamp(struct sk_buff *skb,
((u8 *)iph + frag_desc->ip_len);
tp->check = pseudo;
- shinfo->gso_type = (ipv4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
skb->csum_offset = offsetof(struct tcphdr, check);
} else {
struct udphdr *up = (struct udphdr *)
@@ -409,14 +424,11 @@ static void rmnet_frag_gso_stamp(struct sk_buff *skb,
up->len = htons(pkt_len);
up->check = pseudo;
- shinfo->gso_type = SKB_GSO_UDP_L4;
skb->csum_offset = offsetof(struct udphdr, check);
}
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = (u8 *)iph + frag_desc->ip_len - skb->head;
- shinfo->gso_size = frag_desc->gso_size;
- shinfo->gso_segs = frag_desc->gso_segs;
}
/* Allocate and populate an skb to contain the packet represented by the
@@ -542,7 +554,8 @@ static struct sk_buff *rmnet_alloc_skb(struct rmnet_frag_descriptor *frag_desc,
/* Handle csum offloading */
if (frag_desc->csum_valid) {
- head_skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* Set the partial checksum information */
+ rmnet_frag_partial_csum(head_skb, frag_desc);
} else if (frag_desc->hdrs_valid &&
(frag_desc->trans_proto == IPPROTO_TCP ||
frag_desc->trans_proto == IPPROTO_UDP)) {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index f575096..04048f6 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -261,6 +261,8 @@ int rmnet_map_tx_agg_skip(struct sk_buff *skb, int offset);
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port);
void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
+void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u16 size,
+ u8 count, u8 features, u32 time);
void rmnet_map_dl_hdr_notify(struct rmnet_port *port,
struct rmnet_map_dl_ind_hdr *dl_hdr);
void rmnet_map_dl_hdr_notify_v2(struct rmnet_port *port,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index fcb1d2d..694e596 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -656,12 +656,28 @@ static void rmnet_map_gso_stamp(struct sk_buff *skb,
struct rmnet_map_coal_metadata *coal_meta)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+ if (coal_meta->trans_proto == IPPROTO_TCP)
+ shinfo->gso_type = (coal_meta->ip_proto == 4) ?
+ SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
+ else
+ shinfo->gso_type = SKB_GSO_UDP_L4;
+
+ shinfo->gso_size = coal_meta->data_len;
+ shinfo->gso_segs = coal_meta->pkt_count;
+}
+
+/* Handles setting up the partial checksum in the skb. Sets the transport
+ * checksum to the pseudoheader checksum and sets the csum offload metadata.
+ */
+static void rmnet_map_partial_csum(struct sk_buff *skb,
+ struct rmnet_map_coal_metadata *coal_meta)
+{
unsigned char *data = skb->data;
__sum16 pseudo;
u16 pkt_len = skb->len - coal_meta->ip_len;
- bool ipv4 = coal_meta->ip_proto == 4;
- if (ipv4) {
+ if (coal_meta->ip_proto == 4) {
struct iphdr *iph = (struct iphdr *)data;
pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
@@ -678,20 +694,16 @@ static void rmnet_map_gso_stamp(struct sk_buff *skb,
struct tcphdr *tp = (struct tcphdr *)(data + coal_meta->ip_len);
tp->check = pseudo;
- shinfo->gso_type = (ipv4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
skb->csum_offset = offsetof(struct tcphdr, check);
} else {
struct udphdr *up = (struct udphdr *)(data + coal_meta->ip_len);
up->check = pseudo;
- shinfo->gso_type = SKB_GSO_UDP_L4;
skb->csum_offset = offsetof(struct udphdr, check);
}
skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum_start = skb->data + coal_meta->ip_len - skb->head;
- shinfo->gso_size = coal_meta->data_len;
- shinfo->gso_segs = coal_meta->pkt_count;
}
static void
@@ -756,7 +768,8 @@ __rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
/* Handle checksum status */
if (likely(csum_valid)) {
- skbn->ip_summed = CHECKSUM_UNNECESSARY;
+ /* Set the partial checksum information */
+ rmnet_map_partial_csum(skbn, coal_meta);
} else if (check) {
/* Unfortunately, we have to fake a bad checksum here, since
* the original bad value is lost by the hardware. The only
@@ -938,8 +951,10 @@ static void rmnet_map_segment_coal_skb(struct sk_buff *coal_skb,
coal_meta.data_len = ntohs(coal_hdr->nl_pairs[0].pkt_len);
coal_meta.data_len -= coal_meta.ip_len + coal_meta.trans_len;
coal_meta.pkt_count = coal_hdr->nl_pairs[0].num_packets;
- if (coal_meta.pkt_count > 1)
+ if (coal_meta.pkt_count > 1) {
+ rmnet_map_partial_csum(coal_skb, &coal_meta);
rmnet_map_gso_stamp(coal_skb, &coal_meta);
+ }
__skb_queue_tail(list, coal_skb);
return;
@@ -1250,6 +1265,113 @@ static void rmnet_map_linearize_copy(struct sk_buff *dst, struct sk_buff *src)
}
}
+static void rmnet_free_agg_pages(struct rmnet_port *port)
+{
+ struct rmnet_agg_page *agg_page, *idx;
+
+ list_for_each_entry_safe(agg_page, idx, &port->agg_list, list) {
+ put_page(agg_page->page);
+ kfree(agg_page);
+ }
+
+ port->agg_head = NULL;
+}
+
+static struct page *rmnet_get_agg_pages(struct rmnet_port *port)
+{
+ struct rmnet_agg_page *agg_page;
+ struct page *page = NULL;
+ int i = 0;
+
+ if (!(port->egress_agg_params.agg_features & RMNET_PAGE_RECYCLE))
+ goto alloc;
+
+ do {
+ agg_page = port->agg_head;
+ if (unlikely(!agg_page))
+ break;
+
+ if (page_ref_count(agg_page->page) == 1) {
+ page = agg_page->page;
+ page_ref_inc(agg_page->page);
+
+ port->stats.agg.ul_agg_reuse++;
+ port->agg_head = list_next_entry(agg_page, list);
+ break;
+ }
+
+ port->agg_head = list_next_entry(agg_page, list);
+ i++;
+ } while (i <= 5);
+
+alloc:
+ if (!page) {
+ page = __dev_alloc_pages(GFP_ATOMIC, port->agg_size_order);
+ port->stats.agg.ul_agg_alloc++;
+ }
+
+ return page;
+}
+
+static struct rmnet_agg_page *__rmnet_alloc_agg_pages(struct rmnet_port *port)
+{
+ struct rmnet_agg_page *agg_page;
+ struct page *page;
+
+ agg_page = kzalloc(sizeof(*agg_page), GFP_ATOMIC);
+ if (!agg_page)
+ return NULL;
+
+ page = __dev_alloc_pages(GFP_ATOMIC, port->agg_size_order);
+ if (!page) {
+ kfree(agg_page);
+ return NULL;
+ }
+
+ agg_page->page = page;
+
+ return agg_page;
+}
+
+static void rmnet_alloc_agg_pages(struct rmnet_port *port)
+{
+ struct rmnet_agg_page *agg_page = NULL;
+ int i = 0;
+
+ for (i = 0; i < 512; i++) {
+ agg_page = __rmnet_alloc_agg_pages(port);
+
+ if (agg_page)
+ list_add_tail(&agg_page->list, &port->agg_list);
+ }
+
+ port->agg_head = list_first_entry_or_null(&port->agg_list,
+ struct rmnet_agg_page, list);
+}
+
+static struct sk_buff *rmnet_map_build_skb(struct rmnet_port *port)
+{
+ struct sk_buff *skb;
+ unsigned int size;
+ struct page *page;
+ void *vaddr;
+
+ page = rmnet_get_agg_pages(port);
+ if (!page)
+ return NULL;
+
+ vaddr = page_address(page);
+ size = PAGE_SIZE << port->agg_size_order;
+
+ skb = build_skb(vaddr, size);
+ if (!skb) {
+ put_page(page);
+ return NULL;
+ }
+
+ return skb;
+}
+
void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
{
struct timespec diff, last;
@@ -1277,8 +1399,7 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
return;
}
- port->agg_skb = alloc_skb(port->egress_agg_params.agg_size,
- GFP_ATOMIC);
+ port->agg_skb = rmnet_map_build_skb(port);
if (!port->agg_skb) {
port->agg_skb = 0;
port->agg_count = 0;
@@ -1328,14 +1449,51 @@ void rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port)
spin_unlock_irqrestore(&port->agg_lock, flags);
}
+void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u16 size,
+ u8 count, u8 features, u32 time)
+{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&port->agg_lock, irq_flags);
+ port->egress_agg_params.agg_count = count;
+ port->egress_agg_params.agg_time = time;
+ port->egress_agg_params.agg_size = size;
+ port->egress_agg_params.agg_features = features;
+
+ rmnet_free_agg_pages(port);
+
+ /* This effectively disables recycling when the UL aggregation
+ * size is less than PAGE_SIZE.
+ */
+ if (size < PAGE_SIZE)
+ goto done;
+
+ port->agg_size_order = get_order(size);
+
+ size = PAGE_SIZE << port->agg_size_order;
+ size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ port->egress_agg_params.agg_size = size;
+
+ if (port->egress_agg_params.agg_features == RMNET_PAGE_RECYCLE)
+ rmnet_alloc_agg_pages(port);
+
+done:
+ spin_unlock_irqrestore(&port->agg_lock, irq_flags);
+}
+
void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
{
hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
- port->egress_agg_params.agg_size = 8192;
- port->egress_agg_params.agg_count = 20;
- port->egress_agg_params.agg_time = 3000000;
spin_lock_init(&port->agg_lock);
+ INIT_LIST_HEAD(&port->agg_list);
+
+ /* Since PAGE_SIZE - 1 is specified here, no pages are pre-allocated.
+ * This is done to reduce memory usage in cases where
+ * UL aggregation is disabled.
+ * The features flag is also set to 0.
+ */
+ rmnet_map_update_ul_agg_config(port, PAGE_SIZE - 1, 20, 0, 3000000);
INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
}
@@ -1359,6 +1517,7 @@ void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
port->agg_state = 0;
}
+ rmnet_free_agg_pages(port);
spin_unlock_irqrestore(&port->agg_lock, flags);
}
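/* Editor's note on the recycling test above: the pool holds one long-lived
 * reference per page, and each skb built on the page takes another that the
 * stack drops when the skb is freed. page_ref_count() == 1 therefore means
 * no skb still points at the page and it may back a fresh aggregate. A
 * one-line restatement of the invariant:
 */
#include <linux/mm.h>

static bool example_page_is_idle(struct page *page)
{
	/* the sole remaining reference is the pool's own */
	return page_ref_count(page) == 1;
}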
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
index e5c530c..2ce29bf 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -22,6 +22,9 @@ RMNET_INGRESS_FORMAT_DL_MARKER_V2)
#define RMNET_INGRESS_FORMAT_PS BIT(27)
#define RMNET_FORMAT_PS_NOTIF BIT(26)
+/* UL Aggregation parameters */
+#define RMNET_PAGE_RECYCLE BIT(0)
+
/* Replace skb->dev to a virtual rmnet device and pass up the stack */
#define RMNET_EPMODE_VND (1)
/* Pass the frame directly to another device with dev_queue_xmit() */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 1edf9e7..f00f1ce 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -226,6 +226,8 @@ static const char rmnet_port_gstrings_stats[][ETH_GSTRING_LEN] = {
"DL header total pkts received",
"DL trailer last seen sequence",
"DL trailer pkts received",
+ "UL agg reuse",
+ "UL agg alloc",
};
static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -276,6 +278,7 @@ static int rmnet_stats_reset(struct net_device *dev)
{
struct rmnet_priv *priv = netdev_priv(dev);
struct rmnet_port_priv_stats *stp;
+ struct rmnet_priv_stats *st;
struct rmnet_port *port;
port = rmnet_get_port(priv->real_dev);
@@ -285,6 +288,11 @@ static int rmnet_stats_reset(struct net_device *dev)
stp = &port->stats;
memset(stp, 0, sizeof(*stp));
+
+ st = &priv->stats;
+
+ memset(st, 0, sizeof(*st));
+
return 0;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index a6992c4..0c8b714 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7239,13 +7239,18 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
{
unsigned int flags;
- if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
+ switch (tp->mac_version) {
+ case RTL_GIGA_MAC_VER_02 ... RTL_GIGA_MAC_VER_06:
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ /* fall through */
+ case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
flags = PCI_IRQ_LEGACY;
- } else {
+ break;
+ default:
flags = PCI_IRQ_ALL_TYPES;
+ break;
}
return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index d0e6e15..48cf5e2 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -88,6 +88,8 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
u32 value;
base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
+ if (queue >= 4)
+ queue -= 4;
value = readl(ioaddr + base_register);
@@ -105,6 +107,8 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
u32 value;
base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
+ if (queue >= 4)
+ queue -= 4;
value = readl(ioaddr + base_register);
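/* Editor's worked example: each of these 32-bit registers packs four 8-bit
 * per-queue fields, so once the high register (queues 4-7) is selected the
 * queue index must be rebased, or the field shift runs past bit 31. E.g.
 * queue 5 selects GMAC_RXQ_CTRL3 and, after "queue -= 4", lands in field 1:
 */
static unsigned int example_field_shift(unsigned int queue)
{
	unsigned int field = queue & 3;	/* rebase into the selected register */

	return field * 8;		/* queue 5 -> field 1 -> shift 8 */
}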
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index d182f82..870302a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -106,6 +106,8 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
u32 value, reg;
reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
+ if (queue >= 4)
+ queue -= 4;
value = readl(ioaddr + reg);
value &= ~XGMAC_PSRQ(queue);
@@ -169,6 +171,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
u32 value, reg;
reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
+ if (queue >= 4)
+ queue -= 4;
value = readl(ioaddr + reg);
value &= ~XGMAC_QxMDMACH(queue);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 58ea18a..37c0bc6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -37,7 +37,7 @@ static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
entry = &priv->tc_entries[i];
if (!entry->in_use && !first && free)
first = entry;
- if (entry->handle == loc && !free)
+ if ((entry->handle == loc) && !free && !entry->is_frag)
dup = entry;
}
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index 491efc1..7278eca 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -58,8 +58,9 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
if (!phy->last_triggered)
led_trigger_event(&phy->led_link_trigger->trigger,
LED_FULL);
+ else
+ led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
- led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
led_trigger_event(&plt->trigger, LED_FULL);
phy->last_triggered = plt;
}
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index e029c79..2e8056d 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -226,6 +226,8 @@ static int phylink_parse_fixedlink(struct phylink *pl,
__ETHTOOL_LINK_MODE_MASK_NBITS, true);
linkmode_zero(pl->supported);
phylink_set(pl->supported, MII);
+ phylink_set(pl->supported, Pause);
+ phylink_set(pl->supported, Asym_Pause);
if (s) {
__set_bit(s->bit, pl->supported);
} else {
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index f22639f..c04f3dc 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1120,6 +1120,9 @@ static const struct proto_ops pppoe_ops = {
.recvmsg = pppoe_recvmsg,
.mmap = sock_no_mmap,
.ioctl = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pppox_compat_ioctl,
+#endif
};
static const struct pppox_proto pppoe_proto = {
diff --git a/drivers/net/ppp/pppox.c b/drivers/net/ppp/pppox.c
index c0599b3..9128e42 100644
--- a/drivers/net/ppp/pppox.c
+++ b/drivers/net/ppp/pppox.c
@@ -22,6 +22,7 @@
#include <linux/string.h>
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/net.h>
@@ -103,6 +104,18 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
EXPORT_SYMBOL(pppox_ioctl);
+#ifdef CONFIG_COMPAT
+int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ if (cmd == PPPOEIOCSFWD32)
+ cmd = PPPOEIOCSFWD;
+
+ return pppox_ioctl(sock, cmd, (unsigned long)compat_ptr(arg));
+}
+
+EXPORT_SYMBOL(pppox_compat_ioctl);
+#endif
+
static int pppox_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
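/* Editor's note: 32-bit userspace on a 64-bit kernel passes pointer args
 * that must go through compat_ptr(), and PPPOEIOCSFWD encodes sizeof(size_t)
 * in its ioctl number, so the 32-bit encoding differs from the native one.
 * PPPOEIOCSFWD32 is assumed to be defined along these lines elsewhere in
 * the series (illustrative, not quoted from this hunk):
 */
#include <linux/compat.h>
#include <linux/ioctl.h>

/* assumed compat encoding: same ioctl, 4-byte size_t */
#define EXAMPLE_PPPOEIOCSFWD32 _IOW(0xB1, 0, compat_size_t)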
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 50c6055..b626001 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -628,6 +628,9 @@ static const struct proto_ops pptp_ops = {
.recvmsg = sock_no_recvmsg,
.mmap = sock_no_mmap,
.ioctl = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pppox_compat_ioctl,
+#endif
};
static const struct pppox_proto pppox_pptp_proto = {
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index dc30f11..3feb49b 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1011,6 +1011,8 @@ static void __team_compute_features(struct team *team)
team->dev->vlan_features = vlan_features;
team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_GSO_UDP_L4;
team->dev->hard_header_len = max_hard_header_len;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 51a25be..2c80972 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1682,6 +1682,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
skb_reserve(skb, pad - delta);
skb_put(skb, len);
+ skb_set_owner_w(skb, tfile->socket.sk);
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f4247b2..b7a0df9 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -285,7 +285,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata)
{
int i;
- __u8 tmp;
+ __u8 tmp = 0;
__le16 retdatai;
int ret;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 128c8a3..51017c6 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1231,6 +1231,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
{QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
+ {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index d4803ff..f09a4ad 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -1025,7 +1025,7 @@ static int ath10k_usb_probe(struct usb_interface *interface,
}
/* TODO: remove this once USB support is fully implemented */
- ath10k_warn(ar, "WARNING: ath10k USB support is incomplete, don't expect anything to work!\n");
+ ath10k_warn(ar, "Warning: ath10k USB support is incomplete, don't expect anything to work!\n");
return 0;
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index ba15afd..05d8c8e 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -18,6 +18,22 @@
#define WIL_BRD_SUFFIX_CN "CN"
#define WIL_BRD_SUFFIX_FCC "FCC"
+#define WIL_EDMG_CHANNEL_9_SUBCHANNELS (BIT(0) | BIT(1))
+#define WIL_EDMG_CHANNEL_10_SUBCHANNELS (BIT(1) | BIT(2))
+#define WIL_EDMG_CHANNEL_11_SUBCHANNELS (BIT(2) | BIT(3))
+
+/* WIL_EDMG_BW_CONFIGURATION defines the allowed channel bandwidth
+ * configurations as defined by IEEE 802.11 section 9.4.2.251, Table 13.
+ * The value 5 allows CB1 and CB2 of adjacent channels.
+ */
+#define WIL_EDMG_BW_CONFIGURATION 5
+
+/* WIL_EDMG_CHANNELS is a bitmap that indicates the 2.16 GHz channel(s) that
+ * are allowed to be used for EDMG transmissions in the BSS as defined by
+ * IEEE 802.11 section 9.4.2.251.
+ */
+#define WIL_EDMG_CHANNELS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
bool disable_ap_sme;
module_param(disable_ap_sme, bool, 0444);
MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
@@ -56,6 +72,39 @@ static struct ieee80211_channel wil_60ghz_channels[] = {
CHAN60G(4, 0),
};
+/* Rx channel bonding mode */
+enum wil_rx_cb_mode {
+ WIL_RX_CB_MODE_DMG,
+ WIL_RX_CB_MODE_EDMG,
+ WIL_RX_CB_MODE_WIDE,
+};
+
+static int wil_rx_cb_mode_to_n_bonded(u8 cb_mode)
+{
+ switch (cb_mode) {
+ case WIL_RX_CB_MODE_DMG:
+ case WIL_RX_CB_MODE_EDMG:
+ return 1;
+ case WIL_RX_CB_MODE_WIDE:
+ return 2;
+ default:
+ return 1;
+ }
+}
+
+static int wil_tx_cb_mode_to_n_bonded(u8 cb_mode)
+{
+ switch (cb_mode) {
+ case WMI_TX_MODE_DMG:
+ case WMI_TX_MODE_EDMG_CB1:
+ return 1;
+ case WMI_TX_MODE_EDMG_CB2:
+ return 2;
+ default:
+ return 1;
+ }
+}
+
static void
wil_memdup_ie(u8 **pdst, size_t *pdst_len, const u8 *src, size_t src_len)
{
@@ -167,6 +216,13 @@ void update_supported_bands(struct wil6210_priv *wil)
wiphy->bands[NL80211_BAND_60GHZ]->n_channels =
wil_num_supported_channels(wil);
+
+ if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) {
+ wiphy->bands[NL80211_BAND_60GHZ]->edmg_cap.channels =
+ WIL_EDMG_CHANNELS;
+ wiphy->bands[NL80211_BAND_60GHZ]->edmg_cap.bw_config =
+ WIL_EDMG_BW_CONFIGURATION;
+ }
}
/* Vendor id to be used in vendor specific command and events
@@ -593,6 +649,7 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
} __packed reply;
struct wil_net_stats *stats = &wil->sta[cid].stats;
int rc;
+ u8 txflag = RATE_INFO_FLAGS_DMG;
memset(&reply, 0, sizeof(reply));
@@ -606,7 +663,8 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
" MCS %d TSF 0x%016llx\n"
" BF status 0x%08x RSSI %d SQI %d%%\n"
" Tx Tpt %d goodput %d Rx goodput %d\n"
- " Sectors(rx:tx) my %d:%d peer %d:%d\n""}\n",
+ " Sectors(rx:tx) my %d:%d peer %d:%d\n"
+ " Tx mode %d}\n",
cid, vif->mid, le16_to_cpu(reply.evt.bf_mcs),
le64_to_cpu(reply.evt.tsf), reply.evt.status,
reply.evt.rssi,
@@ -617,7 +675,8 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
le16_to_cpu(reply.evt.my_rx_sector),
le16_to_cpu(reply.evt.my_tx_sector),
le16_to_cpu(reply.evt.other_rx_sector),
- le16_to_cpu(reply.evt.other_tx_sector));
+ le16_to_cpu(reply.evt.other_tx_sector),
+ reply.evt.tx_mode);
sinfo->generation = wil->sinfo_gen;
@@ -630,9 +689,16 @@ int wil_cid_fill_sinfo(struct wil6210_vif *vif, int cid,
BIT_ULL(NL80211_STA_INFO_RX_DROP_MISC) |
BIT_ULL(NL80211_STA_INFO_TX_FAILED);
- sinfo->txrate.flags = RATE_INFO_FLAGS_60G;
+ if (wil->use_enhanced_dma_hw && reply.evt.tx_mode != WMI_TX_MODE_DMG)
+ txflag = RATE_INFO_FLAGS_EDMG;
+
+ sinfo->txrate.flags = txflag;
sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs);
sinfo->rxrate.mcs = stats->last_mcs_rx;
+ sinfo->txrate.n_bonded_ch =
+ wil_tx_cb_mode_to_n_bonded(reply.evt.tx_mode);
+ sinfo->rxrate.n_bonded_ch =
+ wil_rx_cb_mode_to_n_bonded(stats->last_cb_mode_rx);
sinfo->rx_bytes = stats->rx_bytes;
sinfo->rx_packets = stats->rx_packets;
sinfo->rx_dropped_misc = stats->rx_dropped;
@@ -1310,6 +1376,33 @@ static int wil_ft_connect(struct wiphy *wiphy,
return rc;
}
+static int wil_get_wmi_edmg_channel(struct wil6210_priv *wil, u8 edmg_bw_config,
+ u8 edmg_channels, u8 *wmi_ch)
+{
+ if (!edmg_bw_config) {
+ *wmi_ch = 0;
+ return 0;
+ } else if (edmg_bw_config == WIL_EDMG_BW_CONFIGURATION) {
+ /* convert from edmg channel bitmap into edmg channel number */
+ switch (edmg_channels) {
+ case WIL_EDMG_CHANNEL_9_SUBCHANNELS:
+ return wil_spec2wmi_ch(9, wmi_ch);
+ case WIL_EDMG_CHANNEL_10_SUBCHANNELS:
+ return wil_spec2wmi_ch(10, wmi_ch);
+ case WIL_EDMG_CHANNEL_11_SUBCHANNELS:
+ return wil_spec2wmi_ch(11, wmi_ch);
+ default:
+ wil_err(wil, "Unsupported edmg channel bitmap 0x%x\n",
+ edmg_channels);
+ return -EINVAL;
+ }
+ } else {
+ wil_err(wil, "Unsupported EDMG BW configuration %d\n",
+ edmg_bw_config);
+ return -EINVAL;
+ }
+}
+
static int wil_cfg80211_connect(struct wiphy *wiphy,
struct net_device *ndev,
struct cfg80211_connect_params *sme)
@@ -1455,7 +1548,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
memcpy(conn.ssid, ssid_eid+2, conn.ssid_len);
conn.channel = ch - 1;
- if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities))
+ if (test_bit(WMI_FW_CAPABILITY_CHANNEL_BONDING, wil->fw_capabilities)) {
if (wil->force_edmg_channel) {
rc = wil_spec2wmi_ch(wil->force_edmg_channel,
&conn.edmg_channel);
@@ -1463,7 +1556,15 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
wil_err(wil,
"wmi channel for channel %d not found",
wil->force_edmg_channel);
+ } else {
+ rc = wil_get_wmi_edmg_channel(wil,
+ sme->edmg.bw_config,
+ sme->edmg.channels,
+ &conn.edmg_channel);
+ if (rc < 0)
+ return rc;
}
+ }
ether_addr_copy(conn.bssid, bss->bssid);
ether_addr_copy(conn.dst_mac, bss->bssid);
@@ -2032,7 +2133,7 @@ static int _wil_cfg80211_set_ies(struct wil6210_vif *vif,
static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
struct net_device *ndev,
const u8 *ssid, size_t ssid_len, u32 privacy,
- int bi, u8 chan,
+ int bi, u8 chan, u8 wmi_edmg_channel,
struct cfg80211_beacon_data *bcon,
u8 hidden_ssid, u32 pbss)
{
@@ -2049,7 +2150,8 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
if (pbss)
wmi_nettype = WMI_NETTYPE_P2P;
- wil_dbg_misc(wil, "start_ap: mid=%d, is_go=%d\n", vif->mid, is_go);
+ wil_dbg_misc(wil, "start_ap: mid=%d, is_go=%d ap_ps=%d\n", vif->mid,
+ is_go, wil->ap_ps);
if (is_go && !pbss) {
wil_err(wil, "P2P GO must be in PBSS\n");
return -ENOTSUPP;
@@ -2110,6 +2212,7 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
vif->privacy = privacy;
vif->channel = chan;
+ vif->wmi_edmg_channel = wmi_edmg_channel;
vif->hidden_ssid = hidden_ssid;
vif->pbss = pbss;
vif->bi = bi;
@@ -2131,7 +2234,8 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
}
- rc = wmi_pcp_start(vif, bi, wmi_nettype, chan, hidden_ssid, is_go);
+ rc = wmi_pcp_start(vif, bi, wmi_nettype, chan, wmi_edmg_channel,
+ hidden_ssid, is_go);
if (rc)
goto err_pcp_start;
@@ -2139,6 +2243,14 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
if (rc)
goto err_bcast;
+ if (test_bit(WMI_FW_CAPABILITY_AP_POWER_MANAGEMENT,
+ wil->fw_capabilities)) {
+ enum wmi_ps_profile_type ps_profile = wil->ap_ps ?
+ wil->ps_profile : WMI_PS_PROFILE_TYPE_PS_DISABLED;
+
+ wil_ps_update(wil, ps_profile);
+ }
+
goto out; /* success */
err_bcast:
@@ -2188,7 +2300,8 @@ void wil_cfg80211_ap_recovery(struct wil6210_priv *wil)
rc = _wil_cfg80211_start_ap(wiphy, ndev,
vif->ssid, vif->ssid_len,
vif->privacy, vif->bi,
- vif->channel, &bcon,
+ vif->channel,
+ vif->wmi_edmg_channel, &bcon,
vif->hidden_ssid, vif->pbss);
if (rc) {
wil_err(wil, "vif %d recovery failed (%d)\n", i, rc);
@@ -2238,7 +2351,8 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
rc = _wil_cfg80211_start_ap(wiphy, ndev, vif->ssid,
vif->ssid_len, privacy,
wdev->beacon_interval,
- vif->channel, bcon,
+ vif->channel,
+ vif->wmi_edmg_channel, bcon,
vif->hidden_ssid,
vif->pbss);
} else {
@@ -2257,10 +2371,17 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct ieee80211_channel *channel = info->chandef.chan;
struct cfg80211_beacon_data *bcon = &info->beacon;
struct cfg80211_crypto_settings *crypto = &info->crypto;
+ u8 wmi_edmg_channel;
u8 hidden_ssid;
wil_dbg_misc(wil, "start_ap\n");
+ rc = wil_get_wmi_edmg_channel(wil, info->chandef.edmg.bw_config,
+ info->chandef.edmg.channels,
+ &wmi_edmg_channel);
+ if (rc < 0)
+ return rc;
+
if (!channel) {
wil_err(wil, "AP: No channel???\n");
return -EINVAL;
@@ -2300,7 +2421,8 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
rc = _wil_cfg80211_start_ap(wiphy, ndev,
info->ssid, info->ssid_len, info->privacy,
info->beacon_interval, channel->hw_value,
- bcon, hidden_ssid, info->pbss);
+ wmi_edmg_channel, bcon, hidden_ssid,
+ info->pbss);
return rc;
}
@@ -3663,9 +3785,7 @@ static int wil_nl_60g_handle_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
"NL_60G_GEN_FW_RESET, resetting...\n");
mutex_lock(&wil->mutex);
- down_write(&wil->mem_lock);
rc = wil_reset(wil, true);
- up_write(&wil->mem_lock);
mutex_unlock(&wil->mutex);
break;
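/* Editor's worked example: EDMG channels 9-11 are 4.32 GHz bondings of two
 * adjacent 2.16 GHz channels, which is why each WIL_EDMG_CHANNEL_*_SUBCHANNELS
 * mask sets two neighbouring bits (channel 9 bonds channels 1 and 2, 10 bonds
 * 2 and 3, 11 bonds 3 and 4). wil_get_wmi_edmg_channel() inverts that
 * mapping; a compact restatement:
 */
#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/types.h>

static int example_edmg_bitmap_to_channel(u8 channels)
{
	switch (channels) {
	case BIT(0) | BIT(1):
		return 9;	/* bonds 2.16 GHz channels 1 and 2 */
	case BIT(1) | BIT(2):
		return 10;	/* bonds channels 2 and 3 */
	case BIT(2) | BIT(3):
		return 11;	/* bonds channels 3 and 4 */
	default:
		return -EINVAL;
	}
}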
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 7a33dab..36ca937 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -2639,6 +2639,7 @@ static const struct dbg_off dbg_wil_off[] = {
WIL_FIELD(rx_buff_id_count, 0644, doff_u32),
WIL_FIELD(amsdu_en, 0644, doff_u8),
WIL_FIELD(force_edmg_channel, 0644, doff_u8),
+ WIL_FIELD(ap_ps, 0644, doff_u8),
{},
};
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 1c2227c..e9b7b2d 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1307,6 +1307,8 @@ void wil_refresh_fw_capabilities(struct wil6210_priv *wil)
}
update_supported_bands(wil);
+
+ wil->ap_ps = test_bit(WIL_PLATFORM_CAPA_AP_PS, wil->platform_capa);
}
void wil_mbox_ring_le2cpus(struct wil6210_mbox_ring *r)
@@ -1745,6 +1747,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* Disable device led before reset*/
wmi_led_cfg(wil, false);
+ down_write(&wil->mem_lock);
+
/* prevent NAPI from being scheduled and prevent wmi commands */
mutex_lock(&wil->wmi_mutex);
if (test_bit(wil_status_suspending, wil->status))
@@ -1800,6 +1804,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
if (wil->secured_boot) {
wil_err(wil, "secured boot is not supported\n");
+ up_write(&wil->mem_lock);
return -ENOTSUPP;
}
@@ -1830,6 +1835,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
clear_bit(wil_status_resetting, wil->status);
+ up_write(&wil->mem_lock);
+
if (load_fw) {
wil_unmask_irq(wil);
@@ -1897,6 +1904,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
return rc;
out:
+ up_write(&wil->mem_lock);
clear_bit(wil_status_resetting, wil->status);
return rc;
}
@@ -1922,9 +1930,7 @@ int __wil_up(struct wil6210_priv *wil)
WARN_ON(!mutex_is_locked(&wil->mutex));
- down_write(&wil->mem_lock);
rc = wil_reset(wil, true);
- up_write(&wil->mem_lock);
if (rc)
return rc;
@@ -2017,9 +2023,7 @@ int __wil_down(struct wil6210_priv *wil)
wil_abort_scan_all_vifs(wil, false);
mutex_unlock(&wil->vif_mutex);
- down_write(&wil->mem_lock);
rc = wil_reset(wil, false);
- up_write(&wil->mem_lock);
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 97d5933..7913124 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -299,6 +299,7 @@ static void wil_vif_init(struct wil6210_vif *vif)
INIT_WORK(&vif->probe_client_worker, wil_probe_client_worker);
INIT_WORK(&vif->disconnect_worker, wil_disconnect_worker);
+ INIT_WORK(&vif->p2p.discovery_expired_work, wil_p2p_listen_expired);
INIT_WORK(&vif->p2p.delayed_listen_work, wil_p2p_delayed_listen_work);
INIT_LIST_HEAD(&vif->probe_client_pending);
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c
index 273e57f..a71c0dc 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.c
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c
@@ -1080,6 +1080,8 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil,
stats->last_mcs_rx = wil_rx_status_get_mcs(msg);
if (stats->last_mcs_rx < ARRAY_SIZE(stats->rx_per_mcs))
stats->rx_per_mcs[stats->last_mcs_rx]++;
+
+ stats->last_cb_mode_rx = wil_rx_status_get_cb_mode(msg);
}
if (!wil->use_rx_hw_reordering && !wil->use_compressed_rx_status &&
diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h
index e9ab926..af6de29 100644
--- a/drivers/net/wireless/ath/wil6210/txrx_edma.h
+++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h
@@ -354,6 +354,12 @@ static inline u8 wil_rx_status_get_mcs(void *msg)
16, 21);
}
+static inline u8 wil_rx_status_get_cb_mode(void *msg)
+{
+ return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d1,
+ 22, 23);
+}
+
static inline u16 wil_rx_status_get_flow_id(void *msg)
{
return WIL_GET_BITS(((struct wil_rx_status_compressed *)msg)->d0,
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 5120b46..8ebc76a 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -604,6 +604,7 @@ struct wil_net_stats {
unsigned long rx_amsdu_error; /* eDMA specific */
unsigned long rx_csum_err;
u16 last_mcs_rx;
+ u8 last_cb_mode_rx;
u64 rx_per_mcs[WIL_MCS_MAX + 1];
u32 ft_roams; /* relevant in STA mode */
};
@@ -866,6 +867,7 @@ struct wil6210_vif {
DECLARE_BITMAP(status, wil_vif_status_last);
u32 privacy; /* secure connection? */
u16 channel; /* relevant in AP mode */
+ u8 wmi_edmg_channel; /* relevant in AP mode */
u8 hidden_ssid; /* relevant in AP mode */
u32 ap_isolate; /* no intra-BSS communication */
bool pbss;
@@ -1041,6 +1043,7 @@ struct wil6210_priv {
void *platform_handle;
struct wil_platform_ops platform_ops;
bool keep_radio_on_during_sleep;
+ u8 ap_ps; /* AP mode power save enabled */
struct pmc_ctx pmc;
@@ -1396,7 +1399,7 @@ void wil_p2p_wdev_free(struct wil6210_priv *wil);
int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
int wmi_pcp_start(struct wil6210_vif *vif, int bi, u8 wmi_nettype, u8 chan,
- u8 hidden_ssid, u8 is_go);
+ u8 edmg_chan, u8 hidden_ssid, u8 is_go);
int wmi_pcp_stop(struct wil6210_vif *vif);
int wmi_led_cfg(struct wil6210_priv *wil, bool enable);
int wmi_abort_scan(struct wil6210_vif *vif);
@@ -1506,6 +1509,7 @@ int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold);
int wil_wmi2spec_ch(u8 wmi_ch, u8 *spec_ch);
int wil_spec2wmi_ch(u8 spec_ch, u8 *wmi_ch);
+void wil_update_supported_bands(struct wil6210_priv *wil);
int reverse_memcmp(const void *cs, const void *ct, size_t count);
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index 1332eb8..89c12cb 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -46,7 +46,7 @@ static int wil_fw_get_crash_dump_bounds(struct wil6210_priv *wil,
int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
{
- int i, rc;
+ int i;
const struct fw_map *map;
void *data;
u32 host_min, dump_size, offset, len;
@@ -62,9 +62,15 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
return -EINVAL;
}
- rc = wil_mem_access_lock(wil);
- if (rc)
- return rc;
+ down_write(&wil->mem_lock);
+
+ if (test_bit(wil_status_suspending, wil->status) ||
+ test_bit(wil_status_suspended, wil->status)) {
+ wil_err(wil,
+ "suspend/resume in progress. cannot copy crash dump\n");
+ up_write(&wil->mem_lock);
+ return -EBUSY;
+ }
/* copy to crash dump area */
for (i = 0; i < ARRAY_SIZE(fw_mapping); i++) {
@@ -84,7 +90,8 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
wil_memcpy_fromio_32((void * __force)(dest + offset),
(const void __iomem * __force)data, len);
}
- wil_mem_access_unlock(wil);
+
+ up_write(&wil->mem_lock);
return 0;
}
diff --git a/drivers/net/wireless/ath/wil6210/wil_platform.h b/drivers/net/wireless/ath/wil6210/wil_platform.h
index d381649..e6730af 100644
--- a/drivers/net/wireless/ath/wil6210/wil_platform.h
+++ b/drivers/net/wireless/ath/wil6210/wil_platform.h
@@ -32,6 +32,7 @@ enum wil_platform_capa {
WIL_PLATFORM_CAPA_T_PWR_ON_0 = 1,
WIL_PLATFORM_CAPA_EXT_CLK = 2,
WIL_PLATFORM_CAPA_SMMU = 3,
+ WIL_PLATFORM_CAPA_AP_PS = 4,
WIL_PLATFORM_CAPA_MAX,
};
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 3a10bd8..ec7444b 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1447,6 +1447,10 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len)
__le16 fc;
u32 d_len;
struct cfg80211_bss *bss;
+ struct cfg80211_inform_bss bss_data = {
+ .scan_width = NL80211_BSS_CHAN_WIDTH_20,
+ .boottime_ns = ktime_to_ns(ktime_get_boottime()),
+ };
if (flen < 0) {
wil_err(wil, "sched scan result event too short, len %d\n",
@@ -1489,8 +1493,10 @@ wmi_evt_sched_scan_result(struct wil6210_vif *vif, int id, void *d, int len)
return;
}
- bss = cfg80211_inform_bss_frame(wiphy, channel, rx_mgmt_frame,
- d_len, signal, GFP_KERNEL);
+ bss_data.signal = signal;
+ bss_data.chan = channel;
+ bss = cfg80211_inform_bss_frame_data(wiphy, &bss_data, rx_mgmt_frame,
+ d_len, GFP_KERNEL);
if (bss) {
wil_dbg_wmi(wil, "Added BSS %pM\n", rx_mgmt_frame->bssid);
cfg80211_put_bss(wiphy, bss);
@@ -2253,8 +2259,8 @@ int wmi_rbufcap_cfg(struct wil6210_priv *wil, bool enable, u16 threshold)
return rc;
}
-int wmi_pcp_start(struct wil6210_vif *vif,
- int bi, u8 wmi_nettype, u8 chan, u8 hidden_ssid, u8 is_go)
+int wmi_pcp_start(struct wil6210_vif *vif, int bi, u8 wmi_nettype,
+ u8 chan, u8 wmi_edmg_chan, u8 hidden_ssid, u8 is_go)
{
struct wil6210_priv *wil = vif_to_wil(vif);
int rc;
@@ -2264,6 +2270,7 @@ int wmi_pcp_start(struct wil6210_vif *vif,
.network_type = wmi_nettype,
.disable_sec_offload = 1,
.channel = chan - 1,
+ .edmg_channel = wmi_edmg_chan,
.pcp_max_assoc_sta = max_assoc_sta,
.hidden_ssid = hidden_ssid,
.is_go = is_go,
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index ceb0c5b..fc28f4b 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -99,6 +99,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_CHANNEL_4 = 26,
WMI_FW_CAPABILITY_IPA = 27,
WMI_FW_CAPABILITY_TEMPERATURE_ALL_RF = 30,
+ WMI_FW_CAPABILITY_AP_POWER_MANAGEMENT = 32,
WMI_FW_CAPABILITY_MAX,
};
diff --git a/drivers/net/wireless/cnss2/debug.c b/drivers/net/wireless/cnss2/debug.c
index 6f0efc7..ec75ff6 100644
--- a/drivers/net/wireless/cnss2/debug.c
+++ b/drivers/net/wireless/cnss2/debug.c
@@ -615,6 +615,9 @@ static int cnss_show_quirks_state(struct seq_file *s,
case DISABLE_DRV:
seq_puts(s, "DISABLE_DRV");
continue;
+ case DISABLE_IO_COHERENCY:
+ seq_puts(s, "DISABLE_IO_COHERENCY");
+ continue;
}
seq_printf(s, "UNKNOWN-%d", i);
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index b80d523..8041882 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -29,8 +29,9 @@
#define FW_READY_TIMEOUT 20000
#define FW_ASSERT_TIMEOUT 5000
#define CNSS_EVENT_PENDING 2989
+#define COLD_BOOT_CAL_SHUTDOWN_DELAY_MS 50
-#define CNSS_QUIRKS_DEFAULT 0
+#define CNSS_QUIRKS_DEFAULT BIT(DISABLE_IO_COHERENCY)
#ifdef CONFIG_CNSS_EMULATION
#define CNSS_MHI_TIMEOUT_DEFAULT 90000
#else
@@ -1254,6 +1255,7 @@ static int cnss_cold_boot_cal_done_hdlr(struct cnss_plat_data *plat_priv,
cnss_wlfw_wlan_mode_send_sync(plat_priv, CNSS_OFF);
cnss_release_antenna_sharing(plat_priv);
cnss_bus_dev_shutdown(plat_priv);
+ msleep(COLD_BOOT_CAL_SHUTDOWN_DELAY_MS);
complete(&plat_priv->cal_complete);
clear_bit(CNSS_COLD_BOOT_CAL, &plat_priv->driver_state);
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index d11f099..65d7330 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -239,6 +239,7 @@ enum cnss_debug_quirks {
FBC_BYPASS,
ENABLE_DAEMON_SUPPORT,
DISABLE_DRV,
+ DISABLE_IO_COHERENCY,
};
enum cnss_bdf_type {
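
DISABLE_IO_COHERENCY is appended to cnss_debug_quirks, and CNSS_QUIRKS_DEFAULT (main.c hunk above) now enables it by default. The enum values are bit positions in the quirks word, set with BIT() and queried with test_bit(), as in this minimal sketch (demo_* names are hypothetical):

#include <linux/bitops.h>

enum demo_quirks {
	DEMO_DISABLE_DRV,
	DEMO_DISABLE_IO_COHERENCY,
};

/* Illustrative: quirk enum values are bit numbers, not masks */
static bool demo_io_coherency_disabled(unsigned long quirks)
{
	return test_bit(DEMO_DISABLE_IO_COHERENCY, &quirks);
}

/* default word: BIT(DEMO_DISABLE_IO_COHERENCY) == 0x2 */
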
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 01b20ec..245446f 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -16,6 +16,7 @@
#include "bus.h"
#include "debug.h"
#include "pci.h"
+#include "reg.h"
#define PCI_LINK_UP 1
#define PCI_LINK_DOWN 0
@@ -57,79 +58,10 @@ static DEFINE_SPINLOCK(pci_reg_window_lock);
#define MHI_TIMEOUT_OVERWRITE_MS (plat_priv->ctrl_params.mhi_timeout)
-#define QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET 0x310C
-
-#define QCA6390_CE_SRC_RING_REG_BASE 0xA00000
-#define QCA6390_CE_DST_RING_REG_BASE 0xA01000
-#define QCA6390_CE_COMMON_REG_BASE 0xA18000
-
-#define QCA6390_CE_SRC_RING_BASE_LSB_OFFSET 0x0
-#define QCA6390_CE_SRC_RING_BASE_MSB_OFFSET 0x4
-#define QCA6390_CE_SRC_RING_ID_OFFSET 0x8
-#define QCA6390_CE_SRC_RING_MISC_OFFSET 0x10
-#define QCA6390_CE_SRC_CTRL_OFFSET 0x58
-#define QCA6390_CE_SRC_R0_CE_CH_SRC_IS_OFFSET 0x5C
-#define QCA6390_CE_SRC_RING_HP_OFFSET 0x400
-#define QCA6390_CE_SRC_RING_TP_OFFSET 0x404
-
-#define QCA6390_CE_DEST_RING_BASE_LSB_OFFSET 0x0
-#define QCA6390_CE_DEST_RING_BASE_MSB_OFFSET 0x4
-#define QCA6390_CE_DEST_RING_ID_OFFSET 0x8
-#define QCA6390_CE_DEST_RING_MISC_OFFSET 0x10
-#define QCA6390_CE_DEST_CTRL_OFFSET 0xB0
-#define QCA6390_CE_CH_DST_IS_OFFSET 0xB4
-#define QCA6390_CE_CH_DEST_CTRL2_OFFSET 0xB8
-#define QCA6390_CE_DEST_RING_HP_OFFSET 0x400
-#define QCA6390_CE_DEST_RING_TP_OFFSET 0x404
-
-#define QCA6390_CE_STATUS_RING_BASE_LSB_OFFSET 0x58
-#define QCA6390_CE_STATUS_RING_BASE_MSB_OFFSET 0x5C
-#define QCA6390_CE_STATUS_RING_ID_OFFSET 0x60
-#define QCA6390_CE_STATUS_RING_MISC_OFFSET 0x68
-#define QCA6390_CE_STATUS_RING_HP_OFFSET 0x408
-#define QCA6390_CE_STATUS_RING_TP_OFFSET 0x40C
-
-#define QCA6390_CE_COMMON_GXI_ERR_INTS 0x14
-#define QCA6390_CE_COMMON_GXI_ERR_STATS 0x18
-#define QCA6390_CE_COMMON_GXI_WDOG_STATUS 0x2C
-#define QCA6390_CE_COMMON_TARGET_IE_0 0x48
-#define QCA6390_CE_COMMON_TARGET_IE_1 0x4C
-
-#define QCA6390_CE_REG_INTERVAL 0x2000
-
-#define SHADOW_REG_COUNT 36
-#define QCA6390_PCIE_SHADOW_REG_VALUE_0 0x8FC
-#define QCA6390_PCIE_SHADOW_REG_VALUE_34 0x984
-#define QCA6390_PCIE_SHADOW_REG_VALUE_35 0x988
-#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL3 0x1F80118
-#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL4 0x1F8011C
-#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL5 0x1F80120
-
-#define SHADOW_REG_INTER_COUNT 43
-#define QCA6390_PCIE_SHADOW_REG_INTER_0 0x1E05000
-#define QCA6390_PCIE_SHADOW_REG_HUNG 0x1E050A8
-
-#define QDSS_APB_DEC_CSR_BASE 0x1C01000
-
-#define QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET 0x6C
-#define QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET 0x70
-#define QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET 0x74
-#define QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET 0x78
-
-#define MAX_UNWINDOWED_ADDRESS 0x80000
-#define WINDOW_ENABLE_BIT 0x40000000
-#define WINDOW_SHIFT 19
-#define WINDOW_VALUE_MASK 0x3F
-#define WINDOW_START MAX_UNWINDOWED_ADDRESS
-#define WINDOW_RANGE_MASK 0x7FFFF
-
#define FORCE_WAKE_DELAY_MIN_US 4000
#define FORCE_WAKE_DELAY_MAX_US 6000
#define FORCE_WAKE_DELAY_TIMEOUT_US 60000
-#define QCA6390_TIME_SYNC_ENABLE 0x80000000
-#define QCA6390_TIME_SYNC_CLEAR 0x0
-
static struct cnss_pci_reg ce_src[] = {
{ "SRC_RING_BASE_LSB", QCA6390_CE_SRC_RING_BASE_LSB_OFFSET },
{ "SRC_RING_BASE_MSB", QCA6390_CE_SRC_RING_BASE_MSB_OFFSET },
@@ -178,6 +110,240 @@ static struct cnss_pci_reg qdss_csr[] = {
{ NULL },
};
+static struct cnss_misc_reg wcss_reg_access_seq[] = {
+ {0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
+ {1, QCA6390_GCC_DEBUG_CLK_CTL, 0x802},
+ {0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE, 0},
+ {1, QCA6390_GCC_DEBUG_CLK_CTL, 0x805},
+ {0, QCA6390_GCC_DEBUG_CLK_CTL, 0},
+ {0, QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL, 0},
+ {0, QCA6390_WCSS_PMM_TOP_PMU_CX_CSR, 0},
+ {0, QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT, 0},
+ {0, QCA6390_WCSS_PMM_TOP_AON_INT_EN, 0},
+ {0, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS, 0},
+ {1, QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL, 0xD},
+ {0, QCA6390_WCSS_PMM_TOP_TESTBUS_STS, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
+ {1, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG, 0},
+ {1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x8},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5, 0},
+ {0, QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6, 0},
+ {1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30040},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {1, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0x30105},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE, 0},
+ {0, QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL, 0},
+ {0, QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR, 0},
+ {0, QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR, 0},
+ {0, QCA6390_WCSS_CC_WCSS_UMAC_GDSCR, 0},
+ {0, QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR, 0},
+ {0, QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR, 0},
+ {0, QCA6390_WCSS_PMM_TOP_PMM_INT_CLR, 0},
+ {0, QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN, 0},
+};
+
+static struct cnss_misc_reg pcie_reg_access_seq[] = {
+ {0, QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG, 0},
+ {0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
+ {1, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0x18},
+ {0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
+ {0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG, 0},
+ {0, QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG, 0},
+ {0, QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG, 0},
+ {0, QCA6390_TLMM_GPIO_IN_OUT57, 0},
+ {0, QCA6390_TLMM_GPIO_INTR_CFG57, 0},
+ {0, QCA6390_TLMM_GPIO_INTR_STATUS57, 0},
+ {0, QCA6390_TLMM_GPIO_IN_OUT59, 0},
+ {0, QCA6390_TLMM_GPIO_INTR_CFG59, 0},
+ {0, QCA6390_TLMM_GPIO_INTR_STATUS59, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_LTSSM, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_PM_STTS, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_PM_STTS_1, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_INT_STATUS, 0},
+ {0, QCA6390_PCIE_PCIE_INT_ALL_STATUS, 0},
+ {0, QCA6390_PCIE_PCIE_INT_ALL_MASK, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB, 0},
+ {0, QCA6390_PCIE_PCIE_CORE_CONFIG, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1, 0},
+ {0, QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2, 0},
+ {0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH, 0},
+ {0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW, 0},
+ {0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH, 0},
+ {0, QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW, 0},
+ {0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2, 0},
+ {0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2, 0},
+ {0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1, 0},
+ {0, QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1, 0},
+ {0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1, 0},
+ {0, QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1, 0},
+};
+
+static struct cnss_misc_reg wlaon_reg_access_seq[] = {
+ {0, QCA6390_WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG, 0},
+ {0, QCA6390_WLAON_SOC_POWER_CTRL, 0},
+ {0, QCA6390_WLAON_PCIE_PWR_CTRL_REG, 0},
+ {0, QCA6390_WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG, 0},
+ {0, QCA6390_WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG, 0},
+ {0, QCA6390_WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG, 0},
+ {0, QCA6390_WLAON_SOC_POWER_CTRL, 0},
+ {0, QCA6390_WLAON_SOC_PWR_WDG_BARK_THRSHD, 0},
+ {0, QCA6390_WLAON_SOC_PWR_WDG_BITE_THRSHD, 0},
+ {0, QCA6390_WLAON_SW_COLD_RESET, 0},
+ {0, QCA6390_WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE, 0},
+ {0, QCA6390_WLAON_GDSC_DELAY_SETTING, 0},
+ {0, QCA6390_WLAON_GDSC_DELAY_SETTING2, 0},
+ {0, QCA6390_WLAON_WL_PWR_STATUS_REG, 0},
+ {0, QCA6390_WLAON_WL_AON_DBG_CFG_REG, 0},
+ {0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL1, 0},
+ {0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL6, 0},
+ {0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL7, 0},
+ {0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL3, 0},
+ {0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL4, 0},
+ {0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL5, 0},
+ {0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL8, 0},
+ {0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL2, 0},
+ {0, QCA6390_WLAON_GLOBAL_COUNTER_CTRL9, 0},
+ {0, QCA6390_WLAON_RTC_CLK_CAL_CTRL1, 0},
+ {0, QCA6390_WLAON_RTC_CLK_CAL_CTRL2, 0},
+ {0, QCA6390_WLAON_RTC_CLK_CAL_CTRL3, 0},
+ {0, QCA6390_WLAON_RTC_CLK_CAL_CTRL4, 0},
+ {0, QCA6390_WLAON_RTC_CLK_CAL_CTRL5, 0},
+ {0, QCA6390_WLAON_RTC_CLK_CAL_CTRL6, 0},
+ {0, QCA6390_WLAON_RTC_CLK_CAL_CTRL7, 0},
+ {0, QCA6390_WLAON_RTC_CLK_CAL_CTRL8, 0},
+ {0, QCA6390_WLAON_RTC_CLK_CAL_CTRL9, 0},
+ {0, QCA6390_WLAON_WCSSAON_CONFIG_REG, 0},
+ {0, QCA6390_WLAON_WLAN_OEM_DEBUG_REG, 0},
+ {0, QCA6390_WLAON_WLAN_RAM_DUMP_REG, 0},
+ {0, QCA6390_WLAON_QDSS_WCSS_REG, 0},
+ {0, QCA6390_WLAON_QDSS_WCSS_ACK, 0},
+ {0, QCA6390_WLAON_WL_CLK_CNTL_KDF_REG, 0},
+ {0, QCA6390_WLAON_WL_CLK_CNTL_PMU_HFRC_REG, 0},
+ {0, QCA6390_WLAON_QFPROM_PWR_CTRL_REG, 0},
+ {0, QCA6390_WLAON_DLY_CONFIG, 0},
+ {0, QCA6390_WLAON_WLAON_Q6_IRQ_REG, 0},
+ {0, QCA6390_WLAON_PCIE_INTF_SW_CFG_REG, 0},
+ {0, QCA6390_WLAON_PCIE_INTF_STICKY_SW_CFG_REG, 0},
+ {0, QCA6390_WLAON_PCIE_INTF_PHY_SW_CFG_REG, 0},
+ {0, QCA6390_WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG, 0},
+ {0, QCA6390_WLAON_Q6_COOKIE_BIT, 0},
+ {0, QCA6390_WLAON_WARM_SW_ENTRY, 0},
+ {0, QCA6390_WLAON_RESET_DBG_SW_ENTRY, 0},
+ {0, QCA6390_WLAON_WL_PMUNOC_CFG_REG, 0},
+ {0, QCA6390_WLAON_RESET_CAUSE_CFG_REG, 0},
+ {0, QCA6390_WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG, 0},
+ {0, QCA6390_WLAON_DEBUG, 0},
+ {0, QCA6390_WLAON_SOC_PARAMETERS, 0},
+ {0, QCA6390_WLAON_WLPM_SIGNAL, 0},
+ {0, QCA6390_WLAON_SOC_RESET_CAUSE_REG, 0},
+ {0, QCA6390_WLAON_WAKEUP_PCIE_SOC_REG, 0},
+ {0, QCA6390_WLAON_PBL_STACK_CANARY, 0},
+ {0, QCA6390_WLAON_MEM_TOT_NUM_GRP_REG, 0},
+ {0, QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP0_REG, 0},
+ {0, QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP1_REG, 0},
+ {0, QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP2_REG, 0},
+ {0, QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP3_REG, 0},
+ {0, QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP0_REG, 0},
+ {0, QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP1_REG, 0},
+ {0, QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP2_REG, 0},
+ {0, QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP3_REG, 0},
+ {0, QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG, 0},
+ {0, QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG, 0},
+ {0, QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG, 0},
+ {0, QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG, 0},
+ {0, QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG, 0},
+ {0, QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG, 0},
+ {0, QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG, 0},
+ {0, QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG, 0},
+ {0, QCA6390_WLAON_MEM_CNT_SEL_REG, 0},
+ {0, QCA6390_WLAON_MEM_NO_EXTBHS_REG, 0},
+ {0, QCA6390_WLAON_MEM_DEBUG_REG, 0},
+ {0, QCA6390_WLAON_MEM_DEBUG_BUS_REG, 0},
+ {0, QCA6390_WLAON_MEM_REDUN_CFG_REG, 0},
+ {0, QCA6390_WLAON_WL_AON_SPARE2, 0},
+ {0, QCA6390_WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG, 0},
+ {0, QCA6390_WLAON_BTFM_WLAN_IPC_STATUS_REG, 0},
+ {0, QCA6390_WLAON_MPM_COUNTER_CHICKEN_BITS, 0},
+ {0, QCA6390_WLAON_WLPM_CHICKEN_BITS, 0},
+ {0, QCA6390_WLAON_PCIE_PHY_PWR_REG, 0},
+ {0, QCA6390_WLAON_WL_CLK_CNTL_PMU_LPO2M_REG, 0},
+ {0, QCA6390_WLAON_WL_SS_ROOT_CLK_SWITCH_REG, 0},
+ {0, QCA6390_WLAON_POWERCTRL_PMU_REG, 0},
+ {0, QCA6390_WLAON_POWERCTRL_MEM_REG, 0},
+ {0, QCA6390_WLAON_PCIE_PWR_CTRL_REG, 0},
+ {0, QCA6390_WLAON_SOC_PWR_PROFILE_REG, 0},
+ {0, QCA6390_WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG, 0},
+ {0, QCA6390_WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG, 0},
+ {0, QCA6390_WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG, 0},
+ {0, QCA6390_WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG, 0},
+ {0, QCA6390_WLAON_MEM_SVS_CFG_REG, 0},
+ {0, QCA6390_WLAON_CMN_AON_MISC_REG, 0},
+ {0, QCA6390_WLAON_INTR_STATUS, 0},
+ {0, QCA6390_SYSPM_SYSPM_PWR_STATUS, 0},
+ {0, QCA6390_SYSPM_DBG_BTFM_AON_REG, 0},
+ {0, QCA6390_SYSPM_DBG_BUS_SEL_REG, 0},
+ {0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+ {0, QCA6390_SYSPM_WCSSAON_SR_STATUS, 0},
+};
+
+#define WCSS_REG_SIZE ARRAY_SIZE(wcss_reg_access_seq)
+#define PCIE_REG_SIZE ARRAY_SIZE(pcie_reg_access_seq)
+#define WLAON_REG_SIZE ARRAY_SIZE(wlaon_reg_access_seq)
+
static int cnss_pci_check_link_status(struct cnss_pci_data *pci_priv)
{
u16 device_id;
@@ -1090,6 +1256,74 @@ int cnss_pci_update_status(struct cnss_pci_data *pci_priv,
return 0;
}
+static void cnss_pci_misc_reg_dump(struct cnss_pci_data *pci_priv,
+ struct cnss_misc_reg *misc_reg,
+ u32 misc_reg_size,
+ char *reg_name)
+{
+ int i;
+
+ if (!misc_reg)
+ return;
+
+ if (in_interrupt() || irqs_disabled())
+ return;
+
+ if (cnss_pci_check_link_status(pci_priv))
+ return;
+
+ cnss_pci_force_wake_get(pci_priv);
+
+ cnss_pr_dbg("start to dump %s registers\n", reg_name);
+
+ for (i = 0; i < misc_reg_size; i++) {
+ if (misc_reg[i].wr) {
+ if (misc_reg[i].offset ==
+ QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG &&
+ i >= 1)
+ misc_reg[i].val =
+ QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK |
+ misc_reg[i - 1].val;
+ if (cnss_pci_reg_write(pci_priv,
+ misc_reg[i].offset,
+ misc_reg[i].val))
+ goto force_wake_put;
+ cnss_pr_vdbg("Write 0x%X to 0x%X\n",
+ misc_reg[i].val,
+ misc_reg[i].offset);
+
+ } else {
+ if (cnss_pci_reg_read(pci_priv,
+ misc_reg[i].offset,
+ &misc_reg[i].val))
+ goto force_wake_put;
+ cnss_pr_vdbg("Read 0x%X from 0x%X\n",
+ misc_reg[i].val,
+ misc_reg[i].offset);
+ }
+ }
+
+force_wake_put:
+ cnss_pci_force_wake_put(pci_priv);
+}
+
+static void cnss_pci_dump_misc_reg(struct cnss_pci_data *pci_priv)
+{
+ if (in_interrupt() || irqs_disabled())
+ return;
+
+ if (cnss_pci_check_link_status(pci_priv))
+ return;
+
+ mhi_debug_reg_dump(pci_priv->mhi_ctrl);
+ cnss_pci_misc_reg_dump(pci_priv, pci_priv->wcss_reg,
+ pci_priv->wcss_reg_size, "wcss");
+ cnss_pci_misc_reg_dump(pci_priv, pci_priv->pcie_reg,
+ pci_priv->pcie_reg_size, "pcie");
+ cnss_pci_misc_reg_dump(pci_priv, pci_priv->wlaon_reg,
+ pci_priv->wlaon_reg_size, "wlaon");
+}
+
static void cnss_pci_dump_shadow_reg(struct cnss_pci_data *pci_priv)
{
int i, j = 0, array_size = SHADOW_REG_COUNT + SHADOW_REG_INTER_COUNT;
@@ -1626,9 +1860,16 @@ int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
static bool cnss_pci_is_drv_supported(struct cnss_pci_data *pci_priv)
{
struct pci_dev *root_port = pci_find_pcie_root_port(pci_priv->pci_dev);
- struct device_node *root_of_node = root_port->dev.of_node;
+ struct device_node *root_of_node;
bool drv_supported = false;
+ if (!root_port) {
+ cnss_pr_err("PCIe DRV is not supported as root port is null\n");
+ return drv_supported;
+ }
+
+ root_of_node = root_port->dev.of_node;
+
if (root_of_node->parent)
drv_supported = of_property_read_bool(root_of_node->parent,
"qcom,drv-supported");
@@ -1691,6 +1932,8 @@ static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
break;
case MSM_PCIE_EVENT_DRV_DISCONNECT:
cnss_pr_dbg("DRV subsystem is disconnected\n");
+ if (cnss_pci_get_auto_suspended(pci_priv))
+ cnss_pci_pm_request_resume(pci_priv);
cnss_pci_set_drv_connected(pci_priv, 0);
break;
default:
@@ -1864,11 +2107,17 @@ static int cnss_pci_suspend(struct device *dev)
if (!cnss_is_device_powered_on(plat_priv))
goto out;
- set_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
-
- if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks))
+ if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks)) {
pci_priv->drv_connected_last =
cnss_pci_get_drv_connected(pci_priv);
+ if (!pci_priv->drv_connected_last) {
+ cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+ }
+
+ set_bit(CNSS_IN_SUSPEND_RESUME, &plat_priv->driver_state);
ret = cnss_pci_suspend_driver(pci_priv);
if (ret)
@@ -1988,11 +2237,16 @@ static int cnss_pci_runtime_suspend(struct device *dev)
return -EAGAIN;
}
- cnss_pr_vdbg("Runtime suspend start\n");
-
- if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks))
+ if (!test_bit(DISABLE_DRV, &plat_priv->ctrl_params.quirks)) {
pci_priv->drv_connected_last =
cnss_pci_get_drv_connected(pci_priv);
+ if (!pci_priv->drv_connected_last) {
+ cnss_pr_dbg("Firmware does not support non-DRV suspend, reject\n");
+ return -EAGAIN;
+ }
+ }
+
+ cnss_pr_vdbg("Runtime suspend start\n");
driver_ops = pci_priv->driver_ops;
if (driver_ops && driver_ops->runtime_ops &&
@@ -2091,46 +2345,74 @@ void cnss_pci_pm_runtime_show_usage_count(struct cnss_pci_data *pci_priv)
int cnss_pci_pm_request_resume(struct cnss_pci_data *pci_priv)
{
- struct pci_dev *pci_dev;
+ struct device *dev;
+ enum rpm_status status;
if (!pci_priv)
return -ENODEV;
- pci_dev = pci_priv->pci_dev;
- if (!pci_dev)
- return -ENODEV;
+ dev = &pci_priv->pci_dev->dev;
- return pm_request_resume(&pci_dev->dev);
+ status = dev->power.runtime_status;
+ if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
+ cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
+ (void *)_RET_IP_);
+
+ return pm_request_resume(dev);
}
int cnss_pci_pm_runtime_resume(struct cnss_pci_data *pci_priv)
{
- struct pci_dev *pci_dev;
+ struct device *dev;
+ enum rpm_status status;
if (!pci_priv)
return -ENODEV;
- pci_dev = pci_priv->pci_dev;
- if (!pci_dev)
- return -ENODEV;
+ dev = &pci_priv->pci_dev->dev;
- return pm_runtime_resume(&pci_dev->dev);
+ status = dev->power.runtime_status;
+ if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
+ cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
+ (void *)_RET_IP_);
+
+ return pm_runtime_resume(dev);
}
int cnss_pci_pm_runtime_get(struct cnss_pci_data *pci_priv)
{
+ struct device *dev;
+ enum rpm_status status;
+
if (!pci_priv)
return -ENODEV;
- return pm_runtime_get(&pci_priv->pci_dev->dev);
+ dev = &pci_priv->pci_dev->dev;
+
+ status = dev->power.runtime_status;
+ if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
+ cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
+ (void *)_RET_IP_);
+
+ return pm_runtime_get(dev);
}
int cnss_pci_pm_runtime_get_sync(struct cnss_pci_data *pci_priv)
{
+ struct device *dev;
+ enum rpm_status status;
+
if (!pci_priv)
return -ENODEV;
- return pm_runtime_get_sync(&pci_priv->pci_dev->dev);
+ dev = &pci_priv->pci_dev->dev;
+
+ status = dev->power.runtime_status;
+ if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
+ cnss_pr_vdbg("Runtime PM resume is requested by %ps\n",
+ (void *)_RET_IP_);
+
+ return pm_runtime_get_sync(dev);
}
void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv)
@@ -2138,7 +2420,7 @@ void cnss_pci_pm_runtime_get_noresume(struct cnss_pci_data *pci_priv)
if (!pci_priv)
return;
- return pm_runtime_get_noresume(&pci_priv->pci_dev->dev);
+ pm_runtime_get_noresume(&pci_priv->pci_dev->dev);
}
int cnss_pci_pm_runtime_put_autosuspend(struct cnss_pci_data *pci_priv)
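
Each of the runtime-PM wrappers above now logs the caller when the device is suspended or suspending, so resume requests can be attributed in verbose logs. Since the status check is repeated verbatim four times, it could be factored into a helper like the sketch below (hypothetical, not in the patch); note it must be __always_inline so _RET_IP_ still reports the wrapper's caller rather than the wrapper itself:

#include <linux/kernel.h>
#include <linux/pm_runtime.h>

/* Illustrative helper, not from the patch */
static __always_inline void demo_log_if_resume_needed(struct device *dev)
{
	enum rpm_status status = dev->power.runtime_status;

	if (status == RPM_SUSPENDING || status == RPM_SUSPENDED)
		pr_debug("Runtime PM resume is requested by %ps\n",
			 (void *)_RET_IP_);
}
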
@@ -2496,6 +2778,7 @@ int cnss_pci_force_fw_assert_hdlr(struct cnss_pci_data *pci_priv)
return -ENODEV;
cnss_auto_resume(&pci_priv->pci_dev->dev);
+ cnss_pci_dump_misc_reg(pci_priv);
cnss_pci_dump_shadow_reg(pci_priv);
ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
@@ -2619,9 +2902,14 @@ int cnss_smmu_map(struct device *dev,
phys_addr_t paddr, uint32_t *iova_addr, size_t size)
{
struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+ struct cnss_plat_data *plat_priv;
unsigned long iova;
size_t len;
int ret = 0;
+ int flag = IOMMU_READ | IOMMU_WRITE;
+ struct pci_dev *root_port;
+ struct device_node *root_of_node;
+ bool dma_coherent = false;
if (!pci_priv)
return -ENODEV;
@@ -2632,6 +2920,8 @@ int cnss_smmu_map(struct device *dev,
return -EINVAL;
}
+ plat_priv = pci_priv->plat_priv;
+
len = roundup(size + paddr - rounddown(paddr, PAGE_SIZE), PAGE_SIZE);
iova = roundup(pci_priv->smmu_iova_ipa_start, PAGE_SIZE);
@@ -2644,9 +2934,23 @@ int cnss_smmu_map(struct device *dev,
return -ENOMEM;
}
+ if (!test_bit(DISABLE_IO_COHERENCY,
+ &plat_priv->ctrl_params.quirks)) {
+ root_port = pci_find_pcie_root_port(pci_priv->pci_dev);
+ root_of_node = root_port->dev.of_node;
+ if (root_of_node->parent) {
+ dma_coherent =
+ of_property_read_bool(root_of_node->parent,
+ "dma-coherent");
+ cnss_pr_dbg("dma-coherent is %s\n",
+ dma_coherent ? "enabled" : "disabled");
+ if (dma_coherent)
+ flag |= IOMMU_CACHE;
+ }
+ }
+
ret = iommu_map(pci_priv->iommu_domain, iova,
- rounddown(paddr, PAGE_SIZE), len,
- IOMMU_READ | IOMMU_WRITE);
+ rounddown(paddr, PAGE_SIZE), len, flag);
if (ret) {
cnss_pr_err("PA to IOVA mapping failed, ret %d\n", ret);
return ret;
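
cnss_smmu_map() now derives its IOMMU protection flags from the PCIe root port's device-tree parent: unless the DISABLE_IO_COHERENCY quirk is set, a "dma-coherent" property adds IOMMU_CACHE so the mapping is created cacheable/snooped. The flag selection could be expressed as a small helper, sketched below with hypothetical names:

#include <linux/iommu.h>
#include <linux/of.h>

/* Illustrative sketch: compute iommu_map() prot flags from a DT node. */
static int demo_smmu_prot(struct device_node *np, bool allow_coherency)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (allow_coherency && np &&
	    of_property_read_bool(np, "dma-coherent"))
		prot |= IOMMU_CACHE;	/* coherent, snooped mapping */

	return prot;
}
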
@@ -3032,6 +3336,7 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
if (cnss_pci_check_link_status(pci_priv))
return;
+ cnss_pci_dump_misc_reg(pci_priv);
cnss_pci_dump_qdss_reg(pci_priv);
ret = mhi_download_rddm_img(pci_priv->mhi_ctrl, in_panic);
@@ -3351,6 +3656,31 @@ static void cnss_pci_unregister_mhi(struct cnss_pci_data *pci_priv)
kfree(mhi_ctrl->irq);
}
+static void cnss_pci_config_regs(struct cnss_pci_data *pci_priv)
+{
+ switch (pci_priv->device_id) {
+ case QCA6390_DEVICE_ID:
+ pci_priv->wcss_reg = wcss_reg_access_seq;
+ pci_priv->wcss_reg_size = WCSS_REG_SIZE;
+ pci_priv->pcie_reg = pcie_reg_access_seq;
+ pci_priv->pcie_reg_size = PCIE_REG_SIZE;
+ pci_priv->wlaon_reg = wlaon_reg_access_seq;
+ pci_priv->wlaon_reg_size = WLAON_REG_SIZE;
+
+ /* Configure the WDOG register with a known value so that, when
+ * reading registers later, we can tell whether the HW is in the
+ * middle of WDOG reset recovery.
+ */
+ cnss_pci_reg_write
+ (pci_priv,
+ QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG,
+ QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL);
+ break;
+ default:
+ return;
+ }
+}
+
static int cnss_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
@@ -3432,6 +3762,9 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
goto disable_bus;
}
cnss_pci_get_link_status(pci_priv);
+
+ cnss_pci_config_regs(pci_priv);
+
if (EMULATION_HW)
break;
ret = cnss_suspend_pci_link(pci_priv);
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 0858b27..8dcb14a6 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -53,6 +53,12 @@ struct cnss_pci_debug_reg {
u32 val;
};
+struct cnss_misc_reg {
+ u8 wr;
+ u32 offset;
+ u32 val;
+};
+
struct cnss_pci_data {
struct pci_dev *pci_dev;
struct cnss_plat_data *plat_priv;
@@ -87,6 +93,12 @@ struct cnss_pci_data {
struct delayed_work time_sync_work;
u8 disable_pc;
struct cnss_pci_debug_reg *debug_reg;
+ struct cnss_misc_reg *wcss_reg;
+ u32 wcss_reg_size;
+ struct cnss_misc_reg *pcie_reg;
+ u32 pcie_reg_size;
+ struct cnss_misc_reg *wlaon_reg;
+ u32 wlaon_reg_size;
};
static inline void cnss_set_pci_priv(struct pci_dev *pci_dev, void *data)
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index 89e12659..02178b3 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -432,6 +432,56 @@ int cnss_wlfw_tgt_cap_send_sync(struct cnss_plat_data *plat_priv)
return ret;
}
+static int cnss_get_bdf_file_name(struct cnss_plat_data *plat_priv,
+ u32 bdf_type, char *filename,
+ u32 filename_len)
+{
+ int ret = 0;
+
+ switch (bdf_type) {
+ case CNSS_BDF_ELF:
+ if (plat_priv->board_info.board_id == 0xFF)
+ snprintf(filename, filename_len, ELF_BDF_FILE_NAME);
+ else if (plat_priv->board_info.board_id < 0xFF)
+ snprintf(filename, filename_len,
+ ELF_BDF_FILE_NAME_PREFIX "%02x",
+ plat_priv->board_info.board_id);
+ else
+ snprintf(filename, filename_len,
+ BDF_FILE_NAME_PREFIX "%02x.e%02x",
+ plat_priv->board_info.board_id >> 8 & 0xFF,
+ plat_priv->board_info.board_id & 0xFF);
+ break;
+ case CNSS_BDF_BIN:
+ if (plat_priv->board_info.board_id == 0xFF)
+ snprintf(filename, filename_len, BIN_BDF_FILE_NAME);
+ else if (plat_priv->board_info.board_id < 0xFF)
+ snprintf(filename, filename_len,
+ BIN_BDF_FILE_NAME_PREFIX "%02x",
+ plat_priv->board_info.board_id);
+ else
+ snprintf(filename, filename_len,
+ BDF_FILE_NAME_PREFIX "%02x.b%02x",
+ plat_priv->board_info.board_id >> 8 & 0xFF,
+ plat_priv->board_info.board_id & 0xFF);
+ break;
+ case CNSS_BDF_REGDB:
+ snprintf(filename, filename_len, REGDB_FILE_NAME);
+ break;
+ case CNSS_BDF_DUMMY:
+ cnss_pr_dbg("CNSS_BDF_DUMMY is set, sending dummy BDF\n");
+ snprintf(filename, filename_len, DUMMY_BDF_FILE_NAME);
+ ret = MAX_BDF_FILE_NAME;
+ break;
+ default:
+ cnss_pr_err("Invalid BDF type: %d\n",
+ plat_priv->ctrl_params.bdf_type);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
u32 bdf_type)
{
@@ -439,7 +489,7 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
struct wlfw_bdf_download_resp_msg_v01 *resp;
struct qmi_txn txn;
char filename[MAX_BDF_FILE_NAME];
- const struct firmware *fw_entry;
+ const struct firmware *fw_entry = NULL;
const u8 *temp;
unsigned int remaining;
int ret = 0;
@@ -457,46 +507,13 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
return -ENOMEM;
}
- switch (bdf_type) {
- case CNSS_BDF_ELF:
- if (plat_priv->board_info.board_id == 0xFF)
- snprintf(filename, sizeof(filename), ELF_BDF_FILE_NAME);
- else if (plat_priv->board_info.board_id < 0xFF)
- snprintf(filename, sizeof(filename),
- ELF_BDF_FILE_NAME_PREFIX "%02x",
- plat_priv->board_info.board_id);
- else
- snprintf(filename, sizeof(filename),
- BDF_FILE_NAME_PREFIX "%02x.e%02x",
- plat_priv->board_info.board_id >> 8 & 0xFF,
- plat_priv->board_info.board_id & 0xFF);
- break;
- case CNSS_BDF_BIN:
- if (plat_priv->board_info.board_id == 0xFF)
- snprintf(filename, sizeof(filename), BIN_BDF_FILE_NAME);
- else if (plat_priv->board_info.board_id < 0xFF)
- snprintf(filename, sizeof(filename),
- BIN_BDF_FILE_NAME_PREFIX "%02x",
- plat_priv->board_info.board_id);
- else
- snprintf(filename, sizeof(filename),
- BDF_FILE_NAME_PREFIX "%02x.b%02x",
- plat_priv->board_info.board_id >> 8 & 0xFF,
- plat_priv->board_info.board_id & 0xFF);
- break;
- case CNSS_BDF_REGDB:
- snprintf(filename, sizeof(filename), REGDB_FILE_NAME);
- break;
- case CNSS_BDF_DUMMY:
- cnss_pr_dbg("CNSS_BDF_DUMMY is set, sending dummy BDF\n");
- snprintf(filename, sizeof(filename), DUMMY_BDF_FILE_NAME);
+ ret = cnss_get_bdf_file_name(plat_priv, bdf_type,
+ filename, sizeof(filename));
+ if (ret > 0) {
temp = DUMMY_BDF_FILE_NAME;
remaining = MAX_BDF_FILE_NAME;
goto bypass_bdf;
- default:
- cnss_pr_err("Invalid BDF type: %d\n",
- plat_priv->ctrl_params.bdf_type);
- ret = -EINVAL;
+ } else if (ret < 0) {
goto err_req_fw;
}
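
cnss_get_bdf_file_name() (added above) maps a (bdf_type, board_id) pair to a firmware filename: board_id 0xFF selects the generic file, IDs below 0xFF append a two-hex-digit suffix to the type-specific prefix, and wider IDs are split into a high byte plus a ".eXX"/".bXX" low byte. The prefix macros are defined elsewhere in qmi.c and are not visible in this hunk; the sketch below only demonstrates the formatting, with made-up names:

#include <stdio.h>

#define DEMO_GENERIC_NAME "bdwlan.elf"	/* hypothetical */
#define DEMO_PREFIX "bdwlan.e"		/* hypothetical */
#define DEMO_SPLIT_PREFIX "bdwlan."	/* hypothetical */

static void demo_bdf_name(char *buf, size_t len, unsigned int board_id)
{
	if (board_id == 0xFF)		/* unprogrammed/generic board */
		snprintf(buf, len, DEMO_GENERIC_NAME);
	else if (board_id < 0xFF)	/* short ID: one suffix byte */
		snprintf(buf, len, DEMO_PREFIX "%02x", board_id);
	else				/* wide ID: split into two bytes */
		snprintf(buf, len, DEMO_SPLIT_PREFIX "%02x.e%02x",
			 board_id >> 8 & 0xFF, board_id & 0xFF);
}
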
@@ -522,7 +539,7 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
req->data_valid = 1;
req->end_valid = 1;
req->bdf_type_valid = 1;
- req->bdf_type = plat_priv->ctrl_params.bdf_type;
+ req->bdf_type = bdf_type;
if (remaining > QMI_WLFW_MAX_DATA_SIZE_V01) {
req->data_len = QMI_WLFW_MAX_DATA_SIZE_V01;
@@ -577,7 +594,7 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
return 0;
err_send:
- if (plat_priv->ctrl_params.bdf_type != CNSS_BDF_DUMMY)
+ if (bdf_type != CNSS_BDF_DUMMY)
release_firmware(fw_entry);
err_req_fw:
if (bdf_type != CNSS_BDF_REGDB)
@@ -758,12 +775,12 @@ int cnss_wlfw_wlan_cfg_send_sync(struct cnss_plat_data *plat_priv,
u32 i;
int ret = 0;
- cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n",
- plat_priv->driver_state);
-
if (!plat_priv)
return -ENODEV;
+ cnss_pr_dbg("Sending WLAN config message, state: 0x%lx\n",
+ plat_priv->driver_state);
+
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
diff --git a/drivers/net/wireless/cnss2/reg.h b/drivers/net/wireless/cnss2/reg.h
new file mode 100644
index 0000000..4052de4
--- /dev/null
+++ b/drivers/net/wireless/cnss2/reg.h
@@ -0,0 +1,267 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#ifndef _CNSS_REG_H
+#define _CNSS_REG_H
+
+#define QCA6390_PCIE_REMAP_BAR_CTRL_OFFSET 0x310C
+
+#define QCA6390_CE_SRC_RING_REG_BASE 0xA00000
+#define QCA6390_CE_DST_RING_REG_BASE 0xA01000
+#define QCA6390_CE_COMMON_REG_BASE 0xA18000
+
+#define QCA6390_CE_SRC_RING_BASE_LSB_OFFSET 0x0
+#define QCA6390_CE_SRC_RING_BASE_MSB_OFFSET 0x4
+#define QCA6390_CE_SRC_RING_ID_OFFSET 0x8
+#define QCA6390_CE_SRC_RING_MISC_OFFSET 0x10
+#define QCA6390_CE_SRC_CTRL_OFFSET 0x58
+#define QCA6390_CE_SRC_R0_CE_CH_SRC_IS_OFFSET 0x5C
+#define QCA6390_CE_SRC_RING_HP_OFFSET 0x400
+#define QCA6390_CE_SRC_RING_TP_OFFSET 0x404
+
+#define QCA6390_CE_DEST_RING_BASE_LSB_OFFSET 0x0
+#define QCA6390_CE_DEST_RING_BASE_MSB_OFFSET 0x4
+#define QCA6390_CE_DEST_RING_ID_OFFSET 0x8
+#define QCA6390_CE_DEST_RING_MISC_OFFSET 0x10
+#define QCA6390_CE_DEST_CTRL_OFFSET 0xB0
+#define QCA6390_CE_CH_DST_IS_OFFSET 0xB4
+#define QCA6390_CE_CH_DEST_CTRL2_OFFSET 0xB8
+#define QCA6390_CE_DEST_RING_HP_OFFSET 0x400
+#define QCA6390_CE_DEST_RING_TP_OFFSET 0x404
+
+#define QCA6390_CE_STATUS_RING_BASE_LSB_OFFSET 0x58
+#define QCA6390_CE_STATUS_RING_BASE_MSB_OFFSET 0x5C
+#define QCA6390_CE_STATUS_RING_ID_OFFSET 0x60
+#define QCA6390_CE_STATUS_RING_MISC_OFFSET 0x68
+#define QCA6390_CE_STATUS_RING_HP_OFFSET 0x408
+#define QCA6390_CE_STATUS_RING_TP_OFFSET 0x40C
+
+#define QCA6390_CE_COMMON_GXI_ERR_INTS 0x14
+#define QCA6390_CE_COMMON_GXI_ERR_STATS 0x18
+#define QCA6390_CE_COMMON_GXI_WDOG_STATUS 0x2C
+#define QCA6390_CE_COMMON_TARGET_IE_0 0x48
+#define QCA6390_CE_COMMON_TARGET_IE_1 0x4C
+
+#define QCA6390_CE_REG_INTERVAL 0x2000
+
+#define SHADOW_REG_COUNT 36
+#define QCA6390_PCIE_SHADOW_REG_VALUE_0 0x8FC
+#define QCA6390_PCIE_SHADOW_REG_VALUE_34 0x984
+#define QCA6390_PCIE_SHADOW_REG_VALUE_35 0x988
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL3 0x1F80118
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL4 0x1F8011C
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL5 0x1F80120
+
+#define SHADOW_REG_INTER_COUNT 43
+#define QCA6390_PCIE_SHADOW_REG_INTER_0 0x1E05000
+#define QCA6390_PCIE_SHADOW_REG_HUNG 0x1E050A8
+
+#define QDSS_APB_DEC_CSR_BASE 0x1C01000
+
+#define QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET 0x6C
+#define QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET 0x70
+#define QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET 0x74
+#define QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET 0x78
+
+#define MAX_UNWINDOWED_ADDRESS 0x80000
+#define WINDOW_ENABLE_BIT 0x40000000
+#define WINDOW_SHIFT 19
+#define WINDOW_VALUE_MASK 0x3F
+#define WINDOW_START MAX_UNWINDOWED_ADDRESS
+#define WINDOW_RANGE_MASK 0x7FFFF
+
+#define QCA6390_TIME_SYNC_ENABLE 0x80000000
+#define QCA6390_TIME_SYNC_CLEAR 0x0
+
+#define QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG 0x01E04234
+#define QCA6390_PCIE_SOC_WDOG_DISC_BAD_DATA_LOW_CFG_SOC_PCIE_REG_VAL 0xDEAD1234
+#define QCA6390_PCIE_PCIE_WCSS_STATUS_FOR_DEBUG_LOW_PCIE_LOCAL_REG 0x01E03140
+#define QCA6390_PCIE_SOC_PCIE_WRAP_INTR_MASK_SOC_PCIE_REG 0x1E04054
+#define QCA6390_PCIE_SOC_PCIE_WRAP_INTR_STATUS_SOC_PCIE_REG 0x1E04058
+#define QCA6390_PCIE_SOC_COMMIT_REPLAY_SOC_PCIE_REG 0x1E05090
+#define QCA6390_PCIE_PCIE_PARF_LTSSM 0x01E081B0
+#define QCA6390_PCIE_PCIE_PARF_PM_STTS 0x01E08024
+#define QCA6390_PCIE_PCIE_PARF_PM_STTS_1 0x01E08028
+#define QCA6390_PCIE_PCIE_PARF_INT_STATUS 0x01E08220
+#define QCA6390_PCIE_PCIE_INT_ALL_STATUS 0x01E08224
+#define QCA6390_PCIE_PCIE_INT_ALL_MASK 0x01E0822C
+#define QCA6390_PCIE_PCIE_PARF_BDF_TO_SID_CFG 0x01E0AC00
+#define QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_4 0x01E08530
+#define QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_NO_MHI_ACCESS_HANDLER_RD_3 0x01E0852c
+#define QCA6390_PCIE_PCIE_PARF_MHI_CLOCK_RESET_CTRL 0x01E08174
+#define QCA6390_PCIE_PCIE_PARF_MHI_BASE_ADDR_LOWER 0x01E08178
+#define QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_STATUS 0x01E084D0
+#define QCA6390_PCIE_PCIE_PARF_L1SS_SLEEP_MODE_HANDLER_CFG 0x01E084d4
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L2 0x01E0ec88
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1SUB 0x01E0ec08
+#define QCA6390_PCIE_PCIE_CORE_CONFIG 0x01E08640
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L2 0x01E0EC04
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_PM_LINKST_IN_L1 0x01E0EC0C
+#define QCA6390_PCIE_PCIE_PARF_DEBUG_CNT_AUX_CLK_IN_L1SUB_L1 0x01E0EC84
+#define QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_HIGH 0x01E030C8
+#define QCA6390_PCIE_PCIE_LOCAL_REG_WCSSAON_PCIE_SR_STATUS_LOW 0x01E030CC
+#define QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_HIGH 0x01E0313C
+#define QCA6390_PCIE_PCIE_LOCAL_REG_WCSS_STATUS_FOR_DEBUG_LOW 0x01E03140
+
+#define QCA6390_GCC_DEBUG_CLK_CTL 0x01E4025C
+
+#define QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_PLL_MODE 0x00D00200
+#define QCA6390_WCSS_WFSS_PMM_WFSS_PMM_R0_PMM_CTRL 0x00B60164
+#define QCA6390_WCSS_PMM_TOP_PMU_CX_CSR 0x00B70080
+#define QCA6390_WCSS_PMM_TOP_AON_INT_RAW_STAT 0x00B700E0
+#define QCA6390_WCSS_PMM_TOP_AON_INT_EN 0x00B700D0
+#define QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_STS 0x00B70020
+#define QCA6390_WCSS_PMM_TOP_PMU_TESTBUS_CTL 0x00B7001C
+#define QCA6390_WCSS_PMM_TOP_TESTBUS_STS 0x00B70028
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG 0x00DB0008
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_CFG_MSK 0x20
+#define QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_CTL 0x00D02000
+#define QCA6390_WCSS_Q6SS_PUBCSR_QDSP6SS_TEST_BUS_VALUE 0x00D02004
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_STS 0x00DB000C
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_CTL 0x00DB0030
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_0 0x00DB0400
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_SAW2_SPM_SLP_SEQ_ENTRY_9 0x00DB0424
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS0 0x00D90380
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS1 0x00D90384
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS2 0x00D90388
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS3 0x00D9038C
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS4 0x00D90390
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS5 0x00D90394
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_STATUS6 0x00D90398
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE0 0x00D90100
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE1 0x00D90104
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE2 0x00D90108
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE3 0x00D9010C
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE4 0x00D90110
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE5 0x00D90114
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_ENABLE6 0x00D90118
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING0 0x00D90500
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING1 0x00D90504
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING2 0x00D90508
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING3 0x00D9050C
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING4 0x00D90510
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING5 0x00D90514
+#define QCA6390_WCSS_Q6SS_PRIVCSR_QDSP6SS_L2VIC_INT_PENDING6 0x00D90518
+#define QCA6390_WCSS_CC_WCSS_UMAC_NOC_CBCR 0x00C3029C
+#define QCA6390_WCSS_CC_WCSS_UMAC_AHB_CBCR 0x00C302BC
+#define QCA6390_WCSS_CC_WCSS_UMAC_GDSCR 0x00C30298
+#define QCA6390_WCSS_CC_WCSS_WLAN1_GDSCR 0x00C300C4
+#define QCA6390_WCSS_CC_WCSS_WLAN2_GDSCR 0x00C30138
+#define QCA6390_WCSS_PMM_TOP_PMM_INT_CLR 0x00B70168
+#define QCA6390_WCSS_PMM_TOP_AON_INT_STICKY_EN 0x00B700D8
+
+#define QCA6390_TLMM_GPIO_IN_OUT57 0x01839004
+#define QCA6390_TLMM_GPIO_INTR_CFG57 0x01839008
+#define QCA6390_TLMM_GPIO_INTR_STATUS57 0x0183900C
+#define QCA6390_TLMM_GPIO_IN_OUT59 0x0183b004
+#define QCA6390_TLMM_GPIO_INTR_CFG59 0x0183b008
+#define QCA6390_TLMM_GPIO_INTR_STATUS59 0x0183b00C
+
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_STATUS_REG2 0x00B6017C
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_STATUS_REG2 0x00B60190
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN2_CFG_REG1 0x00B6018C
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_PMM_WLAN1_CFG_REG1 0x00B60178
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN2_APS_STATUS_REG1 0x00B600B0
+#define QCA6390_WFSS_PMM_WFSS_PMM_R0_WLAN1_APS_STATUS_REG1 0x00B60044
+
+#define QCA6390_WLAON_WCSSAON_PCIE_SR_STATUS_HI_REG 0x01F806C4
+#define QCA6390_WLAON_SOC_POWER_CTRL 0x01F80000
+#define QCA6390_WLAON_PCIE_PWR_CTRL_REG 0x01F806BC
+#define QCA6390_WLAON_WCSSAON_PCIE_SR_STATUS_LO_REG 0x1F806C8
+#define QCA6390_WLAON_WCSS_TCSR_PMM_SR_STATUS_HI_REG 0x1F806CC
+#define QCA6390_WLAON_WCSS_TCSR_PMM_SR_STATUS_LO_REG 0x1F806D0
+#define QCA6390_WLAON_SOC_PWR_WDG_BARK_THRSHD 0x1F80004
+#define QCA6390_WLAON_SOC_PWR_WDG_BITE_THRSHD 0x1F80008
+#define QCA6390_WLAON_SW_COLD_RESET 0x1F8000C
+#define QCA6390_WLAON_RFA_MEM_SLP_NRET_N_OVERRIDE 0x1F8001C
+#define QCA6390_WLAON_GDSC_DELAY_SETTING 0x1F80024
+#define QCA6390_WLAON_GDSC_DELAY_SETTING2 0x1F80028
+#define QCA6390_WLAON_WL_PWR_STATUS_REG 0x1F8002C
+#define QCA6390_WLAON_WL_AON_DBG_CFG_REG 0x1F80030
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL1 0x1F80100
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL6 0x1F80108
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL7 0x1F8010C
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL3 0x1F80118
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL4 0x1F8011C
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL5 0x1F80120
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL8 0x1F801F0
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL2 0x1F801F4
+#define QCA6390_WLAON_GLOBAL_COUNTER_CTRL9 0x1F801F8
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL1 0x1F80200
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL2 0x1F80204
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL3 0x1F80208
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL4 0x1F8020C
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL5 0x1F80210
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL6 0x1F80214
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL7 0x1F80218
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL8 0x1F8021C
+#define QCA6390_WLAON_RTC_CLK_CAL_CTRL9 0x1F80220
+#define QCA6390_WLAON_WCSSAON_CONFIG_REG 0x1F80300
+#define QCA6390_WLAON_WLAN_OEM_DEBUG_REG 0x1F80304
+#define QCA6390_WLAON_WLAN_RAM_DUMP_REG 0x1F80308
+#define QCA6390_WLAON_QDSS_WCSS_REG 0x1F8030C
+#define QCA6390_WLAON_QDSS_WCSS_ACK 0x1F80310
+#define QCA6390_WLAON_WL_CLK_CNTL_KDF_REG 0x1F80314
+#define QCA6390_WLAON_WL_CLK_CNTL_PMU_HFRC_REG 0x1F80318
+#define QCA6390_WLAON_QFPROM_PWR_CTRL_REG 0x1F8031C
+#define QCA6390_WLAON_DLY_CONFIG 0x1F80400
+#define QCA6390_WLAON_WLAON_Q6_IRQ_REG 0x1F80404
+#define QCA6390_WLAON_PCIE_INTF_SW_CFG_REG 0x1F80408
+#define QCA6390_WLAON_PCIE_INTF_STICKY_SW_CFG_REG 0x1F8040C
+#define QCA6390_WLAON_PCIE_INTF_PHY_SW_CFG_REG 0x1F80410
+#define QCA6390_WLAON_PCIE_INTF_PHY_NOCSR_SW_CFG_REG 0x1F80414
+#define QCA6390_WLAON_Q6_COOKIE_BIT 0x1F80500
+#define QCA6390_WLAON_WARM_SW_ENTRY 0x1F80504
+#define QCA6390_WLAON_RESET_DBG_SW_ENTRY 0x1F80508
+#define QCA6390_WLAON_WL_PMUNOC_CFG_REG 0x1F8050C
+#define QCA6390_WLAON_RESET_CAUSE_CFG_REG 0x1F80510
+#define QCA6390_WLAON_SOC_WCSSAON_WAKEUP_IRQ_7_EN_REG 0x1F80514
+#define QCA6390_WLAON_DEBUG 0x1F80600
+#define QCA6390_WLAON_SOC_PARAMETERS 0x1F80604
+#define QCA6390_WLAON_WLPM_SIGNAL 0x1F80608
+#define QCA6390_WLAON_SOC_RESET_CAUSE_REG 0x1F8060C
+#define QCA6390_WLAON_WAKEUP_PCIE_SOC_REG 0x1F80610
+#define QCA6390_WLAON_PBL_STACK_CANARY 0x1F80614
+#define QCA6390_WLAON_MEM_TOT_NUM_GRP_REG 0x1F80618
+#define QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP0_REG 0x1F8061C
+#define QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP1_REG 0x1F80620
+#define QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP2_REG 0x1F80624
+#define QCA6390_WLAON_MEM_TOT_BANKS_IN_GRP3_REG 0x1F80628
+#define QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP0_REG 0x1F8062C
+#define QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP1_REG 0x1F80630
+#define QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP2_REG 0x1F80634
+#define QCA6390_WLAON_MEM_TOT_SIZE_IN_GRP3_REG 0x1F80638
+#define QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP0_REG 0x1F8063C
+#define QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP1_REG 0x1F80640
+#define QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP2_REG 0x1F80644
+#define QCA6390_WLAON_MEM_SLP_NRET_OVERRIDE_GRP3_REG 0x1F80648
+#define QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP0_REG 0x1F8064C
+#define QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP1_REG 0x1F80650
+#define QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP2_REG 0x1F80654
+#define QCA6390_WLAON_MEM_SLP_RET_OVERRIDE_GRP3_REG 0x1F80658
+#define QCA6390_WLAON_MEM_CNT_SEL_REG 0x1F8065C
+#define QCA6390_WLAON_MEM_NO_EXTBHS_REG 0x1F80660
+#define QCA6390_WLAON_MEM_DEBUG_REG 0x1F80664
+#define QCA6390_WLAON_MEM_DEBUG_BUS_REG 0x1F80668
+#define QCA6390_WLAON_MEM_REDUN_CFG_REG 0x1F8066C
+#define QCA6390_WLAON_WL_AON_SPARE2 0x1F80670
+#define QCA6390_WLAON_VSEL_CFG_FOR_WL_RET_DISABLE_REG 0x1F80680
+#define QCA6390_WLAON_BTFM_WLAN_IPC_STATUS_REG 0x1F80690
+#define QCA6390_WLAON_MPM_COUNTER_CHICKEN_BITS 0x1F806A0
+#define QCA6390_WLAON_WLPM_CHICKEN_BITS 0x1F806A4
+#define QCA6390_WLAON_PCIE_PHY_PWR_REG 0x1F806A8
+#define QCA6390_WLAON_WL_CLK_CNTL_PMU_LPO2M_REG 0x1F806AC
+#define QCA6390_WLAON_WL_SS_ROOT_CLK_SWITCH_REG 0x1F806B0
+#define QCA6390_WLAON_POWERCTRL_PMU_REG 0x1F806B4
+#define QCA6390_WLAON_POWERCTRL_MEM_REG 0x1F806B8
+#define QCA6390_WLAON_SOC_PWR_PROFILE_REG 0x1F806C0
+#define QCA6390_WLAON_MEM_SVS_CFG_REG 0x1F806D4
+#define QCA6390_WLAON_CMN_AON_MISC_REG 0x1F806D8
+#define QCA6390_WLAON_INTR_STATUS 0x1F80700
+
+#define QCA6390_SYSPM_SYSPM_PWR_STATUS 0x1F82000
+#define QCA6390_SYSPM_DBG_BTFM_AON_REG 0x1F82004
+#define QCA6390_SYSPM_DBG_BUS_SEL_REG 0x1F82008
+#define QCA6390_SYSPM_WCSSAON_SR_STATUS 0x1F8200C
+
+#endif
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 8b7d70e..3fe7605 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -724,7 +724,7 @@ static int iwl_mvm_sar_get_ewrd_table(struct iwl_mvm *mvm)
for (i = 0; i < n_profiles; i++) {
/* the tables start at element 3 */
- static int pos = 3;
+ int pos = 3;
/* The EWRD profiles officially go from 2 to 4, but we
* save them in sar_profiles[1-3] (because we don't
@@ -836,6 +836,22 @@ int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}
+static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
+{
+ /*
+ * The GEO_TX_POWER_LIMIT command is not supported on earlier
+ * firmware versions. Unfortunately, we don't have a TLV API
+ * flag to rely on, so rely on the major version which is in
+ * the first byte of ucode_ver. This was implemented
+ * initially on version 38 and then backported to 36, 29 and
+ * 17.
+ */
+ return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
+ IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
+}
+
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
struct iwl_geo_tx_power_profiles_resp *resp;
@@ -851,6 +867,9 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
.data = { &geo_cmd },
};
+ if (!iwl_mvm_sar_geo_support(mvm))
+ return -EOPNOTSUPP;
+
ret = iwl_mvm_send_cmd(mvm, &cmd);
if (ret) {
IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
@@ -876,13 +895,7 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
int ret, i, j;
u16 cmd_wide_id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT);
- /*
- * This command is not supported on earlier firmware versions.
- * Unfortunately, we don't have a TLV API flag to rely on, so
- * rely on the major version which is in the first byte of
- * ucode_ver.
- */
- if (IWL_UCODE_SERIAL(mvm->fw->ucode_ver) < 41)
+ if (!iwl_mvm_sar_geo_support(mvm))
return 0;
ret = iwl_mvm_sar_get_wgds_table(mvm);
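
iwl_mvm_sar_geo_support() centralizes the firmware-capability check that used to live only in iwl_mvm_sar_geo_init(), and widens it from "major >= 41" to the majors that actually carry the backported GEO_TX_POWER_LIMIT support (38+, 36, 29, 17); iwl_mvm_get_sar_geo_profile() now rejects unsupported firmware with -EOPNOTSUPP instead of sending a command that will fail. A generic sketch of gating on a packed version word follows; the byte layout here is an assumption for illustration, not iwlwifi's real macro:

#include <stdbool.h>
#include <stdint.h>

/* Assumed layout: major version packed in the top byte of a u32. */
#define DEMO_UCODE_MAJOR(ver) (((ver) >> 24) & 0xFF)

static bool demo_geo_supported(uint32_t ucode_ver)
{
	uint8_t major = DEMO_UCODE_MAJOR(ucode_ver);

	/* landed in 38, backported to 36, 29 and 17 */
	return major >= 38 || major == 36 || major == 29 || major == 17;
}
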
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 93f0d38..42fdb79 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -403,6 +403,8 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
DMA_TO_DEVICE);
}
+ meta->tbs = 0;
+
if (trans->cfg->use_tfh) {
struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7cd428c..ce2dd06 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -3502,10 +3502,12 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, &hwsim_genl_family,
NLM_F_MULTI, HWSIM_CMD_GET_RADIO);
- if (!hdr)
+ if (hdr) {
+ genl_dump_check_consistent(cb, hdr);
+ genlmsg_end(skb, hdr);
+ } else {
res = -EMSGSIZE;
- genl_dump_check_consistent(cb, hdr);
- genlmsg_end(skb, hdr);
+ }
}
done:
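
The old hwsim code set res = -EMSGSIZE when genlmsg_put() returned NULL but still fell through to genl_dump_check_consistent() and genlmsg_end(), dereferencing the NULL header; the rewrite keeps both calls inside the success branch. The corrected shape of a netlink dump record, sketched with illustrative identifiers:

#include <net/genetlink.h>

static struct genl_family demo_genl_family;	/* illustrative */
#define DEMO_CMD_GET 0				/* illustrative */

static int demo_dump_one(struct sk_buff *skb, struct netlink_callback *cb)
{
	void *hdr;

	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			  cb->nlh->nlmsg_seq, &demo_genl_family,
			  NLM_F_MULTI, DEMO_CMD_GET);
	if (!hdr)
		return -EMSGSIZE;	/* never touch a NULL header */

	genl_dump_check_consistent(cb, hdr);
	/* ... nla_put() attributes here ... */
	genlmsg_end(skb, hdr);
	return 0;
}
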
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index b025ba1..e39bb5c 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -124,6 +124,7 @@ enum {
#define MWIFIEX_MAX_TOTAL_SCAN_TIME (MWIFIEX_TIMER_10S - MWIFIEX_TIMER_1S)
+#define WPA_GTK_OUI_OFFSET 2
#define RSN_GTK_OUI_OFFSET 2
#define MWIFIEX_OUI_NOT_PRESENT 0
diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c b/drivers/net/wireless/marvell/mwifiex/scan.c
index 6dd771c..ed27147 100644
--- a/drivers/net/wireless/marvell/mwifiex/scan.c
+++ b/drivers/net/wireless/marvell/mwifiex/scan.c
@@ -181,7 +181,8 @@ mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher)
u8 ret = MWIFIEX_OUI_NOT_PRESENT;
if (has_vendor_hdr(bss_desc->bcn_wpa_ie, WLAN_EID_VENDOR_SPECIFIC)) {
- iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data;
+ iebody = (struct ie_body *)((u8 *)bss_desc->bcn_wpa_ie->data +
+ WPA_GTK_OUI_OFFSET);
oui = &mwifiex_wpa_oui[cipher][0];
ret = mwifiex_search_oui_in_ie(iebody, oui);
if (ret)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index d5081ff..1c84910 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -925,6 +925,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
nskb = xenvif_alloc_skb(0);
if (unlikely(nskb == NULL)) {
+ skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
xenvif_tx_err(queue, &txreq, extra_count, idx);
if (net_ratelimit())
@@ -940,6 +941,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
/* Failure in xenvif_set_skb_gso is fatal. */
+ skb_shinfo(skb)->nr_frags = 0;
kfree_skb(skb);
kfree_skb(nskb);
break;
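
Both xen-netback failure paths now zero skb_shinfo(skb)->nr_frags before kfree_skb(): nr_frags was raised to MAX_SKB_FRAGS before the frag pages were actually grant-mapped, so freeing the skb with the count still set would have the destructor release pages the driver never attached. A minimal sketch of the invariant (illustrative name):

#include <linux/skbuff.h>

/* Illustrative: never free an skb advertising frags it does not own. */
static void demo_drop_unfilled_skb(struct sk_buff *skb)
{
	skb_shinfo(skb)->nr_frags = 0;	/* slots reserved, never populated */
	kfree_skb(skb);
}
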
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index e65d027..529be35a 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -244,7 +244,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
/* Reset possible fault of previous session */
clear_bit(NFCMRVL_PHY_ERROR, &priv->flags);
- if (priv->config.reset_n_io) {
+ if (gpio_is_valid(priv->config.reset_n_io)) {
nfc_info(priv->dev, "reset the chip\n");
gpio_set_value(priv->config.reset_n_io, 0);
usleep_range(5000, 10000);
@@ -255,7 +255,7 @@ void nfcmrvl_chip_reset(struct nfcmrvl_private *priv)
void nfcmrvl_chip_halt(struct nfcmrvl_private *priv)
{
- if (priv->config.reset_n_io)
+ if (gpio_is_valid(priv->config.reset_n_io))
gpio_set_value(priv->config.reset_n_io, 0);
}
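
The old test `if (priv->config.reset_n_io)` treated GPIO 0 as "not wired" even though 0 is a valid GPIO number; the fix switches the sentinel to -EINVAL (see the uart.c and usb.c hunks below) and tests with gpio_is_valid(), which is false for any negative value. Sketch:

#include <linux/gpio.h>

/* Illustrative: negative sentinel means "no reset line"; GPIO 0 stays usable */
static void demo_chip_halt(int reset_n_io)
{
	if (gpio_is_valid(reset_n_io))
		gpio_set_value(reset_n_io, 0);
}
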
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 9a22056..e5a622c 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -26,7 +26,7 @@
static unsigned int hci_muxed;
static unsigned int flow_control;
static unsigned int break_control;
-static unsigned int reset_n_io;
+static int reset_n_io = -EINVAL;
/*
** NFCMRVL NCI OPS
@@ -231,5 +231,5 @@ MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal.");
module_param(hci_muxed, uint, 0);
MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one.");
-module_param(reset_n_io, uint, 0);
+module_param(reset_n_io, int, 0);
MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal.");
diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c
index 945cc90..888e298 100644
--- a/drivers/nfc/nfcmrvl/usb.c
+++ b/drivers/nfc/nfcmrvl/usb.c
@@ -305,6 +305,7 @@ static int nfcmrvl_probe(struct usb_interface *intf,
/* No configuration for USB */
memset(&config, 0, sizeof(config));
+ config.reset_n_io = -EINVAL;
nfc_info(&udev->dev, "intf %p id %p\n", intf, id);
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 2c8f425..1d49640 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -41,21 +41,9 @@ static const struct of_device_id msm_match_table[] = {
MODULE_DEVICE_TABLE(of, msm_match_table);
-#define DEV_COUNT 1
-#define DEVICE_NAME "nq-nci"
-#define CLASS_NAME "nqx"
-#define MAX_BUFFER_SIZE (320)
-#define WAKEUP_SRC_TIMEOUT (2000)
-#define MAX_RETRY_COUNT 3
-#define NCI_RESET_CMD_LEN 4
-#define NCI_RESET_RSP_LEN 4
-#define NCI_RESET_NTF_LEN 13
-#define NCI_GET_VERSION_CMD_LEN 8
-#define NCI_GET_VERSION_RSP_LEN 12
-#define MAX_IRQ_WAIT_TIME (90) //in ms
-
struct nqx_dev {
wait_queue_head_t read_wq;
+ wait_queue_head_t cold_reset_read_wq;
struct mutex read_mutex;
struct mutex dev_ref_mutex;
struct i2c_client *client;
@@ -72,10 +60,14 @@ struct nqx_dev {
unsigned int ese_gpio;
/* NFC VEN pin state powered by Nfc */
bool nfc_ven_enabled;
+ /* NFC state reflected from MW */
+ bool nfc_enabled;
/* NFC_IRQ state */
bool irq_enabled;
/* NFC_IRQ wake-up state */
bool irq_wake_up;
+ bool cold_reset_rsp_pending;
+ uint8_t cold_reset_status;
spinlock_t irq_enabled_lock;
unsigned int count_irq;
/* NFC_IRQ Count */
@@ -97,6 +89,9 @@ static int nfcc_reboot(struct notifier_block *notifier, unsigned long val,
static int nqx_clock_select(struct nqx_dev *nqx_dev);
/*clock disable function*/
static int nqx_clock_deselect(struct nqx_dev *nqx_dev);
+static int nqx_standby_write(struct nqx_dev *nqx_dev,
+ const unsigned char *buf, size_t len);
+
static struct notifier_block nfcc_notifier = {
.notifier_call = nfcc_reboot,
.next = NULL,
@@ -159,6 +154,92 @@ static irqreturn_t nqx_dev_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static int is_data_available_for_read(struct nqx_dev *nqx_dev)
+{
+ int ret;
+
+ nqx_enable_irq(nqx_dev);
+ ret = wait_event_interruptible_timeout(nqx_dev->read_wq,
+ !nqx_dev->irq_enabled, msecs_to_jiffies(MAX_IRQ_WAIT_TIME));
+ return ret;
+}
+
+static int send_cold_reset_cmd(struct nqx_dev *nqx_dev)
+{
+ int ret;
+ char *cold_reset_cmd = NULL;
+
+ if (gpio_get_value(nqx_dev->firm_gpio)) {
+ dev_err(&nqx_dev->client->dev, "FW download in-progress\n");
+ return -EBUSY;
+ }
+ if (!gpio_get_value(nqx_dev->en_gpio)) {
+ dev_err(&nqx_dev->client->dev, "VEN LOW - NFCC powered off\n");
+ return -ENODEV;
+ }
+ cold_reset_cmd = kzalloc(COLD_RESET_CMD_LEN, GFP_DMA | GFP_KERNEL);
+ if (!cold_reset_cmd)
+ return -ENOMEM;
+
+ cold_reset_cmd[0] = COLD_RESET_CMD_GID;
+ cold_reset_cmd[1] = COLD_RESET_OID;
+ cold_reset_cmd[2] = COLD_RESET_CMD_PAYLOAD_LEN;
+
+ ret = nqx_standby_write(nqx_dev, cold_reset_cmd, COLD_RESET_CMD_LEN);
+ if (ret < 0) {
+ dev_err(&nqx_dev->client->dev,
+ "%s: write failed after max retry\n", __func__);
+ }
+ kfree(cold_reset_cmd);
+ return ret;
+}
+
+static void read_cold_reset_rsp(struct nqx_dev *nqx_dev, bool isNfcEnabled,
+ char *header)
+{
+ int ret = -1;
+ char *cold_reset_rsp = NULL;
+
+ cold_reset_rsp = kzalloc(COLD_RESET_RSP_LEN, GFP_DMA | GFP_KERNEL);
+ if (!cold_reset_rsp)
+ return;
+
+ /*
+	 * read the header here only if NFC is disabled;
+	 * in the enabled case it has already been read by the
+	 * nfc_read thread
+ */
+ if (!isNfcEnabled) {
+ ret = i2c_master_recv(nqx_dev->client, cold_reset_rsp,
+ NCI_HEADER_LEN);
+ if (ret != NCI_HEADER_LEN) {
+ dev_err(&nqx_dev->client->dev,
+ "%s: failure to read cold reset rsp header\n",
+ __func__);
+ goto error;
+ }
+ } else {
+ memcpy(cold_reset_rsp, header, NCI_HEADER_LEN);
+ }
+
+ if ((NCI_HEADER_LEN + cold_reset_rsp[2]) > COLD_RESET_RSP_LEN) {
+ dev_err(&nqx_dev->client->dev,
+ "%s: - invalid response for cold_reset\n", __func__);
+ ret = -EINVAL;
+ goto error;
+ }
+ ret = i2c_master_recv(nqx_dev->client, &cold_reset_rsp[NCI_PAYLOAD_IDX],
+ cold_reset_rsp[2]);
+ if (ret != cold_reset_rsp[2]) {
+ dev_err(&nqx_dev->client->dev,
+ "%s: failure to read cold reset rsp status\n",
+ __func__);
+ goto error;
+ }
+ nqx_dev->cold_reset_status = cold_reset_rsp[NCI_PAYLOAD_IDX];
+error:
+ kfree(cold_reset_rsp);
+}
+
static ssize_t nfc_read(struct file *filp, char __user *buf,
size_t count, loff_t *offset)
{
@@ -232,6 +313,24 @@ static ssize_t nfc_read(struct file *filp, char __user *buf,
ret = -EIO;
goto err;
}
+	/* check whether this is the response to the cold reset command;
+	 * the NFC HAL process must not receive this data, since the
+	 * command was sent by the eSE HAL
+ */
+ if (nqx_dev->cold_reset_rsp_pending
+ && (tmp[0] == COLD_RESET_RSP_GID)
+ && (tmp[1] == COLD_RESET_OID)) {
+ read_cold_reset_rsp(nqx_dev, true, tmp);
+ nqx_dev->cold_reset_rsp_pending = false;
+ wake_up_interruptible(&nqx_dev->cold_reset_read_wq);
+ mutex_unlock(&nqx_dev->read_mutex);
+ /*
+	 * the NFC process doesn't know a cold reset command was sent,
+	 * since it was initiated by the eSE process, so no data should
+	 * be returned to the NFC process
+ */
+ return 0;
+ }
#ifdef NFC_KERNEL_BU
dev_dbg(&nqx_dev->client->dev, "%s : NfcNciRx %x %x %x\n",
__func__, tmp[0], tmp[1], tmp[2]);
@@ -326,17 +425,19 @@ static int nqx_standby_write(struct nqx_dev *nqx_dev,
return ret;
}
+
/*
* Power management of the SN100 eSE
* eSE and NFCC both are powered using VEN gpio in SN100,
* VEN HIGH - eSE and NFCC both are powered on
 * VEN LOW - eSE and NFCC both are powered down
*/
+
static int sn100_ese_pwr(struct nqx_dev *nqx_dev, unsigned long arg)
{
int r = -1;
- if (arg == 0) {
+ if (arg == ESE_POWER_ON) {
/**
* Let's store the NFC VEN pin state
* will check stored value in case of eSE power off request,
@@ -355,7 +456,7 @@ static int sn100_ese_pwr(struct nqx_dev *nqx_dev, unsigned long arg)
dev_dbg(&nqx_dev->client->dev, "en_gpio already HIGH\n");
}
r = 0;
- } else if (arg == 1) {
+ } else if (arg == ESE_POWER_OFF) {
if (!nqx_dev->nfc_ven_enabled) {
dev_dbg(&nqx_dev->client->dev, "NFC not enabled, disabling en_gpio\n");
gpio_set_value(nqx_dev->en_gpio, 0);
@@ -365,7 +466,40 @@ static int sn100_ese_pwr(struct nqx_dev *nqx_dev, unsigned long arg)
dev_dbg(&nqx_dev->client->dev, "keep en_gpio high as NFC is enabled\n");
}
r = 0;
- } else if (arg == 3) {
+ } else if (arg == ESE_COLD_RESET) {
+ // set default value for status as failure
+ nqx_dev->cold_reset_status = EIO;
+
+ r = send_cold_reset_cmd(nqx_dev);
+ if (r <= 0) {
+ dev_err(&nqx_dev->client->dev,
+ "failed to send cold reset command\n");
+ return nqx_dev->cold_reset_status;
+ }
+ nqx_dev->cold_reset_rsp_pending = true;
+ // check if NFC is enabled
+ if (nqx_dev->nfc_enabled) {
+ /*
+ * nfc_read thread will initiate cold reset response
+ * and it will signal for data available
+ */
+ wait_event_interruptible(nqx_dev->cold_reset_read_wq,
+ !nqx_dev->cold_reset_rsp_pending);
+ } else {
+ /*
+ * Read data as NFC thread is not active
+ */
+ r = is_data_available_for_read(nqx_dev);
+ if (r <= 0) {
+ nqx_disable_irq(nqx_dev);
+ nqx_dev->cold_reset_rsp_pending = false;
+ return nqx_dev->cold_reset_status;
+ }
+ read_cold_reset_rsp(nqx_dev, false, NULL);
+ nqx_dev->cold_reset_rsp_pending = false;
+ }
+ r = nqx_dev->cold_reset_status;
+ } else if (arg == ESE_POWER_STATE) {
// eSE power state
r = gpio_get_value(nqx_dev->en_gpio);
}
@@ -556,7 +690,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
int r = 0;
struct nqx_dev *nqx_dev = filp->private_data;
- if (arg == 0) {
+ if (arg == NFC_POWER_OFF) {
/*
* We are attempting a hardware reset so let us disable
* interrupts to avoid spurious notifications to upper
@@ -590,7 +724,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
dev_err(&nqx_dev->client->dev, "unable to disable clock\n");
}
nqx_dev->nfc_ven_enabled = false;
- } else if (arg == 1) {
+ } else if (arg == NFC_POWER_ON) {
nqx_enable_irq(nqx_dev);
dev_dbg(&nqx_dev->client->dev,
"gpio_set_value enable: %s: info: %p\n",
@@ -607,7 +741,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
dev_err(&nqx_dev->client->dev, "unable to enable clock\n");
}
nqx_dev->nfc_ven_enabled = true;
- } else if (arg == 2) {
+ } else if (arg == NFC_FW_DWL_VEN_TOGGLE) {
/*
 * We are switching to Download Mode, toggle the enable pin
* in order to set the NFCC in the new mode
@@ -629,7 +763,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
usleep_range(10000, 10100);
gpio_set_value(nqx_dev->en_gpio, 1);
usleep_range(10000, 10100);
- } else if (arg == 4) {
+ } else if (arg == NFC_FW_DWL_HIGH) {
/*
* Setting firmware download gpio to HIGH for SN100U
* before FW download start
@@ -641,7 +775,7 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
} else
dev_err(&nqx_dev->client->dev,
"firm_gpio is invalid\n");
- } else if (arg == 6) {
+ } else if (arg == NFC_FW_DWL_LOW) {
/*
* Setting firmware download gpio to LOW for SN100U
* FW download finished
@@ -654,6 +788,16 @@ int nfc_ioctl_power_states(struct file *filp, unsigned long arg)
dev_err(&nqx_dev->client->dev,
"firm_gpio is invalid\n");
}
+ } else if (arg == NFC_ENABLE) {
+ /*
+ * Setting flag true when NFC is enabled
+ */
+ nqx_dev->nfc_enabled = true;
+ } else if (arg == NFC_DISABLE) {
+ /*
+ * Setting flag false when NFC is disabled
+ */
+ nqx_dev->nfc_enabled = false;
} else {
r = -ENOIOCTLCMD;
}
@@ -808,7 +952,7 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
}
nci_reset_ntf = kzalloc(NCI_RESET_NTF_LEN + 1, GFP_DMA | GFP_KERNEL);
- if (!nci_reset_rsp) {
+ if (!nci_reset_ntf) {
ret = -ENOMEM;
goto done;
}
@@ -897,9 +1041,8 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
}
goto err_nfcc_reset_failed;
}
- nqx_enable_irq(nqx_dev);
- ret = wait_event_interruptible_timeout(nqx_dev->read_wq,
- !nqx_dev->irq_enabled, msecs_to_jiffies(MAX_IRQ_WAIT_TIME));
+
+ ret = is_data_available_for_read(nqx_dev);
if (ret <= 0) {
nqx_disable_irq(nqx_dev);
goto err_nfcc_hw_check;
@@ -915,9 +1058,8 @@ static int nfcc_hw_check(struct i2c_client *client, struct nqx_dev *nqx_dev)
goto reset_enable_gpio;
goto err_nfcc_hw_check;
}
- nqx_enable_irq(nqx_dev);
- ret = wait_event_interruptible_timeout(nqx_dev->read_wq,
- !nqx_dev->irq_enabled, msecs_to_jiffies(MAX_IRQ_WAIT_TIME));
+
+ ret = is_data_available_for_read(nqx_dev);
if (ret <= 0) {
nqx_disable_irq(nqx_dev);
goto err_nfcc_hw_check;
@@ -1280,6 +1422,7 @@ static int nqx_probe(struct i2c_client *client,
/* init mutex and queues */
init_waitqueue_head(&nqx_dev->read_wq);
+ init_waitqueue_head(&nqx_dev->cold_reset_read_wq);
mutex_init(&nqx_dev->read_mutex);
mutex_init(&nqx_dev->dev_ref_mutex);
spin_lock_init(&nqx_dev->irq_enabled_lock);
@@ -1362,6 +1505,8 @@ static int nqx_probe(struct i2c_client *client,
device_set_wakeup_capable(&client->dev, true);
i2c_set_clientdata(client, nqx_dev);
nqx_dev->irq_wake_up = false;
+ nqx_dev->cold_reset_rsp_pending = false;
+ nqx_dev->nfc_enabled = false;
dev_err(&client->dev,
"%s: probing NFCC NQxxx exited successfully\n",
diff --git a/drivers/nfc/nq-nci.h b/drivers/nfc/nq-nci.h
index 12b0737..8d807ec 100644
--- a/drivers/nfc/nq-nci.h
+++ b/drivers/nfc/nq-nci.h
@@ -24,12 +24,65 @@
#define SET_EMULATOR_TEST_POINT _IOW(0xE9, 0x05, unsigned int)
#define NFCC_INITIAL_CORE_RESET_NTF _IOW(0xE9, 0x10, unsigned int)
+#define DEV_COUNT 1
+#define DEVICE_NAME "nq-nci"
+#define CLASS_NAME "nqx"
+#define MAX_BUFFER_SIZE (320)
+#define WAKEUP_SRC_TIMEOUT (2000)
+#define NCI_HEADER_LEN 3
+#define NCI_PAYLOAD_IDX 3
+#define MAX_RETRY_COUNT 3
+#define NCI_RESET_CMD_LEN 4
+#define NCI_RESET_RSP_LEN 4
+#define NCI_RESET_NTF_LEN 13
+#define NCI_GET_VERSION_CMD_LEN 8
+#define NCI_GET_VERSION_RSP_LEN 12
+#define MAX_IRQ_WAIT_TIME (90) //in ms
+#define COLD_RESET_CMD_LEN 3
+#define COLD_RESET_RSP_LEN 4
+#define COLD_RESET_CMD_GID 0x2F
+#define COLD_RESET_CMD_PAYLOAD_LEN 0x00
+#define COLD_RESET_RSP_GID 0x4F
+#define COLD_RESET_OID 0x1E
+
#define NFC_RX_BUFFER_CNT_START (0x0)
#define PAYLOAD_HEADER_LENGTH (0x3)
#define PAYLOAD_LENGTH_MAX (256)
#define BYTE (0x8)
#define NCI_IDENTIFIER (0x10)
+enum ese_ioctl_request {
+ /* eSE POWER ON */
+ ESE_POWER_ON = 0,
+ /* eSE POWER OFF */
+ ESE_POWER_OFF,
+ /* eSE COLD RESET */
+ ESE_COLD_RESET,
+ /* eSE POWER STATE */
+ ESE_POWER_STATE
+};
+
+enum nfcc_ioctl_request {
+ /* NFC disable request with VEN LOW */
+ NFC_POWER_OFF = 0,
+ /* NFC enable request with VEN Toggle */
+ NFC_POWER_ON,
+ /* firmware download request with VEN Toggle */
+ NFC_FW_DWL_VEN_TOGGLE,
+ /* ISO reset request */
+ NFC_ISO_RESET,
+ /* request for firmware download gpio HIGH */
+ NFC_FW_DWL_HIGH,
+ /* hard reset request */
+ NFC_HARD_RESET,
+ /* request for firmware download gpio LOW */
+ NFC_FW_DWL_LOW,
+ /* NFC enable without VEN gpio modification */
+ NFC_ENABLE,
+ /* NFC disable without VEN gpio modification */
+ NFC_DISABLE
+};
+
enum nfcc_initial_core_reset_ntf {
TIMEDOUT_INITIAL_CORE_RESET_NTF = 0, /* 0*/
ARRIVED_INITIAL_CORE_RESET_NTF, /* 1 */
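The new enums above give symbolic names to the magic ioctl argument values the driver previously compared against (0/1/2/3 on the eSE path, 0/1/2/4/6 on the NFC path). A user-space sketch of requesting ESE_COLD_RESET through the nq-nci character device follows; ESE_SET_PWR is a hypothetical request code chosen for illustration, since the eSE power ioctl definition is not part of the hunks shown here:

/* Hypothetical caller: ESE_SET_PWR is assumed, not taken from this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define ESE_SET_PWR _IOW(0xE9, 0x02, unsigned int)	/* assumed request code */

int main(void)
{
	int fd = open("/dev/nq-nci", O_RDWR);
	int status;

	if (fd < 0)
		return 1;
	status = ioctl(fd, ESE_SET_PWR, 2 /* ESE_COLD_RESET */);
	printf("cold reset status: %d\n", status);	/* 0 == NCI success */
	close(fd);
	return 0;
}

Per the sn100_ese_pwr() flow above, the returned value is the NCI status byte from the cold reset response rather than a plain 0/-errno pair.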
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index f55d082..5d6e7e9 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -344,6 +344,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
skb->len - 2, GFP_KERNEL);
+ if (!transaction)
+ return -ENOMEM;
transaction->aid_len = skb->data[1];
memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
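The added check enforces the standard allocator contract, which the neighboring st21nfca hunk repeats: devm_kzalloc(), like kzalloc(), returns NULL on failure and must be checked before the first dereference. A minimal sketch of the pattern (names generic, not st-nci-specific):

static int example(struct device *dev, struct sk_buff *skb, size_t size)
{
	struct nfc_evt_transaction *t;

	t = devm_kzalloc(dev, size, GFP_KERNEL);
	if (!t)
		return -ENOMEM;		/* never dereference a failed allocation */
	t->aid_len = skb->data[1];	/* safe only after the NULL check */
	return 0;
}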
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index acdce23..569475a 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -335,6 +335,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
skb->len - 2, GFP_KERNEL);
+ if (!transaction)
+ return -ENOMEM;
transaction->aid_len = skb->data[1];
memcpy(transaction->aid, &skb->data[2],
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index a3132a9..2ba22cd 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -86,7 +86,7 @@ static void nvdimm_bus_probe_end(struct nvdimm_bus *nvdimm_bus)
{
nvdimm_bus_lock(&nvdimm_bus->dev);
if (--nvdimm_bus->probe_active == 0)
- wake_up(&nvdimm_bus->probe_wait);
+ wake_up(&nvdimm_bus->wait);
nvdimm_bus_unlock(&nvdimm_bus->dev);
}
@@ -348,7 +348,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
return NULL;
INIT_LIST_HEAD(&nvdimm_bus->list);
INIT_LIST_HEAD(&nvdimm_bus->mapping_list);
- init_waitqueue_head(&nvdimm_bus->probe_wait);
+ init_waitqueue_head(&nvdimm_bus->wait);
nvdimm_bus->id = ida_simple_get(&nd_ida, 0, 0, GFP_KERNEL);
mutex_init(&nvdimm_bus->reconfig_mutex);
badrange_init(&nvdimm_bus->badrange);
@@ -418,6 +418,9 @@ static int nd_bus_remove(struct device *dev)
list_del_init(&nvdimm_bus->list);
mutex_unlock(&nvdimm_bus_list_mutex);
+ wait_event(nvdimm_bus->wait,
+ atomic_read(&nvdimm_bus->ioctl_active) == 0);
+
nd_synchronize();
device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
@@ -525,13 +528,38 @@ EXPORT_SYMBOL(nd_device_register);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
{
+ bool killed;
+
switch (mode) {
case ND_ASYNC:
+ /*
+ * In the async case this is being triggered with the
+ * device lock held and the unregistration work needs to
+	 * be moved out of line iff this thread has won the
+ * race to schedule the deletion.
+ */
+ if (!kill_device(dev))
+ return;
+
get_device(dev);
async_schedule_domain(nd_async_device_unregister, dev,
&nd_async_domain);
break;
case ND_SYNC:
+ /*
+ * In the sync case the device is being unregistered due
+ * to a state change of the parent. Claim the kill state
+ * to synchronize against other unregistration requests,
+ * or otherwise let the async path handle it if the
+ * unregistration was already queued.
+ */
+ device_lock(dev);
+ killed = kill_device(dev);
+ device_unlock(dev);
+
+ if (!killed)
+ return;
+
nd_synchronize();
device_unregister(dev);
break;
@@ -837,10 +865,12 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
do {
if (nvdimm_bus->probe_active == 0)
break;
- nvdimm_bus_unlock(&nvdimm_bus->dev);
- wait_event(nvdimm_bus->probe_wait,
+ nvdimm_bus_unlock(dev);
+ device_unlock(dev);
+ wait_event(nvdimm_bus->wait,
nvdimm_bus->probe_active == 0);
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ device_lock(dev);
+ nvdimm_bus_lock(dev);
} while (true);
}
@@ -923,20 +953,19 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
int read_only, unsigned int ioctl_cmd, unsigned long arg)
{
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
- static char out_env[ND_CMD_MAX_ENVELOPE];
- static char in_env[ND_CMD_MAX_ENVELOPE];
const struct nd_cmd_desc *desc = NULL;
unsigned int cmd = _IOC_NR(ioctl_cmd);
struct device *dev = &nvdimm_bus->dev;
void __user *p = (void __user *) arg;
+ char *out_env = NULL, *in_env = NULL;
const char *cmd_name, *dimm_name;
u32 in_len = 0, out_len = 0;
unsigned int func = cmd;
unsigned long cmd_mask;
struct nd_cmd_pkg pkg;
int rc, i, cmd_rc;
+ void *buf = NULL;
u64 buf_len = 0;
- void *buf;
if (nvdimm) {
desc = nd_cmd_dimm_desc(cmd);
@@ -967,7 +996,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
case ND_CMD_ARS_START:
case ND_CMD_CLEAR_ERROR:
case ND_CMD_CALL:
- dev_dbg(&nvdimm_bus->dev, "'%s' command while read-only.\n",
+ dev_dbg(dev, "'%s' command while read-only.\n",
nvdimm ? nvdimm_cmd_name(cmd)
: nvdimm_bus_cmd_name(cmd));
return -EPERM;
@@ -976,6 +1005,9 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
/* process an input envelope */
+ in_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+ if (!in_env)
+ return -ENOMEM;
for (i = 0; i < desc->in_num; i++) {
u32 in_size, copy;
@@ -983,14 +1015,17 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (in_size == UINT_MAX) {
dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
__func__, dimm_name, cmd_name, i);
- return -ENXIO;
+ rc = -ENXIO;
+ goto out;
}
- if (in_len < sizeof(in_env))
- copy = min_t(u32, sizeof(in_env) - in_len, in_size);
+ if (in_len < ND_CMD_MAX_ENVELOPE)
+ copy = min_t(u32, ND_CMD_MAX_ENVELOPE - in_len, in_size);
else
copy = 0;
- if (copy && copy_from_user(&in_env[in_len], p + in_len, copy))
- return -EFAULT;
+ if (copy && copy_from_user(&in_env[in_len], p + in_len, copy)) {
+ rc = -EFAULT;
+ goto out;
+ }
in_len += in_size;
}
@@ -1002,6 +1037,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
/* process an output envelope */
+ out_env = kzalloc(ND_CMD_MAX_ENVELOPE, GFP_KERNEL);
+ if (!out_env) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
for (i = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
(u32 *) in_env, (u32 *) out_env, 0);
@@ -1010,15 +1051,18 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (out_size == UINT_MAX) {
dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
dimm_name, cmd_name, i);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
- if (out_len < sizeof(out_env))
- copy = min_t(u32, sizeof(out_env) - out_len, out_size);
+ if (out_len < ND_CMD_MAX_ENVELOPE)
+ copy = min_t(u32, ND_CMD_MAX_ENVELOPE - out_len, out_size);
else
copy = 0;
if (copy && copy_from_user(&out_env[out_len],
- p + in_len + out_len, copy))
- return -EFAULT;
+ p + in_len + out_len, copy)) {
+ rc = -EFAULT;
+ goto out;
+ }
out_len += out_size;
}
@@ -1026,19 +1070,23 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
- return -EINVAL;
+ rc = -EINVAL;
+ goto out;
}
buf = vmalloc(buf_len);
- if (!buf)
- return -ENOMEM;
+ if (!buf) {
+ rc = -ENOMEM;
+ goto out;
+ }
if (copy_from_user(buf, p, buf_len)) {
rc = -EFAULT;
goto out;
}
- nvdimm_bus_lock(&nvdimm_bus->dev);
+ device_lock(dev);
+ nvdimm_bus_lock(dev);
rc = nd_cmd_clear_to_send(nvdimm_bus, nvdimm, func, buf);
if (rc)
goto out_unlock;
@@ -1053,39 +1101,24 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
clear_err->cleared);
}
- nvdimm_bus_unlock(&nvdimm_bus->dev);
if (copy_to_user(p, buf, buf_len))
rc = -EFAULT;
- vfree(buf);
- return rc;
-
- out_unlock:
- nvdimm_bus_unlock(&nvdimm_bus->dev);
- out:
+out_unlock:
+ nvdimm_bus_unlock(dev);
+ device_unlock(dev);
+out:
+ kfree(in_env);
+ kfree(out_env);
vfree(buf);
return rc;
}
-static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- long id = (long) file->private_data;
- int rc = -ENXIO, ro;
- struct nvdimm_bus *nvdimm_bus;
-
- ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
- mutex_lock(&nvdimm_bus_list_mutex);
- list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
- if (nvdimm_bus->id == id) {
- rc = __nd_ioctl(nvdimm_bus, NULL, ro, cmd, arg);
- break;
- }
- }
- mutex_unlock(&nvdimm_bus_list_mutex);
-
- return rc;
-}
+enum nd_ioctl_mode {
+ BUS_IOCTL,
+ DIMM_IOCTL,
+};
static int match_dimm(struct device *dev, void *data)
{
@@ -1100,31 +1133,62 @@ static int match_dimm(struct device *dev, void *data)
return 0;
}
-static long nvdimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
+ enum nd_ioctl_mode mode)
+
{
- int rc = -ENXIO, ro;
- struct nvdimm_bus *nvdimm_bus;
+ struct nvdimm_bus *nvdimm_bus, *found = NULL;
+ long id = (long) file->private_data;
+ struct nvdimm *nvdimm = NULL;
+ int rc, ro;
ro = ((file->f_flags & O_ACCMODE) == O_RDONLY);
mutex_lock(&nvdimm_bus_list_mutex);
list_for_each_entry(nvdimm_bus, &nvdimm_bus_list, list) {
- struct device *dev = device_find_child(&nvdimm_bus->dev,
- file->private_data, match_dimm);
- struct nvdimm *nvdimm;
+ if (mode == DIMM_IOCTL) {
+ struct device *dev;
- if (!dev)
- continue;
+ dev = device_find_child(&nvdimm_bus->dev,
+ file->private_data, match_dimm);
+ if (!dev)
+ continue;
+ nvdimm = to_nvdimm(dev);
+ found = nvdimm_bus;
+ } else if (nvdimm_bus->id == id) {
+ found = nvdimm_bus;
+ }
- nvdimm = to_nvdimm(dev);
- rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
- put_device(dev);
- break;
+ if (found) {
+ atomic_inc(&nvdimm_bus->ioctl_active);
+ break;
+ }
}
mutex_unlock(&nvdimm_bus_list_mutex);
+ if (!found)
+ return -ENXIO;
+
+ nvdimm_bus = found;
+ rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
+
+ if (nvdimm)
+ put_device(&nvdimm->dev);
+ if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
+ wake_up(&nvdimm_bus->wait);
+
return rc;
}
+static long bus_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return nd_ioctl(file, cmd, arg, BUS_IOCTL);
+}
+
+static long dimm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ return nd_ioctl(file, cmd, arg, DIMM_IOCTL);
+}
+
static int nd_open(struct inode *inode, struct file *file)
{
long minor = iminor(inode);
@@ -1136,16 +1200,16 @@ static int nd_open(struct inode *inode, struct file *file)
static const struct file_operations nvdimm_bus_fops = {
.owner = THIS_MODULE,
.open = nd_open,
- .unlocked_ioctl = nd_ioctl,
- .compat_ioctl = nd_ioctl,
+ .unlocked_ioctl = bus_ioctl,
+ .compat_ioctl = bus_ioctl,
.llseek = noop_llseek,
};
static const struct file_operations nvdimm_fops = {
.owner = THIS_MODULE,
.open = nd_open,
- .unlocked_ioctl = nvdimm_ioctl,
- .compat_ioctl = nvdimm_ioctl,
+ .unlocked_ioctl = dimm_ioctl,
+ .compat_ioctl = dimm_ioctl,
.llseek = noop_llseek,
};
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 5ff254d..adf62a6 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -25,10 +25,11 @@ extern int nvdimm_major;
struct nvdimm_bus {
struct nvdimm_bus_descriptor *nd_desc;
- wait_queue_head_t probe_wait;
+ wait_queue_head_t wait;
struct list_head list;
struct device dev;
int id, probe_active;
+ atomic_t ioctl_active;
struct list_head mapping_list;
struct mutex reconfig_mutex;
struct badrange badrange;
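The rename from probe_wait to wait reflects that one waitqueue now serves two drain conditions: probe completion and in-flight ioctls, counted by the new ioctl_active field. A condensed sketch of the pattern from the bus.c hunks above:

/* ioctl side: the reference is taken under nvdimm_bus_list_mutex */
atomic_inc(&nvdimm_bus->ioctl_active);
rc = __nd_ioctl(nvdimm_bus, nvdimm, ro, cmd, arg);
if (atomic_dec_and_test(&nvdimm_bus->ioctl_active))
	wake_up(&nvdimm_bus->wait);

/* teardown side: the bus is already off the list, so no new ioctls start */
wait_event(nvdimm_bus->wait,
	   atomic_read(&nvdimm_bus->ioctl_active) == 0);

Because nd_bus_remove() unlinks the bus from nvdimm_bus_list before waiting, ioctl_active can only fall from that point on, so the wait cannot miss a wakeup.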
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index b9ca003..f9130cc 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -42,17 +42,6 @@ static int nd_region_probe(struct device *dev)
if (rc)
return rc;
- rc = nd_region_register_namespaces(nd_region, &err);
- if (rc < 0)
- return rc;
-
- ndrd = dev_get_drvdata(dev);
- ndrd->ns_active = rc;
- ndrd->ns_count = rc + err;
-
- if (rc && err && rc == err)
- return -ENODEV;
-
if (is_nd_pmem(&nd_region->dev)) {
struct resource ndr_res;
@@ -68,6 +57,17 @@ static int nd_region_probe(struct device *dev)
nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
}
+ rc = nd_region_register_namespaces(nd_region, &err);
+ if (rc < 0)
+ return rc;
+
+ ndrd = dev_get_drvdata(dev);
+ ndrd->ns_active = rc;
+ ndrd->ns_count = rc + err;
+
+ if (rc && err && rc == err)
+ return -ENODEV;
+
nd_region->btt_seed = nd_btt_create(nd_region);
nd_region->pfn_seed = nd_pfn_create(nd_region);
nd_region->dax_seed = nd_dax_create(nd_region);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index e7377f1..0303296 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -425,10 +425,12 @@ static ssize_t available_size_show(struct device *dev,
* memory nvdimm_bus_lock() is dropped, but that's userspace's
* problem to not race itself.
*/
+ device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
available = nd_region_available_dpa(nd_region);
nvdimm_bus_unlock(dev);
+ device_unlock(dev);
return sprintf(buf, "%llu\n", available);
}
@@ -440,10 +442,12 @@ static ssize_t max_available_extent_show(struct device *dev,
struct nd_region *nd_region = to_nd_region(dev);
unsigned long long available = 0;
+ device_lock(dev);
nvdimm_bus_lock(dev);
wait_nvdimm_bus_probe_idle(dev);
available = nd_region_allocatable_dpa(nd_region);
nvdimm_bus_unlock(dev);
+ device_unlock(dev);
return sprintf(buf, "%llu\n", available);
}
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 260248fb..a11e210 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -20,11 +20,6 @@ module_param(multipath, bool, 0444);
MODULE_PARM_DESC(multipath,
"turn on native support for multiple controllers per subsystem");
-inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
-{
- return multipath && ctrl->subsys && (ctrl->subsys->cmic & (1 << 3));
-}
-
/*
* If multipathing is enabled we need to always use the subsystem instance
* number for numbering our devices to avoid conflicts between subsystems that
@@ -516,7 +511,8 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
{
int error;
- if (!nvme_ctrl_use_ana(ctrl))
+ /* check if multipath is enabled and we have the capability */
+ if (!multipath || !ctrl->subsys || !(ctrl->subsys->cmic & (1 << 3)))
return 0;
ctrl->anacap = id->anacap;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index e82cdae..d5e29b5 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -464,7 +464,11 @@ extern const struct attribute_group nvme_ns_id_attr_group;
extern const struct block_device_operations nvme_ns_head_ops;
#ifdef CONFIG_NVME_MULTIPATH
-bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl);
+static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
+{
+ return ctrl->ana_log_buf != NULL;
+}
+
void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
struct nvme_ctrl *ctrl, int *flags);
void nvme_failover_req(struct request *req);
diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c
index a32d6dd..412524a 100644
--- a/drivers/pci/controller/dwc/pci-dra7xx.c
+++ b/drivers/pci/controller/dwc/pci-dra7xx.c
@@ -26,6 +26,7 @@
#include <linux/types.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
+#include <linux/gpio/consumer.h>
#include "../../pci.h"
#include "pcie-designware.h"
diff --git a/drivers/pci/controller/pcie-mobiveil.c b/drivers/pci/controller/pcie-mobiveil.c
index a939e8d3..a2d1e89 100644
--- a/drivers/pci/controller/pcie-mobiveil.c
+++ b/drivers/pci/controller/pcie-mobiveil.c
@@ -508,6 +508,12 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
return err;
}
+ /* setup bus numbers */
+ value = csr_readl(pcie, PCI_PRIMARY_BUS);
+ value &= 0xff000000;
+ value |= 0x00ff0100;
+ csr_writel(pcie, value, PCI_PRIMARY_BUS);
+
/*
* program Bus Master Enable Bit in Command Register in PAB Config
* Space
@@ -547,7 +553,7 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
resource_size(pcie->ob_io_res));
/* memory inbound translation window */
- program_ib_windows(pcie, WIN_NUM_1, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
+ program_ib_windows(pcie, WIN_NUM_0, 0, MEM_WINDOW_TYPE, IB_WIN_SIZE);
/* Get the I/O and memory ranges from DT */
resource_list_for_each_entry_safe(win, tmp, &pcie->resources) {
@@ -559,11 +565,18 @@ static int mobiveil_host_init(struct mobiveil_pcie *pcie)
if (type) {
/* configure outbound translation window */
program_ob_windows(pcie, pcie->ob_wins_configured,
- win->res->start, 0, type,
- resource_size(win->res));
+ win->res->start,
+ win->res->start - win->offset,
+ type, resource_size(win->res));
}
}
+ /* fixup for PCIe class register */
+ value = csr_readl(pcie, PAB_INTP_AXI_PIO_CLASS);
+ value &= 0xff;
+ value |= (PCI_CLASS_BRIDGE_PCI << 16);
+ csr_writel(pcie, value, PAB_INTP_AXI_PIO_CLASS);
+
/* setup MSI hardware registers */
mobiveil_pcie_enable_msi(pcie);
@@ -804,9 +817,6 @@ static int mobiveil_pcie_probe(struct platform_device *pdev)
goto error;
}
- /* fixup for PCIe class register */
- csr_writel(pcie, 0x060402ab, PAB_INTP_AXI_PIO_CLASS);
-
/* initialize the IRQ domains */
ret = mobiveil_pcie_init_irq_domain(pcie);
if (ret) {
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index fb32840..4850a1b 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -483,15 +483,13 @@ static int nwl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
int i;
mutex_lock(&msi->lock);
- bit = bitmap_find_next_zero_area(msi->bitmap, INT_PCI_MSI_NR, 0,
- nr_irqs, 0);
- if (bit >= INT_PCI_MSI_NR) {
+ bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR,
+ get_count_order(nr_irqs));
+ if (bit < 0) {
mutex_unlock(&msi->lock);
return -ENOSPC;
}
- bitmap_set(msi->bitmap, bit, nr_irqs);
-
for (i = 0; i < nr_irqs; i++) {
irq_domain_set_info(domain, virq + i, bit + i, &nwl_irq_chip,
domain->host_data, handle_simple_irq,
@@ -509,7 +507,8 @@ static void nwl_irq_domain_free(struct irq_domain *domain, unsigned int virq,
struct nwl_msi *msi = &pcie->msi;
mutex_lock(&msi->lock);
- bitmap_clear(msi->bitmap, data->hwirq, nr_irqs);
+ bitmap_release_region(msi->bitmap, data->hwirq,
+ get_count_order(nr_irqs));
mutex_unlock(&msi->lock);
}
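Multi-MSI requires the block of hardware IRQs to be a naturally aligned power-of-two range; bitmap_find_next_zero_area() only guarantees contiguity, while bitmap_find_free_region() hands back an aligned 2^order region. Allocation and release must use the same order or the bitmap accounting desynchronizes:

#include <linux/bitmap.h>
#include <linux/log2.h>

int order = get_count_order(nr_irqs);	/* e.g. nr_irqs = 3 -> order = 2 */
int bit = bitmap_find_free_region(msi->bitmap, INT_PCI_MSI_NR, order);

if (bit < 0)
	return -ENOSPC;		/* no aligned region of 2^order bits left */
/* ... hand out hwirqs bit .. bit + nr_irqs - 1 ... */
bitmap_release_region(msi->bitmap, bit, order);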
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 75b7f2c..f06ca58 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -414,6 +414,9 @@ static int pci_device_probe(struct device *dev)
struct pci_dev *pci_dev = to_pci_dev(dev);
struct pci_driver *drv = to_pci_driver(dev->driver);
+ if (!pci_device_can_probe(pci_dev))
+ return -ENODEV;
+
pci_assign_irq(pci_dev);
error = pcibios_alloc_irq(pci_dev);
@@ -421,12 +424,10 @@ static int pci_device_probe(struct device *dev)
return error;
pci_dev_get(pci_dev);
- if (pci_device_can_probe(pci_dev)) {
- error = __pci_device_probe(drv, pci_dev);
- if (error) {
- pcibios_free_irq(pci_dev);
- pci_dev_put(pci_dev);
- }
+ error = __pci_device_probe(drv, pci_dev);
+ if (error) {
+ pcibios_free_irq(pci_dev);
+ pci_dev_put(pci_dev);
}
return error;
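Moving the pci_device_can_probe() test to the top turns a silently swallowed condition (the old code returned 0 while keeping the assigned IRQ and the device reference) into an early -ENODEV before any resource is acquired, so the reject path needs no unwind. A sketch of the resulting shape:

static int pci_device_probe_sketch(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	if (!pci_device_can_probe(pci_dev))
		return -ENODEV;		/* nothing acquired, nothing to undo */

	pci_assign_irq(pci_dev);	/* only reached for probeable devices */
	/* ... allocate the IRQ, take a reference, probe, unwind on error ... */
	return 0;
}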
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 9ecfe13..1edf5a1 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -478,7 +478,7 @@ static ssize_t remove_store(struct device *dev, struct device_attribute *attr,
pci_stop_and_remove_bus_device_locked(to_pci_dev(dev));
return count;
}
-static struct device_attribute dev_remove_attr = __ATTR(remove,
+static struct device_attribute dev_remove_attr = __ATTR_IGNORE_LOCKDEP(remove,
(S_IWUSR|S_IWGRP),
NULL, remove_store);
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index ed711a0..86c6c53 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -733,8 +733,8 @@ static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
cpu_pm_pmu_setup(armpmu, cmd);
break;
case CPU_PM_EXIT:
- cpu_pm_pmu_setup(armpmu, cmd);
case CPU_PM_ENTER_FAILED:
+ cpu_pm_pmu_setup(armpmu, cmd);
armpmu->start(armpmu);
break;
default:
diff --git a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
index 88253ff..de03667 100644
--- a/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
+++ b/drivers/phy/qualcomm/phy-qcom-ufs-qmp-v4-lito.h
@@ -79,6 +79,7 @@
#define UFS_PHY_RX_HSGEAR_CAPABILITY PHY_OFF(0xB4)
#define UFS_PHY_RX_MIN_HIBERN8_TIME PHY_OFF(0x150)
#define UFS_PHY_BIST_FIXED_PAT_CTRL PHY_OFF(0x60)
+#define UFS_PHY_RX_SIGDET_CTRL1 PHY_OFF(0x154)
/* UFS PHY TX registers */
#define QSERDES_TX0_PWM_GEAR_1_DIVIDER_BAND0_1 TX_OFF(0, 0x168)
@@ -270,6 +271,7 @@ static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_no_g4[] = {
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_HS_GEAR_BAND, 0x06),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_HSGEAR_CAPABILITY, 0x03),
UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_HSGEAR_CAPABILITY, 0x03),
+ UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL1, 0x0E),
};
static struct ufs_qcom_phy_calibration phy_cal_table_2nd_lane_no_g4[] = {
diff --git a/drivers/phy/renesas/phy-rcar-gen2.c b/drivers/phy/renesas/phy-rcar-gen2.c
index 97d4dd6..aa02b19 100644
--- a/drivers/phy/renesas/phy-rcar-gen2.c
+++ b/drivers/phy/renesas/phy-rcar-gen2.c
@@ -288,6 +288,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
error = of_property_read_u32(np, "reg", &channel_num);
if (error || channel_num > 2) {
dev_err(dev, "Invalid \"reg\" property\n");
+ of_node_put(np);
return error;
}
channel->select_mask = select_mask[channel_num];
@@ -303,6 +304,7 @@ static int rcar_gen2_phy_probe(struct platform_device *pdev)
&rcar_gen2_phy_ops);
if (IS_ERR(phy->phy)) {
dev_err(dev, "Failed to create PHY\n");
+ of_node_put(np);
return PTR_ERR(phy->phy);
}
phy_set_drvdata(phy->phy, phy);
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index f4a6142..8d83817 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -3172,6 +3172,7 @@ static int rockchip_get_bank_data(struct rockchip_pin_bank *bank,
base,
&rockchip_regmap_config);
}
+ of_node_put(node);
}
bank->irq = irq_of_parse_and_map(bank->of_node, 0);
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 0e3fc66..cac0034 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -486,12 +486,12 @@ static uint16_t gsi_get_complete_num(struct gsi_ring_ctx *ctx, uint64_t addr1,
GSIDBG_LOW("gsi base addr 0x%llx end addr 0x%llx\n",
ctx->base, ctx->end);
- if (addr1 < ctx->base || addr1 >= ctx->end) {
+ if (unlikely(addr1 < ctx->base || addr1 >= ctx->end)) {
GSIERR("address = 0x%llx not in range\n", addr1);
GSI_ASSERT();
}
- if (addr2 < ctx->base || addr2 >= ctx->end) {
+ if (unlikely(addr2 < ctx->base || addr2 >= ctx->end)) {
GSIERR("address = 0x%llx not in range\n", addr2);
GSI_ASSERT();
}
@@ -512,14 +512,14 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
uint64_t rp;
ch_id = evt->chid;
- if (WARN_ON(ch_id >= gsi_ctx->max_ch)) {
+ if (unlikely(WARN_ON(ch_id >= gsi_ctx->max_ch))) {
GSIERR("Unexpected ch %d\n", ch_id);
return;
}
ch_ctx = &gsi_ctx->chan[ch_id];
- if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
- ch_ctx->props.prot != GSI_CHAN_PROT_GCI))
+ if (unlikely(WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
+ ch_ctx->props.prot != GSI_CHAN_PROT_GCI)))
return;
if (evt->type != GSI_XFER_COMPL_TYPE_GCI) {
@@ -555,7 +555,7 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
notify->bytes_xfered = evt->len;
if (callback) {
- if (atomic_read(&ch_ctx->poll_mode)) {
+ if (unlikely(atomic_read(&ch_ctx->poll_mode))) {
GSIERR("Calling client callback in polling mode\n");
WARN_ON(1);
}
@@ -633,7 +633,8 @@ static void gsi_handle_ieob(int ee)
for (i = 0; i < GSI_STTS_REG_BITS; i++) {
if ((1 << i) & ch & msk) {
- if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
+ if (unlikely(i >= gsi_ctx->max_ev
+ || i >= GSI_EVT_RING_MAX)) {
GSIERR("invalid event %d\n", i);
break;
}
@@ -646,7 +647,8 @@ static void gsi_handle_ieob(int ee)
if (ctx->props.intr == GSI_INTR_MSI)
continue;
- if (ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV) {
+ if (unlikely(ctx->props.intf !=
+ GSI_EVT_CHTYPE_GPI_EV)) {
GSIERR("Unexpected irq intf %d\n",
ctx->props.intf);
GSI_ASSERT();
@@ -780,7 +782,7 @@ static void gsi_handle_irq(void)
if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK)
gsi_handle_general(ee);
- if (++cnt > GSI_ISR_MAX_ITER) {
+ if (unlikely(++cnt > GSI_ISR_MAX_ITER)) {
/*
* Max number of spurious interrupts from hardware.
* Unexpected hardware state.
@@ -942,17 +944,17 @@ int gsi_complete_clk_grant(unsigned long dev_hdl)
{
unsigned long flags;
- if (!gsi_ctx) {
+ if (unlikely(!gsi_ctx)) {
pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
return -GSI_STATUS_NODEV;
}
- if (!gsi_ctx->per_registered) {
+ if (unlikely(!gsi_ctx->per_registered)) {
GSIERR("no client registered\n");
return -GSI_STATUS_INVALID_PARAMS;
}
- if (dev_hdl != (uintptr_t)gsi_ctx) {
+ if (unlikely(dev_hdl != (uintptr_t)gsi_ctx)) {
GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%pK\n", dev_hdl,
gsi_ctx);
return -GSI_STATUS_INVALID_PARAMS;
@@ -1871,19 +1873,19 @@ int gsi_ring_evt_ring_db(unsigned long evt_ring_hdl, uint64_t value)
{
struct gsi_evt_ctx *ctx;
- if (!gsi_ctx) {
+ if (unlikely(!gsi_ctx)) {
pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
return -GSI_STATUS_NODEV;
}
- if (evt_ring_hdl >= gsi_ctx->max_ev) {
+ if (unlikely(evt_ring_hdl >= gsi_ctx->max_ev)) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
ctx = &gsi_ctx->evtr[evt_ring_hdl];
- if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+ if (unlikely(ctx->state != GSI_EVT_RING_STATE_ALLOCATED)) {
GSIERR("bad state %d\n",
gsi_ctx->evtr[evt_ring_hdl].state);
return -GSI_STATUS_UNSUPPORTED_OP;
@@ -1901,19 +1903,19 @@ int gsi_ring_ch_ring_db(unsigned long chan_hdl, uint64_t value)
struct gsi_chan_ctx *ctx;
uint32_t val;
- if (!gsi_ctx) {
+ if (unlikely(!gsi_ctx)) {
pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= gsi_ctx->max_ch) {
+ if (unlikely(chan_hdl >= gsi_ctx->max_ch)) {
GSIERR("bad chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->state != GSI_CHAN_STATE_STARTED) {
+ if (unlikely(ctx->state != GSI_CHAN_STATE_STARTED)) {
GSIERR("bad state %d\n", ctx->state);
return -GSI_STATUS_UNSUPPORTED_OP;
}
@@ -3384,8 +3386,18 @@ int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
}
}
- /* TODO: Increase escape buffer size if we hit this */
- GSIERR("user_data is full\n");
+ /* Go over original userdata when escape buffer is full (costly) */
+ GSIDBG("escape buffer is full\n");
+ for (i = 0; i < end; i++) {
+ if (!ctx->user_data[i].valid) {
+ ctx->user_data[i].valid = true;
+ return i;
+ }
+ }
+
+ /* Everything is full (possibly a stall) */
+ GSIERR("both userdata array and escape buffer is full\n");
+ BUG();
return 0xFFFF;
}
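The cookie allocator now degrades instead of failing outright: it prefers the escape-buffer slots and, when those are exhausted, falls back to scanning the main user_data range before declaring a stall. This also motivates the companion fix in __gsi_populate_gci_tre() below, which must index user_data by the returned cookie rather than by idx, since the two no longer always match. A condensed sketch of the search (the slot struct is illustrative):

#include <linux/types.h>

struct slot { bool valid; void *p; };

static int find_slot(struct slot *ud, int pref_start, int pref_end)
{
	int i;

	for (i = pref_start; i < pref_end; i++) {	/* escape buffer */
		if (!ud[i].valid) {
			ud[i].valid = true;
			return i;
		}
	}
	for (i = 0; i < pref_start; i++) {	/* costly fallback scan */
		if (!ud[i].valid) {
			ud[i].valid = true;
			return i;
		}
	}
	return -1;	/* everything full: the caller treats this as a stall */
}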
@@ -3397,13 +3409,13 @@ int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
uint16_t idx;
memset(&gci_tre, 0, sizeof(gci_tre));
- if (xfer->addr & 0xFFFFFF0000000000) {
+ if (unlikely(xfer->addr & 0xFFFFFF0000000000)) {
GSIERR("chan_hdl=%u add too large=%llx\n",
ctx->props.ch_id, xfer->addr);
return -EINVAL;
}
- if (xfer->type != GSI_XFER_ELEM_DATA) {
+ if (unlikely(xfer->type != GSI_XFER_ELEM_DATA)) {
GSIERR("chan_hdl=%u bad RE type=%u\n", ctx->props.ch_id,
xfer->type);
return -EINVAL;
@@ -3417,12 +3429,12 @@ int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
gci_tre.buf_len = xfer->len;
gci_tre.re_type = GSI_RE_COAL;
gci_tre.cookie = __gsi_get_gci_cookie(ctx, idx);
- if (gci_tre.cookie > (ctx->ring.max_num_elem + GSI_VEID_MAX))
+ if (unlikely(gci_tre.cookie > (ctx->ring.max_num_elem + GSI_VEID_MAX)))
return -EPERM;
/* write the TRE to ring */
*tre_gci_ptr = gci_tre;
- ctx->user_data[idx].p = xfer->xfer_user_data;
+ ctx->user_data[gci_tre.cookie].p = xfer->xfer_user_data;
return 0;
}
@@ -3476,21 +3488,29 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
spinlock_t *slock;
unsigned long flags;
- if (!gsi_ctx) {
+ if (unlikely(!gsi_ctx)) {
pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer)) {
+ if (unlikely(chan_hdl >= gsi_ctx->max_ch || (num_xfers && !xfer))) {
GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%pK\n",
chan_hdl, num_xfers, xfer);
return -GSI_STATUS_INVALID_PARAMS;
}
+ if (unlikely(gsi_ctx->chan[chan_hdl].state
+ == GSI_CHAN_STATE_NOT_ALLOCATED)) {
+ GSIERR("bad state %d\n",
+ gsi_ctx->chan[chan_hdl].state);
+ return -GSI_STATUS_UNSUPPORTED_OP;
+ }
+
ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
- ctx->props.prot != GSI_CHAN_PROT_GCI) {
+ if (unlikely(ctx->props.prot != GSI_CHAN_PROT_GPI &&
+ ctx->props.prot != GSI_CHAN_PROT_GCI)) {
GSIERR("op not supported for protocol %u\n", ctx->props.prot);
return -GSI_STATUS_UNSUPPORTED_OP;
}
@@ -3512,7 +3532,7 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
*/
if (ctx->props.prot != GSI_CHAN_PROT_GCI) {
__gsi_query_channel_free_re(ctx, &free);
- if (num_xfers > free) {
+ if (unlikely(num_xfers > free)) {
GSIERR("chan_hdl=%lu num_xfers=%u free=%u\n",
chan_hdl, num_xfers, free);
spin_unlock_irqrestore(slock, flags);
@@ -3532,7 +3552,7 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
gsi_incr_ring_wp(&ctx->ring);
}
- if (i != num_xfers) {
+ if (unlikely(i != num_xfers)) {
/* reject all the xfers */
ctx->ring.wp_local = wp_rollback;
spin_unlock_irqrestore(slock, flags);
@@ -3609,13 +3629,13 @@ int gsi_poll_n_channel(unsigned long chan_hdl,
int i;
unsigned long flags;
- if (!gsi_ctx) {
+ if (unlikely(!gsi_ctx)) {
pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= gsi_ctx->max_ch || !notify ||
- !actual_num || expected_num <= 0) {
+ if (unlikely(chan_hdl >= gsi_ctx->max_ch || !notify ||
+ !actual_num || expected_num <= 0)) {
GSIERR("bad params chan_hdl=%lu notify=%pK\n",
chan_hdl, notify);
GSIERR("actual_num=%pK expected_num=%d\n",
@@ -3626,13 +3646,13 @@ int gsi_poll_n_channel(unsigned long chan_hdl,
ctx = &gsi_ctx->chan[chan_hdl];
ee = gsi_ctx->per.ee;
- if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
- ctx->props.prot != GSI_CHAN_PROT_GCI) {
+ if (unlikely(ctx->props.prot != GSI_CHAN_PROT_GPI &&
+ ctx->props.prot != GSI_CHAN_PROT_GCI)) {
GSIERR("op not supported for protocol %u\n", ctx->props.prot);
return -GSI_STATUS_UNSUPPORTED_OP;
}
- if (!ctx->evtr) {
+ if (unlikely(!ctx->evtr)) {
GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_UNSUPPORTED_OP;
}
@@ -3690,25 +3710,25 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
unsigned long flags;
enum gsi_chan_mode chan_mode;
- if (!gsi_ctx) {
+ if (unlikely(!gsi_ctx)) {
pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= gsi_ctx->max_ch) {
+ if (unlikely(chan_hdl >= gsi_ctx->max_ch)) {
GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
return -GSI_STATUS_INVALID_PARAMS;
}
ctx = &gsi_ctx->chan[chan_hdl];
- if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
- ctx->props.prot != GSI_CHAN_PROT_GCI) {
+ if (unlikely(ctx->props.prot != GSI_CHAN_PROT_GPI &&
+ ctx->props.prot != GSI_CHAN_PROT_GCI)) {
GSIERR("op not supported for protocol %u\n", ctx->props.prot);
return -GSI_STATUS_UNSUPPORTED_OP;
}
- if (!ctx->evtr || !ctx->evtr->props.exclusive) {
+ if (unlikely(!ctx->evtr || !ctx->evtr->props.exclusive)) {
GSIERR("cannot configure mode on chan_hdl=%lu\n",
chan_hdl);
return -GSI_STATUS_UNSUPPORTED_OP;
@@ -3719,8 +3739,8 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
else
curr = GSI_CHAN_MODE_CALLBACK;
- if (mode == curr) {
- GSIERR("already in requested mode %u chan_hdl=%lu\n",
+ if (unlikely(mode == curr)) {
+ GSIDBG("already in requested mode %u chan_hdl=%lu\n",
curr, chan_hdl);
return -GSI_STATUS_UNSUPPORTED_OP;
}
@@ -3731,7 +3751,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
gsi_writel(1 << ctx->evtr->id, gsi_ctx->base +
GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(gsi_ctx->per.ee));
atomic_set(&ctx->poll_mode, mode);
- if (ctx->props.prot == GSI_CHAN_PROT_GCI)
+ if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan)
atomic_set(&ctx->evtr->chan->poll_mode, mode);
GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
ctx->evtr->id, mode);
@@ -3741,7 +3761,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
if (curr == GSI_CHAN_MODE_POLL &&
mode == GSI_CHAN_MODE_CALLBACK) {
atomic_set(&ctx->poll_mode, mode);
- if (ctx->props.prot == GSI_CHAN_PROT_GCI)
+ if ((ctx->props.prot == GSI_CHAN_PROT_GCI) && ctx->evtr->chan)
atomic_set(&ctx->evtr->chan->poll_mode, mode);
__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
GSIDBG("set gsi_ctx evtr_id %d to %d mode\n",
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 28a2fc6..5b41486 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -1997,6 +1997,43 @@ int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
EXPORT_SYMBOL(ipa_get_wdi_stats);
/**
+ * ipa_uc_bw_monitor() - start uc bw monitoring
+ * @info: [in] bandwidth monitor info supplied by the caller
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_uc_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_uc_bw_monitor, info);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_uc_bw_monitor);
+
+/**
+ * ipa_set_wlan_tx_info() - set WLAN Tx statistics info
+ * @info: [in] Tx statistics info supplied by the caller
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_set_wlan_tx_info(struct ipa_wdi_tx_info *info)
+{
+ int ret;
+
+ IPA_API_DISPATCH_RETURN(ipa_set_wlan_tx_info, info);
+
+ return ret;
+}
+EXPORT_SYMBOL(ipa_set_wlan_tx_info);
+/**
* ipa_get_smem_restr_bytes()- Return IPA smem restricted bytes
*
* Return value: u16 - number of IPA smem restricted bytes
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
index 62457e2..553616f 100644
--- a/drivers/platform/msm/ipa/ipa_api.h
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -212,6 +212,10 @@ struct ipa_api_controller {
int (*ipa_get_wdi_stats)(struct IpaHwStatsWDIInfoData_t *stats);
+ int (*ipa_uc_bw_monitor)(struct ipa_wdi_bw_info *info);
+
+ int (*ipa_set_wlan_tx_info)(struct ipa_wdi_tx_info *info);
+
u16 (*ipa_get_smem_restr_bytes)(void);
int (*ipa_broadcast_wdi_quota_reach_ind)(uint32_t fid,
diff --git a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
index ff70d64..f6fc8c7 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ecm_ipa.c
@@ -584,7 +584,7 @@ static netdev_tx_t ecm_ipa_start_xmit
}
ret = ipa_pm_activate(ecm_ipa_ctx->pm_hdl);
- if (ret) {
+ if (unlikely(ret)) {
ECM_IPA_DEBUG("Failed to activate PM client\n");
netif_stop_queue(net);
goto fail_pm_activate;
@@ -607,7 +607,7 @@ static netdev_tx_t ecm_ipa_start_xmit
, skb->protocol);
ret = ipa_tx_dp(ecm_ipa_ctx->ipa_to_usb_client, skb, NULL);
- if (ret) {
+ if (unlikely(ret)) {
ECM_IPA_ERROR("ipa transmit failed (%d)\n", ret);
goto fail_tx_packet;
}
@@ -642,7 +642,7 @@ static void ecm_ipa_packet_receive_notify
int result;
unsigned int packet_len;
- if (!skb) {
+ if (unlikely(!skb)) {
ECM_IPA_ERROR("Bad SKB received from IPA driver\n");
return;
}
@@ -655,7 +655,7 @@ static void ecm_ipa_packet_receive_notify
return;
}
- if (evt != IPA_RECEIVE) {
+ if (unlikely(evt != IPA_RECEIVE)) {
ECM_IPA_ERROR("A none IPA_RECEIVE event in ecm_ipa_receive\n");
return;
}
@@ -664,7 +664,7 @@ static void ecm_ipa_packet_receive_notify
skb->protocol = eth_type_trans(skb, ecm_ipa_ctx->net);
result = netif_rx(skb);
- if (result)
+ if (unlikely(result))
ECM_IPA_ERROR("fail on netif_rx\n");
ecm_ipa_ctx->net->stats.rx_packets++;
ecm_ipa_ctx->net->stats.rx_bytes += packet_len;
@@ -1129,12 +1129,12 @@ static void ecm_ipa_tx_complete_notify
struct sk_buff *skb = (struct sk_buff *)data;
struct ecm_ipa_dev *ecm_ipa_ctx = priv;
- if (!skb) {
+ if (unlikely(!skb)) {
ECM_IPA_ERROR("Bad SKB received from IPA driver\n");
return;
}
- if (!ecm_ipa_ctx) {
+ if (unlikely(!ecm_ipa_ctx)) {
ECM_IPA_ERROR("ecm_ipa_ctx is NULL pointer\n");
return;
}
@@ -1144,7 +1144,7 @@ static void ecm_ipa_tx_complete_notify
skb->len, skb->protocol,
atomic_read(&ecm_ipa_ctx->outstanding_pkts));
- if (evt != IPA_WRITE_DONE) {
+ if (unlikely(evt != IPA_WRITE_DONE)) {
ECM_IPA_ERROR("unsupported event on Tx callback\n");
return;
}
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
index 90082fc..0c8bb24 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_wdi3.c
@@ -760,3 +760,15 @@ int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats)
return ipa_get_wdi_stats(stats);
}
EXPORT_SYMBOL(ipa_wdi_get_stats);
+
+int ipa_wdi_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+ return ipa_uc_bw_monitor(info);
+}
+EXPORT_SYMBOL(ipa_wdi_bw_monitor);
+
+int ipa_wdi_sw_stats(struct ipa_wdi_tx_info *info)
+{
+ return ipa_set_wlan_tx_info(info);
+}
+EXPORT_SYMBOL(ipa_wdi_sw_stats);
diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
index efdf97b..c74bbe1 100644
--- a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
@@ -800,7 +800,7 @@ int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
case ODU_BRIDGE_MODE_ROUTER:
/* Router mode - pass skb to IPA */
res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
- if (res) {
+ if (unlikely(res)) {
ODU_BRIDGE_DBG("tx dp failed %d\n", res);
goto out;
}
@@ -813,7 +813,7 @@ int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
ODU_BRIDGE_IS_QMI_ADDR(ipv6hdr->daddr)) {
ODU_BRIDGE_DBG_LOW("QMI packet\n");
skb_copied = skb_clone(skb, GFP_KERNEL);
- if (!skb_copied) {
+ if (unlikely(!skb_copied)) {
ODU_BRIDGE_ERR("No memory\n");
return -ENOMEM;
}
@@ -834,13 +834,13 @@ int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
ODU_BRIDGE_DBG_LOW(
"Multicast pkt, send to APPS and IPA\n");
skb_copied = skb_clone(skb, GFP_KERNEL);
- if (!skb_copied) {
+ if (unlikely(!skb_copied)) {
ODU_BRIDGE_ERR("No memory\n");
return -ENOMEM;
}
res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
- if (res) {
+ if (unlikely(res)) {
ODU_BRIDGE_DBG("tx dp failed %d\n", res);
dev_kfree_skb(skb_copied);
goto out;
@@ -855,7 +855,7 @@ int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
}
res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
- if (res) {
+ if (unlikely(res)) {
ODU_BRIDGE_DBG("tx dp failed %d\n", res);
goto out;
}
diff --git a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
index fd2eab5..3c2f3acf 100644
--- a/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_clients/rndis_ipa.c
@@ -940,7 +940,7 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
}
ret = ipa_pm_activate(rndis_ipa_ctx->pm_hdl);
- if (ret) {
+ if (unlikely(ret)) {
RNDIS_IPA_DEBUG("Failed activate PM client\n");
netif_stop_queue(net);
goto fail_pm_activate;
@@ -959,7 +959,7 @@ static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
skb = rndis_encapsulate_skb(skb, rndis_ipa_ctx);
trace_rndis_tx_dp(skb->protocol);
ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL);
- if (ret) {
+ if (unlikely(ret)) {
RNDIS_IPA_ERROR("ipa transmit failed (%d)\n", ret);
goto fail_tx_packet;
}
@@ -1006,7 +1006,7 @@ static void rndis_ipa_tx_complete_notify(
ret = 0;
NULL_CHECK_RETVAL(private);
- if (ret)
+ if (unlikely(ret))
return;
trace_rndis_status_rcvd(skb->protocol);
@@ -1120,7 +1120,7 @@ static void rndis_ipa_packet_receive_notify(
return;
}
- if (evt != IPA_RECEIVE) {
+ if (unlikely(evt != IPA_RECEIVE)) {
RNDIS_IPA_ERROR("a none IPA_RECEIVE event in driver RX\n");
return;
}
@@ -1140,7 +1140,7 @@ static void rndis_ipa_packet_receive_notify(
trace_rndis_netif_ni(skb->protocol);
result = netif_rx_ni(skb);
- if (result)
+ if (unlikely(result))
RNDIS_IPA_ERROR("fail on netif_rx_ni\n");
rndis_ipa_ctx->net->stats.rx_packets++;
rndis_ipa_ctx->net->stats.rx_bytes += packet_len;
@@ -1817,7 +1817,7 @@ static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb,
if (unlikely(skb_headroom(skb) < sizeof(rndis_template_hdr))) {
struct sk_buff *new_skb = skb_copy_expand(skb,
sizeof(rndis_template_hdr), 0, GFP_ATOMIC);
- if (!new_skb) {
+ if (unlikely(!new_skb)) {
RNDIS_IPA_ERROR("no memory for skb expand\n");
return skb;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 77c0f2a..c7cfcce 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -55,6 +55,8 @@
#include "ipa_trace.h"
#include "ipa_odl.h"
+#define IPA_SUSPEND_BUSY_TIMEOUT (msecs_to_jiffies(10))
+
/*
* The following for adding code (ie. for EMULATION) not found on x86.
*/
@@ -118,7 +120,7 @@ static void ipa3_load_ipa_fw(struct work_struct *work);
static DECLARE_WORK(ipa3_fw_loading_work, ipa3_load_ipa_fw);
static void ipa_dec_clients_disable_clks_on_wq(struct work_struct *work);
-static DECLARE_WORK(ipa_dec_clients_disable_clks_on_wq_work,
+static DECLARE_DELAYED_WORK(ipa_dec_clients_disable_clks_on_wq_work,
ipa_dec_clients_disable_clks_on_wq);
static int ipa3_ioctl_add_rt_rule_v2(unsigned long arg);
@@ -130,6 +132,7 @@ static int ipa3_ioctl_add_flt_rule_after_v2(unsigned long arg);
static int ipa3_ioctl_mdfy_flt_rule_v2(unsigned long arg);
static int ipa3_ioctl_fnr_counter_alloc(unsigned long arg);
static int ipa3_ioctl_fnr_counter_query(unsigned long arg);
+static int ipa3_ioctl_fnr_counter_set(unsigned long arg);
static struct ipa3_plat_drv_res ipa3_res = {0, };
@@ -1478,6 +1481,43 @@ static int ipa3_ioctl_fnr_counter_query(unsigned long arg)
return retval;
}
+static int ipa3_ioctl_fnr_counter_set(unsigned long arg)
+{
+ u8 header[128] = { 0 };
+ uint8_t value;
+
+ if (copy_from_user(header, (const void __user *)arg,
+ sizeof(struct ipa_ioc_fnr_index_info))) {
+ IPAERR_RL("copy_from_user fails\n");
+ return -EFAULT;
+ }
+
+ value = ((struct ipa_ioc_fnr_index_info *)
+ header)->hw_counter_offset;
+ if (value <= 0 || value > IPA_MAX_FLT_RT_CNT_INDEX) {
+ IPAERR("hw_counter_offset failed: num %d\n",
+ value);
+ return -EPERM;
+ }
+
+ ipa3_ctx->fnr_info.hw_counter_offset = value;
+
+ value = ((struct ipa_ioc_fnr_index_info *)
+ header)->sw_counter_offset;
+ if (value <= 0 || value > IPA_MAX_FLT_RT_CNT_INDEX) {
+ IPAERR("sw_counter_offset failed: num %d\n",
+ value);
+ return -EPERM;
+ }
+ ipa3_ctx->fnr_info.sw_counter_offset = value;
+	/* the valid flag is reset on ipacm cleanup */
+ ipa3_ctx->fnr_info.valid = true;
+ IPADBG("fnr_info hw=%d, hw=%d\n",
+ ipa3_ctx->fnr_info.hw_counter_offset,
+ ipa3_ctx->fnr_info.sw_counter_offset);
+ return 0;
+}
+
static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int retval = 0;
@@ -2646,6 +2686,10 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = ipa3_ioctl_fnr_counter_query(arg);
break;
+ case IPA_IOC_SET_FNR_COUNTER_INFO:
+ retval = ipa3_ioctl_fnr_counter_set(arg);
+ break;
+
case IPA_IOC_WIGIG_FST_SWITCH:
IPADBG("Got IPA_IOCTL_WIGIG_FST_SWITCH\n");
if (copy_from_user(&fst_switch, (const void __user *)arg,
@@ -4811,8 +4855,16 @@ static void __ipa3_dec_client_disable_clks(void)
ret = atomic_sub_return(1, &ipa3_ctx->ipa3_active_clients.cnt);
if (ret > 0)
goto unlock_mutex;
- ipa3_suspend_apps_pipes(true);
- ipa3_disable_clks();
+ ret = ipa3_suspend_apps_pipes(true);
+ if (ret) {
+ /* HW is busy, retry after some time */
+ atomic_inc(&ipa3_ctx->ipa3_active_clients.cnt);
+ queue_delayed_work(ipa3_ctx->power_mgmt_wq,
+ &ipa_dec_clients_disable_clks_on_wq_work,
+ IPA_SUSPEND_BUSY_TIMEOUT);
+ } else {
+ ipa3_disable_clks();
+ }
unlock_mutex:
mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
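
The retry-on-busy flow above follows a reusable idiom: the last client drops its reference, and if the hardware refuses to suspend, the reference is re-taken and the release is retried from the delayed work. A condensed sketch of the idiom, with hypothetical ctx/hw_* names:

/* Condensed sketch of the retry-on-busy release idiom used above.
 * Names (ctx, release_dwork, hw_*) are hypothetical.
 */
static void release_clocks_work(struct work_struct *work)
{
	mutex_lock(&ctx->lock);
	if (atomic_sub_return(1, &ctx->active_clients) > 0)
		goto out;				/* others still active */
	if (hw_suspend_pipes()) {			/* nonzero: HW busy */
		atomic_inc(&ctx->active_clients);	/* keep the vote */
		queue_delayed_work(ctx->pm_wq, &release_dwork,
				   msecs_to_jiffies(10));
	} else {
		hw_disable_clocks();
	}
out:
	mutex_unlock(&ctx->lock);
}
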
@@ -4865,8 +4917,8 @@ void ipa3_dec_client_disable_clks_no_block(
}
/* seems like this is the only client holding the clocks */
- queue_work(ipa3_ctx->power_mgmt_wq,
- &ipa_dec_clients_disable_clks_on_wq_work);
+ queue_delayed_work(ipa3_ctx->power_mgmt_wq,
+ &ipa_dec_clients_disable_clks_on_wq_work, 0);
}
/**
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 5bebb15..d0ee749 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -236,6 +236,10 @@ static void ipa3_send_nop_desc(struct work_struct *work)
struct ipa3_tx_pkt_wrapper *tx_pkt;
IPADBG_LOW("gsi send NOP for ch: %lu\n", sys->ep->gsi_chan_hdl);
+
+ if (atomic_read(&sys->workqueue_flushed))
+ return;
+
tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, GFP_KERNEL);
if (!tx_pkt) {
queue_work(sys->wq, &sys->work);
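
The workqueue_flushed flag added by this series fences the NOP worker against teardown: once the workqueue has been flushed, the worker must not requeue itself or touch freed state. The idiom in isolation, with hypothetical names:

/* Flush-then-fence idiom (hypothetical sys_ctx and names). */
static void nop_worker(struct work_struct *w)
{
	struct sys_ctx *sys = container_of(w, struct sys_ctx, work);

	if (atomic_read(&sys->workqueue_flushed))
		return;			/* pipe is being torn down */
	/* ... allocate the NOP descriptor, requeue on failure ... */
}

static void teardown_pipe(struct sys_ctx *sys)
{
	flush_workqueue(sys->wq);
	atomic_set(&sys->workqueue_flushed, 1);	/* fence future runs */
}
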
@@ -344,7 +348,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
for (i = 0; i < num_desc; i++) {
tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
GFP_ATOMIC);
- if (!tx_pkt) {
+ if (unlikely(!tx_pkt)) {
IPAERR("failed to alloc tx wrapper\n");
result = -ENOMEM;
goto failure;
@@ -358,8 +362,8 @@ int ipa3_send(struct ipa3_sys_context *sys,
/* populate tag field */
if (desc[i].is_tag_status) {
- if (ipa_populate_tag_field(&desc[i], tx_pkt,
- &tag_pyld_ret)) {
+ if (unlikely(ipa_populate_tag_field(&desc[i], tx_pkt,
+ &tag_pyld_ret))) {
IPAERR("Failed to populate tag field\n");
result = -EFAULT;
goto failure_dma_map;
@@ -399,7 +403,8 @@ int ipa3_send(struct ipa3_sys_context *sys,
tx_pkt->no_unmap_dma = true;
}
}
- if (dma_mapping_error(ipa3_ctx->pdev, tx_pkt->mem.phys_base)) {
+ if (unlikely(dma_mapping_error(ipa3_ctx->pdev,
+ tx_pkt->mem.phys_base))) {
IPAERR("failed to do dma map.\n");
result = -EFAULT;
goto failure_dma_map;
@@ -450,7 +455,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
IPADBG_LOW("ch:%lu queue xfer\n", sys->ep->gsi_chan_hdl);
result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
gsi_xfer, true);
- if (result != GSI_STATUS_SUCCESS) {
+ if (unlikely(result != GSI_STATUS_SUCCESS)) {
IPAERR_RL("GSI xfer failed.\n");
result = -EFAULT;
goto failure;
@@ -466,6 +471,7 @@ int ipa3_send(struct ipa3_sys_context *sys,
/* set the timer for sending the NOP descriptor */
if (send_nop) {
+
ktime_t time = ktime_set(0, IPA_TX_SEND_COMPL_NOP_DELAY_NS);
IPADBG_LOW("scheduling timer for ch %lu\n",
@@ -1268,6 +1274,8 @@ int ipa3_teardown_sys_pipe(u32 clnt_hdl)
if (IPA_CLIENT_IS_CONS(ep->client))
cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
flush_workqueue(ep->sys->wq);
+ if (IPA_CLIENT_IS_PROD(ep->client))
+ atomic_set(&ep->sys->workqueue_flushed, 1);
/* tear down the default pipe before we reset the channel*/
if (ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
@@ -1490,7 +1498,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
return -EINVAL;
}
- if (skb->len == 0) {
+ if (unlikely(skb->len == 0)) {
IPAERR("packet size is 0\n");
return -EINVAL;
}
@@ -1506,7 +1514,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
*/
if (IPA_CLIENT_IS_CONS(dst)) {
src_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_PROD);
- if (-1 == src_ep_idx) {
+ if (unlikely(-1 == src_ep_idx)) {
IPAERR("Client %u is not mapped\n",
IPA_CLIENT_APPS_LAN_PROD);
goto fail_gen;
@@ -1514,7 +1522,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
dst_ep_idx = ipa3_get_ep_mapping(dst);
} else {
src_ep_idx = ipa3_get_ep_mapping(dst);
- if (-1 == src_ep_idx) {
+ if (unlikely(-1 == src_ep_idx)) {
IPAERR("Client %u is not mapped\n", dst);
goto fail_gen;
}
@@ -1526,7 +1534,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
sys = ipa3_ctx->ep[src_ep_idx].sys;
- if (!sys || !sys->ep->valid) {
+ if (unlikely(!sys || !sys->ep->valid)) {
IPAERR_RL("pipe %d not valid\n", src_ep_idx);
goto fail_pipe_not_valid;
}
@@ -1547,7 +1555,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
gsi_ep->prefetch_mode == GSI_FREE_PRE_FETCH)
max_desc -= gsi_ep->prefetch_threshold;
if (num_frags + 3 > max_desc) {
- if (skb_linearize(skb)) {
+ if (unlikely(skb_linearize(skb))) {
IPAERR("Failed to linear skb with %d frags\n",
num_frags);
goto fail_gen;
@@ -1561,7 +1569,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
* 1 desc for each frag
*/
desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
- if (!desc) {
+ if (unlikely(!desc)) {
IPAERR("failed to alloc desc array\n");
goto fail_gen;
}
@@ -1623,7 +1631,8 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
desc[skb_idx].callback = NULL;
}
- if (ipa3_send(sys, num_frags + data_idx, desc, true)) {
+ if (unlikely(ipa3_send(sys, num_frags + data_idx,
+ desc, true))) {
IPAERR_RL("fail to send skb %pK num_frags %u SWP\n",
skb, num_frags);
goto fail_send;
@@ -1654,7 +1663,8 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
desc[data_idx].dma_address = meta->dma_address;
}
if (num_frags == 0) {
- if (ipa3_send(sys, data_idx + 1, desc, true)) {
+ if (unlikely(ipa3_send(sys, data_idx + 1,
+ desc, true))) {
IPAERR("fail to send skb %pK HWP\n", skb);
goto fail_mem;
}
@@ -1673,8 +1683,8 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
desc[data_idx+f].user2 = desc[data_idx].user2;
desc[data_idx].callback = NULL;
- if (ipa3_send(sys, num_frags + data_idx + 1,
- desc, true)) {
+ if (unlikely(ipa3_send(sys, num_frags + data_idx + 1,
+ desc, true))) {
IPAERR("fail to send skb %pK num_frags %u\n",
skb, num_frags);
goto fail_mem;
@@ -1729,26 +1739,27 @@ static void ipa3_wq_repl_rx(struct work_struct *work)
begin:
while (1) {
next = (curr + 1) % sys->repl->capacity;
- if (next == atomic_read(&sys->repl->head_idx))
+ if (unlikely(next == atomic_read(&sys->repl->head_idx)))
goto fail_kmem_cache_alloc;
rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
flag);
- if (!rx_pkt)
+ if (unlikely(!rx_pkt))
goto fail_kmem_cache_alloc;
INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
rx_pkt->sys = sys;
rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
- if (rx_pkt->data.skb == NULL)
+ if (unlikely(rx_pkt->data.skb == NULL))
goto fail_skb_alloc;
ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
sys->rx_buff_sz,
DMA_FROM_DEVICE);
- if (dma_mapping_error(ipa3_ctx->pdev, rx_pkt->data.dma_addr)) {
+ if (unlikely(dma_mapping_error(ipa3_ctx->pdev,
+ rx_pkt->data.dma_addr))) {
pr_err_ratelimited("%s dma map fail %pK for %pK sys=%pK\n",
__func__, (void *)rx_pkt->data.dma_addr,
ptr, sys);
@@ -1802,8 +1813,8 @@ static struct ipa3_rx_pkt_wrapper *ipa3_alloc_rx_pkt_page(
rx_pkt->page_data.dma_addr = dma_map_page(ipa3_ctx->pdev,
rx_pkt->page_data.page, 0,
rx_pkt->len, DMA_FROM_DEVICE);
- if (dma_mapping_error(ipa3_ctx->pdev,
- rx_pkt->page_data.dma_addr)) {
+ if (unlikely(dma_mapping_error(ipa3_ctx->pdev,
+ rx_pkt->page_data.dma_addr))) {
pr_err_ratelimited("%s dma map fail %pK for %pK\n",
__func__, (void *)rx_pkt->page_data.dma_addr,
rx_pkt->page_data.page);
@@ -1829,7 +1840,7 @@ static void ipa3_replenish_rx_page_cache(struct ipa3_sys_context *sys)
for (curr = 0; curr < sys->repl->capacity; curr++) {
rx_pkt = ipa3_alloc_rx_pkt_page(GFP_KERNEL, false);
- if (!rx_pkt) {
+ if (unlikely(!rx_pkt)) {
IPAERR("ipa3_alloc_rx_pkt_page fails\n");
ipa_assert();
break;
@@ -1877,7 +1888,7 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
rx_pkt = ipa3_alloc_rx_pkt_page(flag, true);
if (!rx_pkt && flag == GFP_ATOMIC)
break;
- else if (!rx_pkt)
+ else if (unlikely(!rx_pkt))
goto fail_kmem_cache_alloc;
rx_pkt->sys = sys;
}
@@ -1901,7 +1912,7 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
if (idx == IPA_REPL_XFER_MAX) {
ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
gsi_xfer_elem_array, false);
- if (ret != GSI_STATUS_SUCCESS) {
+ if (unlikely(ret != GSI_STATUS_SUCCESS)) {
/* we don't expect this will happen */
IPAERR("failed to provide buffer: %d\n", ret);
ipa_assert();
@@ -1913,7 +1924,7 @@ static void ipa3_replenish_rx_page_recycle(struct ipa3_sys_context *sys)
/* only ring doorbell once here */
ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, idx,
gsi_xfer_elem_array, true);
- if (ret == GSI_STATUS_SUCCESS) {
+ if (likely(ret == GSI_STATUS_SUCCESS)) {
/* ensure write is done before setting head index */
mb();
atomic_set(&sys->repl->head_idx, curr);
@@ -1971,7 +1982,7 @@ static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys)
ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
&gsi_xfer_elem_one, true);
- if (ret) {
+ if (unlikely(ret)) {
IPAERR("failed to provide buffer: %d\n", ret);
goto fail_provide_rx_buffer;
}
@@ -2435,12 +2446,19 @@ static void free_rx_page(void *chan_user_data, void *xfer_user_data)
{
struct ipa3_rx_pkt_wrapper *rx_pkt = (struct ipa3_rx_pkt_wrapper *)
xfer_user_data;
+ struct ipa3_sys_context *sys = rx_pkt->sys;
+ int i;
+ for (i = 0; i < sys->repl->capacity; i++)
+ if (sys->repl->cache[i] == rx_pkt)
+ break;
dma_unmap_page(ipa3_ctx->pdev, rx_pkt->page_data.dma_addr,
rx_pkt->len, DMA_FROM_DEVICE);
__free_pages(rx_pkt->page_data.page,
IPA_WAN_PAGE_ORDER);
kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+ if (i < sys->repl->capacity)
+ sys->repl->cache[i] = NULL;
}
/**
@@ -2489,14 +2507,17 @@ static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
} else {
for (i = 0; i < sys->repl->capacity; i++) {
rx_pkt = sys->repl->cache[i];
- dma_unmap_page(ipa3_ctx->pdev,
- rx_pkt->page_data.dma_addr,
- rx_pkt->len,
- DMA_FROM_DEVICE);
- __free_pages(rx_pkt->page_data.page,
- IPA_WAN_PAGE_ORDER);
- kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
- rx_pkt);
+ if (rx_pkt) {
+ dma_unmap_page(ipa3_ctx->pdev,
+ rx_pkt->page_data.dma_addr,
+ rx_pkt->len,
+ DMA_FROM_DEVICE);
+ __free_pages(rx_pkt->page_data.page,
+ IPA_WAN_PAGE_ORDER);
+ kmem_cache_free(
+ ipa3_ctx->rx_pkt_wrapper_cache,
+ rx_pkt);
+ }
}
}
kfree(sys->repl->cache);
@@ -3124,14 +3145,14 @@ static struct sk_buff *handle_skb_completion(struct gsi_chan_xfer_notify
rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
}
- if (notify->veid >= GSI_VEID_MAX) {
+ if (unlikely(notify->veid >= GSI_VEID_MAX)) {
WARN_ON(1);
return NULL;
}
/* Assert when WAN consumer channel receives an EOB event */
- if (notify->evt_id == GSI_CHAN_EVT_EOB &&
- sys->ep->client == IPA_CLIENT_APPS_WAN_CONS) {
+ if (unlikely(notify->evt_id == GSI_CHAN_EVT_EOB &&
+ sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)) {
IPAERR("EOB event received on WAN consumer channel\n");
ipa_assert();
}
@@ -3255,13 +3276,13 @@ static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
struct ipa3_sys_context *coal_sys;
int ipa_ep_idx;
- if (!notify) {
+ if (unlikely(!notify)) {
IPAERR_RL("gsi_chan_xfer_notify is null\n");
return;
}
rx_skb = handle_skb_completion(notify, true);
- if (rx_skb) {
+ if (likely(rx_skb)) {
sys->pyld_hdlr(rx_skb, sys);
/* For coalescing, we have 2 transfer rings to replenish */
@@ -3269,7 +3290,7 @@ static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
ipa_ep_idx = ipa3_get_ep_mapping(
IPA_CLIENT_APPS_WAN_CONS);
- if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+ if (unlikely(ipa_ep_idx == IPA_EP_NOT_ALLOCATED)) {
IPAERR("Invalid client.\n");
return;
}
@@ -3329,8 +3350,8 @@ static void ipa3_rx_napi_chain(struct ipa3_sys_context *sys,
*/
ipa_ep_idx = ipa3_get_ep_mapping(
IPA_CLIENT_APPS_WAN_CONS);
- if (ipa_ep_idx ==
- IPA_EP_NOT_ALLOCATED) {
+ if (unlikely(ipa_ep_idx ==
+ IPA_EP_NOT_ALLOCATED)) {
IPAERR("Invalid client.\n");
return;
}
@@ -3366,8 +3387,8 @@ static void ipa3_rx_napi_chain(struct ipa3_sys_context *sys,
*/
ipa_ep_idx = ipa3_get_ep_mapping(
IPA_CLIENT_APPS_WAN_CONS);
- if (ipa_ep_idx ==
- IPA_EP_NOT_ALLOCATED) {
+ if (unlikely(ipa_ep_idx ==
+ IPA_EP_NOT_ALLOCATED)) {
IPAERR("Invalid client.\n");
return;
}
@@ -3504,6 +3525,7 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
sys->policy = IPA_POLICY_INTR_MODE;
sys->use_comm_evt_ring = true;
INIT_WORK(&sys->work, ipa3_send_nop_desc);
+ atomic_set(&sys->workqueue_flushed, 0);
/*
* enable source notification status for exception packets
@@ -3533,6 +3555,7 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
sys->policy = IPA_POLICY_INTR_MODE;
sys->use_comm_evt_ring = true;
INIT_WORK(&sys->work, ipa3_send_nop_desc);
+ atomic_set(&sys->workqueue_flushed, 0);
}
} else {
if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
@@ -4155,7 +4178,7 @@ static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
{
struct ipa3_sys_context *sys;
- if (!notify) {
+ if (unlikely(!notify)) {
IPAERR("gsi notify is NULL.\n");
return;
}
@@ -4187,7 +4210,7 @@ static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
{
struct ipa3_sys_context *sys;
- if (!notify) {
+ if (unlikely(!notify)) {
IPAERR("gsi notify is NULL.\n");
return;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 7bfe159..0222b28 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -975,6 +975,7 @@ struct ipa3_repl_ctx {
struct ipa3_sys_context {
u32 len;
atomic_t curr_polling_state;
+ atomic_t workqueue_flushed;
struct delayed_work switch_to_intr_work;
enum ipa3_sys_pipe_policy policy;
bool use_comm_evt_ring;
@@ -1411,6 +1412,12 @@ struct ipa3_uc_ctx {
u32 rdy_comp_ring_size;
u32 *rdy_ring_rp_va;
u32 *rdy_comp_ring_wp_va;
+ bool uc_event_ring_valid;
+ struct ipa_mem_buffer event_ring;
+ u32 ering_wp_local;
+ u32 ering_rp_local;
+ u32 ering_wp;
+ u32 ering_rp;
};
/**
@@ -1500,6 +1507,17 @@ struct ipa3cm_client_info {
bool uplink;
};
+/**
+ * struct ipacm_fnr_info - the FnR info indicated from IPACM
+ * @valid: true once IPACM has supplied the counter offsets
+ * @hw_counter_offset: base index of the HW FnR counters
+ * @sw_counter_offset: base index of the SW FnR counters
+ */
+struct ipacm_fnr_info {
+ bool valid;
+ uint8_t hw_counter_offset;
+ uint8_t sw_counter_offset;
+};
+
struct ipa3_smp2p_info {
u32 out_base_id;
u32 in_base_id;
@@ -1892,6 +1910,7 @@ struct ipa3_context {
struct IpaHwOffloadStatsAllocCmdData_t
gsi_info[IPA_HW_PROTOCOL_MAX];
bool ipa_wan_skb_page;
+ struct ipacm_fnr_info fnr_info;
};
struct ipa3_plat_drv_res {
@@ -2571,6 +2590,9 @@ bool ipa3_get_client_uplink(int pipe_idx);
int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats);
int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota);
+
+int ipa3_inform_wlan_bw(struct ipa_inform_wlan_bw *wdi_bw);
+
/*
* IPADMA
*/
@@ -2838,6 +2860,10 @@ int ipa3_uc_send_remote_ipa_info(u32 remote_addr, uint32_t mbox_n);
int ipa3_uc_debug_stats_alloc(
struct IpaHwOffloadStatsAllocCmdData_t cmdinfo);
int ipa3_uc_debug_stats_dealloc(uint32_t protocol);
+int ipa3_uc_quota_monitor(uint64_t quota);
+int ipa3_uc_bw_monitor(struct ipa_wdi_bw_info *info);
+int ipa3_uc_setup_event_ring(void);
+int ipa3_set_wlan_tx_info(struct ipa_wdi_tx_info *info);
void ipa3_tag_destroy_imm(void *user1, int user2);
const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
(enum ipa_client_type client);
@@ -2896,6 +2922,8 @@ int ipa_get_flt_rt_stats(struct ipa_ioc_flt_rt_query *query);
int ipa_set_flt_rt_stats(int index, struct ipa_flt_rt_stats stats);
+bool ipa_get_fnr_info(struct ipacm_fnr_info *fnr_info);
+
u32 ipa3_get_num_pipes(void);
struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(enum ipa_smmu_cb_type);
struct iommu_domain *ipa3_get_smmu_domain(void);
@@ -2914,7 +2942,7 @@ int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple);
int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple);
void ipa3_set_resorce_groups_min_max_limits(void);
-void ipa3_suspend_apps_pipes(bool suspend);
+int ipa3_suspend_apps_pipes(bool suspend);
int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
enum ipa_ip_type ip_type,
bool hashable,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
index 80a3e2a..4e50080 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mpm.c
@@ -70,7 +70,7 @@
#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20
#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200
-
+#define IPA_MHIP_HOLB_TMO 31 /* value to match granularity on ipa HW 4.5 */
enum mhip_re_type {
MHIP_RE_XFER = 0x2,
MHIP_RE_NOP = 0x4,
@@ -376,12 +376,6 @@ struct ipa_mpm_mhi_driver {
/* General MPM mutex to protect concurrent update of MPM GSI states */
struct mutex mutex;
/*
- * Mutex to protect IPA clock vote/unvote to make sure IPA isn't double
- * devoted for concurrency scenarios such as SSR and LPM mode CB
- * concurrency.
- */
- struct mutex lpm_mutex;
- /*
* Mutex to protect mhi_dev update/ access, for concurrency such as
* 5G SSR and USB disconnect/connect.
*/
@@ -424,8 +418,6 @@ static void ipa_mpm_change_teth_state(int probe_id,
static void ipa_mpm_change_gsi_state(int probe_id,
enum ipa_mpm_mhip_chan mhip_chan,
enum ipa_mpm_gsi_state next_state);
-static int ipa_mpm_start_stop_ul_mhip_data_path(int probe_id,
- enum ipa_mpm_start_stop_type start);
static int ipa_mpm_probe(struct platform_device *pdev);
static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
int probe_id);
@@ -435,6 +427,7 @@ static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
enum ipa_mpm_mhip_chan mhip_chan,
int probe_id,
enum ipa_mpm_start_stop_type start_stop);
+static int ipa_mpm_start_mhip_holb_tmo(u32 clnt_hdl);
static struct mhi_driver mhi_driver = {
.id_table = mhi_driver_match_table,
@@ -500,6 +493,17 @@ static int ipa_mpm_set_dma_mode(enum ipa_client_type src_pipe,
return result;
}
+static int ipa_mpm_start_mhip_holb_tmo(u32 clnt_hdl)
+{
+ struct ipa_ep_cfg_holb holb_cfg;
+
+ memset(&holb_cfg, 0, sizeof(holb_cfg));
+ holb_cfg.en = IPA_HOLB_TMR_EN;
+ /* 31 ms timer, which is less than tag timeout */
+ holb_cfg.tmr_val = IPA_MHIP_HOLB_TMO;
+ return ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+}
+
/**
* ipa_mpm_smmu_map() - SMMU maps ring and the buffer pointer.
* @va_addr: virtual address that needs to be mapped
@@ -532,7 +536,7 @@ static dma_addr_t ipa_mpm_smmu_map(void *va_addr,
/* check cache coherent */
if (ipa_mpm_ctx->dev_info.is_cache_coherent) {
- IPA_MPM_DBG(" enable cache coherent\n");
+ IPA_MPM_DBG_LOW(" enable cache coherent\n");
prot |= IOMMU_CACHE;
}
@@ -1034,6 +1038,21 @@ static int ipa_mpm_connect_mhip_gsi_pipe(enum ipa_client_type mhip_client,
goto fail_alloc_channel;
}
+ if (IPA_CLIENT_IS_CONS(mhip_client)) {
+ /*
+ * Enable HOLB timer one time after bootup/SSR.
+ * The HOLB timeout drops packets on MHIP if a stall
+ * on the MHIP TX pipe exceeds the configured timeout.
+ */
+ result = ipa_mpm_start_mhip_holb_tmo(ipa_ep_idx);
+ if (result) {
+ IPA_MPM_ERR("HOLB config failed for %d, fail = %d\n",
+ ipa_ep_idx, result);
+ goto fail_alloc_channel;
+ }
+ }
+
if (IPA_CLIENT_IS_PROD(mhip_client))
ipa_mpm_change_gsi_state(mhi_idx,
IPA_MPM_MHIP_CHAN_DL,
@@ -1326,15 +1345,12 @@ static void ipa_mpm_mhip_shutdown(int mhip_idx)
get_ipa3_client(mhip_idx, &ul_prod_chan, &dl_cons_chan);
- if (mhip_idx != IPA_MPM_MHIP_CH_ID_2) {
+ if (mhip_idx != IPA_MPM_MHIP_CH_ID_2)
/* For DPL, stop only DL channel */
- ipa_mpm_start_stop_ul_mhip_data_path(mhip_idx, MPM_MHIP_STOP);
ipa_mpm_clean_mhip_chan(mhip_idx, ul_prod_chan);
- }
ipa_mpm_clean_mhip_chan(mhip_idx, dl_cons_chan);
- mutex_lock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
if (!ipa_mpm_ctx->md[mhip_idx].in_lpm) {
ipa_mpm_vote_unvote_ipa_clk(CLK_OFF, mhip_idx);
/* while in modem shutdown scenarios such as SSR, no explicit
@@ -1342,10 +1358,8 @@ static void ipa_mpm_mhip_shutdown(int mhip_idx)
*/
ipa_mpm_ctx->md[mhip_idx].in_lpm = true;
}
- mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
ipa_mpm_ctx->md[mhip_idx].mhi_dev = NULL;
- ipa_mpm_ctx->md[mhip_idx].init_complete = false;
mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
IPA_MPM_FUNC_EXIT();
}
@@ -1376,6 +1390,18 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
return 0;
}
+ if (!ipa_mpm_ctx->md[probe_id].init_complete) {
+ /*
+ * SSR might be in progress; no need to vote/unvote for
+ * IPA clocks, as that is taken care of in remove_cb or the
+ * subsequent probe.
+ */
+ IPA_MPM_DBG("SSR in progress, return\n");
+ mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+ return 0;
+ }
+ mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+
IPA_MPM_ERR("PCIe clock vote/unvote = %d probe_id = %d clk_cnt = %d\n",
vote, probe_id,
atomic_read(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt));
@@ -1386,7 +1412,6 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
if (result) {
IPA_MPM_ERR("mhi_sync_get failed for probe_id %d\n",
result, probe_id);
- mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
return result;
}
@@ -1400,7 +1425,6 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
IPA_MPM_DBG("probe_id %d PCIE clock already devoted\n",
probe_id);
WARN_ON(1);
- mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
return 0;
}
mhi_device_put(ipa_mpm_ctx->md[probe_id].mhi_dev, MHI_VOTE_BUS);
@@ -1408,8 +1432,6 @@ static int ipa_mpm_vote_unvote_pcie_clk(enum ipa_mpm_clk_vote_type vote,
atomic_dec(&ipa_mpm_ctx->md[probe_id].clk_cnt.pcie_clk_cnt);
atomic_dec(&ipa_mpm_ctx->pcie_clk_total_cnt);
}
-
- mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
return result;
}
@@ -1459,11 +1481,9 @@ static int ipa_mpm_start_stop_remote_mhip_chan(
return ret;
}
- mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
/* For error state, expect modem SSR to recover from error */
if (ipa_mpm_ctx->md[probe_id].remote_state == MPM_MHIP_REMOTE_ERR) {
IPA_MPM_ERR("Remote channels in err state for %d\n", probe_id);
- mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
return -EFAULT;
}
@@ -1474,12 +1494,14 @@ static int ipa_mpm_start_stop_remote_mhip_chan(
probe_id);
} else {
ret = mhi_resume_transfer(mhi_dev);
+ mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
if (ret)
ipa_mpm_ctx->md[probe_id].remote_state =
MPM_MHIP_REMOTE_ERR;
else
ipa_mpm_ctx->md[probe_id].remote_state =
MPM_MHIP_REMOTE_START;
+ mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
}
} else {
if (ipa_mpm_ctx->md[probe_id].remote_state ==
@@ -1488,15 +1510,16 @@ static int ipa_mpm_start_stop_remote_mhip_chan(
probe_id);
} else {
ret = mhi_pause_transfer(mhi_dev);
+ mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
if (ret)
ipa_mpm_ctx->md[probe_id].remote_state =
MPM_MHIP_REMOTE_ERR;
else
ipa_mpm_ctx->md[probe_id].remote_state =
MPM_MHIP_REMOTE_STOP;
+ mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
}
}
- mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
return ret;
}
@@ -1540,6 +1563,15 @@ static enum mhip_status_type ipa_mpm_start_stop_mhip_chan(
IPA_MPM_ERR("fail to get EP# for idx %d\n", ipa_ep_idx);
return MHIP_STATUS_EP_NOT_FOUND;
}
+
+ mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+ if (!ipa_mpm_ctx->md[probe_id].init_complete) {
+ IPA_MPM_ERR("MHIP probe %d not initialized\n", probe_id);
+ mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+ return MHIP_STATUS_EP_NOT_READY;
+ }
+ mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+
ep = &ipa3_ctx->ep[ipa_ep_idx];
if (mhip_chan == IPA_MPM_MHIP_CHAN_UL) {
@@ -1713,13 +1745,6 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
case MHIP_STATUS_SUCCESS:
ipa_mpm_ctx->md[probe_id].teth_state =
IPA_MPM_TETH_CONNECTED;
- ret = ipa_mpm_start_stop_ul_mhip_data_path(
- probe_id, MPM_MHIP_START);
- if (ret) {
- IPA_MPM_ERR("UL chan start err = %d\n", ret);
- ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
- return ret;
- }
break;
case MHIP_STATUS_EP_NOT_READY:
case MHIP_STATUS_NO_OP:
@@ -1767,8 +1792,6 @@ int ipa_mpm_notify_wan_state(struct wan_ioctl_notify_wan_state *state)
switch (status) {
case MHIP_STATUS_SUCCESS:
ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
- ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
- MPM_MHIP_STOP);
break;
case MHIP_STATUS_NO_OP:
case MHIP_STATUS_EP_NOT_READY:
@@ -1881,64 +1904,6 @@ static void ipa_mpm_read_channel(enum ipa_client_type chan)
IPA_MPM_ERR("Reading of channel failed for ep %d\n", ep);
}
-static int ipa_mpm_start_stop_ul_mhip_data_path(int probe_id,
- enum ipa_mpm_start_stop_type start)
-{
- int ipa_ep_idx;
- int res = 0;
- enum ipa_client_type ul_chan, dl_chan;
-
- if (probe_id >= IPA_MPM_MHIP_CH_ID_MAX) {
- IPA_MPM_ERR("Unknown probe_id\n");
- return 0;
- }
- get_ipa3_client(probe_id, &ul_chan, &dl_chan);
- IPA_MPM_DBG("Start/Stop Data Path ? = %d\n", start);
-
-
- /* MHIP Start Data path:
- * IPA MHIP Producer: remove HOLB
- * IPA MHIP Consumer : no op as there is no delay on these pipes.
- */
- if (start) {
- IPA_MPM_DBG("Enabling data path\n");
- if (ul_chan != IPA_CLIENT_MAX) {
- /* Remove HOLB on the producer pipe */
- IPA_MPM_DBG("Removing HOLB on ep = %s\n",
- __stringify(ul_chan));
- ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
-
- if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
- IPAERR("failed to get idx");
- return ipa_ep_idx;
- }
-
- res = ipa3_enable_data_path(ipa_ep_idx);
- if (res)
- IPA_MPM_ERR("Enable data path failed res=%d\n",
- res);
- }
- } else {
- IPA_MPM_DBG("Disabling data path\n");
- if (ul_chan != IPA_CLIENT_MAX) {
- /* Set HOLB on the producer pipe */
- ipa_ep_idx = ipa3_get_ep_mapping(ul_chan);
-
- if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
- IPAERR("failed to get idx");
- return ipa_ep_idx;
- }
-
- res = ipa3_disable_data_path(ipa_ep_idx);
- if (res)
- IPA_MPM_ERR("disable data path failed res=%d\n",
- res);
- }
- }
-
- return res;
-}
-
/* ipa_mpm_mhi_probe_cb is received for each MHI/MHI' channel
* Currently we have 4 MHI channels.
*/
@@ -1995,10 +1960,8 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
ipa_mpm_ctx->md[probe_id].remote_state = MPM_MHIP_REMOTE_STOP;
mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
ipa_mpm_vote_unvote_pcie_clk(CLK_ON, probe_id);
- mutex_lock(&ipa_mpm_ctx->md[probe_id].lpm_mutex);
ipa_mpm_vote_unvote_ipa_clk(CLK_ON, probe_id);
ipa_mpm_ctx->md[probe_id].in_lpm = false;
- mutex_unlock(&ipa_mpm_ctx->md[probe_id].lpm_mutex);
IPA_MPM_DBG("ul chan = %d, dl_chan = %d\n", ul_prod, dl_cons);
/*
@@ -2133,8 +2096,10 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
* to MHI driver. remove_cb will be called eventually when
* Device side comes from where pending cleanup happens.
*/
+ mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
atomic_inc(&ipa_mpm_ctx->probe_cnt);
- ipa_mpm_ctx->md[probe_id].init_complete = true;
+ ipa_mpm_ctx->md[probe_id].init_complete = false;
+ mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
IPA_MPM_FUNC_EXIT();
return 0;
}
@@ -2294,12 +2259,6 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
/* No teth started yet, disable UL channel */
ipa_mpm_start_stop_mhip_chan(IPA_MPM_MHIP_CHAN_UL,
probe_id, MPM_MHIP_STOP);
- /* Disable data path */
- if (ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
- MPM_MHIP_STOP)) {
- IPA_MPM_ERR("MHIP Enable data path failed\n");
- goto fail_start_channel;
- }
}
ipa_mpm_vote_unvote_pcie_clk(CLK_OFF, probe_id);
break;
@@ -2308,14 +2267,6 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
IPA_MPM_DBG("UL channel is already started, continue\n");
ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
- /* Enable data path */
- if (ul_prod != IPA_CLIENT_MAX) {
- if (ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
- MPM_MHIP_START)) {
- IPA_MPM_ERR("MHIP Enable data path failed\n");
- goto fail_start_channel;
- }
- }
/* Lift the delay for rmnet USB prod pipe */
if (probe_id == IPA_MPM_MHIP_CH_ID_1) {
pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
@@ -2329,8 +2280,6 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
}
atomic_inc(&ipa_mpm_ctx->probe_cnt);
- ipa_mpm_ctx->md[probe_id].init_complete = true;
-
/* Check if ODL pipe is connected to MHIP DPL pipe before probe */
if (probe_id == IPA_MPM_MHIP_CH_ID_2 &&
ipa3_is_odl_connected()) {
@@ -2338,7 +2287,9 @@ static int ipa_mpm_mhi_probe_cb(struct mhi_device *mhi_dev,
ret = ipa_mpm_set_dma_mode(IPA_CLIENT_MHI_PRIME_DPL_PROD,
IPA_CLIENT_USB_DPL_CONS, false);
}
-
+ mutex_lock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
+ ipa_mpm_ctx->md[probe_id].init_complete = true;
+ mutex_unlock(&ipa_mpm_ctx->md[probe_id].mhi_mutex);
IPA_MPM_FUNC_EXIT();
return 0;
@@ -2407,6 +2358,11 @@ static void ipa_mpm_mhi_remove_cb(struct mhi_device *mhi_dev)
}
IPA_MPM_DBG("remove_cb for mhip_idx = %d", mhip_idx);
+
+ mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+ ipa_mpm_ctx->md[mhip_idx].init_complete = false;
+ mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+
ipa_mpm_mhip_shutdown(mhip_idx);
atomic_dec(&ipa_mpm_ctx->probe_cnt);
@@ -2445,7 +2401,19 @@ static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
return;
}
- mutex_lock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
+ mutex_lock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+ if (!ipa_mpm_ctx->md[mhip_idx].init_complete) {
+ /*
+ * SSR might be in progress; no need to vote/unvote for
+ * IPA clocks, as that is taken care of in remove_cb or the
+ * subsequent probe.
+ */
+ IPA_MPM_DBG("SSR in progress, return\n");
+ mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+ return;
+ }
+ mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].mhi_mutex);
+
switch (mhi_cb) {
case MHI_CB_IDLE:
break;
@@ -2477,12 +2445,11 @@ static void ipa_mpm_mhi_status_cb(struct mhi_device *mhi_dev,
case MHI_CB_PENDING_DATA:
case MHI_CB_SYS_ERROR:
case MHI_CB_FATAL_ERROR:
- case MHI_CB_BW_REQ:
case MHI_CB_EE_MISSION_MODE:
+ case MHI_CB_DTR_SIGNAL:
IPA_MPM_ERR("unexpected event %d\n", mhi_cb);
break;
}
- mutex_unlock(&ipa_mpm_ctx->md[mhip_idx].lpm_mutex);
}
static void ipa_mpm_mhip_map_prot(enum ipa_usb_teth_prot prot,
@@ -2596,8 +2563,6 @@ int ipa_mpm_mhip_xdci_pipe_enable(enum ipa_usb_teth_prot xdci_teth_prot)
case MHIP_STATUS_SUCCESS:
case MHIP_STATUS_NO_OP:
ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_CONNECTED);
- ipa_mpm_start_stop_ul_mhip_data_path(probe_id,
- MPM_MHIP_START);
pipe_idx = ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD);
@@ -2717,7 +2682,6 @@ int ipa_mpm_mhip_xdci_pipe_disable(enum ipa_usb_teth_prot xdci_teth_prot)
case MHIP_STATUS_NO_OP:
case MHIP_STATUS_EP_NOT_READY:
ipa_mpm_change_teth_state(probe_id, IPA_MPM_TETH_INIT);
- ipa_mpm_start_stop_ul_mhip_data_path(probe_id, MPM_MHIP_STOP);
break;
case MHIP_STATUS_FAIL:
case MHIP_STATUS_BAD_STATE:
@@ -2839,7 +2803,6 @@ static int ipa_mpm_probe(struct platform_device *pdev)
for (i = 0; i < IPA_MPM_MHIP_CH_ID_MAX; i++) {
mutex_init(&ipa_mpm_ctx->md[i].mutex);
mutex_init(&ipa_mpm_ctx->md[i].mhi_mutex);
- mutex_init(&ipa_mpm_ctx->md[i].lpm_mutex);
}
ipa_mpm_ctx->dev_info.pdev = pdev;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
index c73f32c..f35abd0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_pm.c
@@ -971,12 +971,13 @@ static int ipa_pm_activate_helper(struct ipa_pm_client *client, bool sync)
*/
int ipa_pm_activate(u32 hdl)
{
- if (ipa_pm_ctx == NULL) {
+ if (unlikely(ipa_pm_ctx == NULL)) {
IPA_PM_ERR("PM_ctx is null\n");
return -EINVAL;
}
- if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
+ if (unlikely(hdl >= IPA_PM_MAX_CLIENTS ||
+ ipa_pm_ctx->clients[hdl] == NULL)) {
IPA_PM_ERR("Invalid Param\n");
return -EINVAL;
}
@@ -993,12 +994,13 @@ int ipa_pm_activate(u32 hdl)
*/
int ipa_pm_activate_sync(u32 hdl)
{
- if (ipa_pm_ctx == NULL) {
+ if (unlikely(ipa_pm_ctx == NULL)) {
IPA_PM_ERR("PM_ctx is null\n");
return -EINVAL;
}
- if (hdl >= IPA_PM_MAX_CLIENTS || ipa_pm_ctx->clients[hdl] == NULL) {
+ if (unlikely(hdl >= IPA_PM_MAX_CLIENTS ||
+ ipa_pm_ctx->clients[hdl] == NULL)) {
IPA_PM_ERR("Invalid Param\n");
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index 5df8dcb..851ff62 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -14,7 +14,7 @@
#define IPA_UC_DBG_STATS_GET_PROT_ID(x) (0xff & ((x) >> 24))
#define IPA_UC_DBG_STATS_GET_OFFSET(x) (0x00ffffff & (x))
-
+#define IPA_UC_EVENT_RING_SIZE 10
/**
* Mailbox register to Interrupt HWP for CPU cmd
* Usage of IPA_UC_MAILBOX_m_n doorbell instead of IPA_IRQ_EE_UC_0
@@ -24,6 +24,11 @@
#define IPA_CPU_2_HW_CMD_MBOX_m 0
#define IPA_CPU_2_HW_CMD_MBOX_n 23
+#define IPA_UC_ERING_m 0
+#define IPA_UC_ERING_n_r 1
+#define IPA_UC_ERING_n_w 0
+#define IPA_UC_MON_INTERVAL 5
+
/**
* enum ipa3_cpu_2_hw_commands - Values that represent the commands from the CPU
* IPA_CPU_2_HW_CMD_NO_OP : No operation is required.
@@ -39,6 +44,7 @@
* IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug.
* IPA_CPU_2_HW_CMD_GSI_CH_EMPTY : Command to check for GSI channel emptiness.
* IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO: Command to store remote IPA Info
+ * IPA_CPU_2_HW_CMD_SETUP_EVENT_RING: Command to setup the event ring
*/
enum ipa3_cpu_2_hw_commands {
IPA_CPU_2_HW_CMD_NO_OP =
@@ -65,6 +71,8 @@ enum ipa3_cpu_2_hw_commands {
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10),
IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 11),
+ IPA_CPU_2_HW_CMD_SETUP_EVENT_RING =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 12),
};
/**
@@ -161,6 +169,23 @@ union IpaHwChkChEmptyCmdData_t {
u32 raw32b;
} __packed;
+struct IpaSetupEventRingCmdParams_t {
+ u32 ring_base_pa;
+ u32 ring_base_pa_hi;
+ u32 ring_size; /* elements, IPA_UC_EVENT_RING_SIZE */
+} __packed;
+
+
+/**
+ * Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_SETUP_EVENT_RING command. Parameters are
+ * sent as 32b immediate parameters.
+ */
+union IpaSetupEventRingCmdData_t {
+ struct IpaSetupEventRingCmdParams_t event;
+ u32 raw32b[6]; /* uC-internal */
+} __packed;
+
/**
* Structure holding the parameters for IPA_CPU_2_HW_CMD_REMOTE_IPA_INFO
@@ -314,6 +339,63 @@ static void ipa3_log_evt_hdlr(void)
ipa3_ctx->uc_ctx.uc_event_top_ofst = 0;
}
+static void ipa3_event_ring_hdlr(void)
+{
+ u32 ering_rp, offset;
+ void *rp_va;
+ struct ipa_inform_wlan_bw bw_info;
+ struct eventElement_t *e_b = NULL, *e_q = NULL;
+ int mul = 0;
+
+ ering_rp = ipahal_read_reg_mn(IPA_UC_MAILBOX_m_n,
+ IPA_UC_ERING_m, IPA_UC_ERING_n_r);
+ offset = sizeof(struct eventElement_t);
+ ipa3_ctx->uc_ctx.ering_rp = ering_rp;
+
+ while (ipa3_ctx->uc_ctx.ering_rp_local != ering_rp) {
+ rp_va = ipa3_ctx->uc_ctx.event_ring.base +
+ ipa3_ctx->uc_ctx.ering_rp_local;
+
+ if (((struct eventElement_t *) rp_va)->Opcode == BW_NOTIFY) {
+ e_b = ((struct eventElement_t *) rp_va);
+ IPADBG("prot(%d), index (%d) throughput (%lu)\n",
+ e_b->Protocol,
+ e_b->Value.bw_param.ThresholdIndex,
+ e_b->Value.bw_param.throughput);
+
+ memset(&bw_info, 0, sizeof(struct ipa_inform_wlan_bw));
+ bw_info.index =
+ e_b->Value.bw_param.ThresholdIndex;
+ mul = 1000 / IPA_UC_MON_INTERVAL;
+ bw_info.throughput =
+ e_b->Value.bw_param.throughput*mul;
+ if (ipa3_inform_wlan_bw(&bw_info))
+ IPAERR_RL("failed on index %d to wlan\n",
+ bw_info.index);
+ } else if (((struct eventElement_t *) rp_va)->Opcode
+ == QUOTA_NOTIFY) {
+ e_q = ((struct eventElement_t *) rp_va);
+ IPADBG("got quota-notify %d reach(%d) usage (%lu)\n",
+ e_q->Protocol,
+ e_q->Value.quota_param.ThreasholdReached,
+ e_q->Value.quota_param.usage);
+ if (ipa3_broadcast_wdi_quota_reach_ind(0,
+ e_q->Value.quota_param.usage))
+ IPAERR_RL("failed on quota_reach for %d\n",
+ e_q->Protocol);
+ }
+ ipa3_ctx->uc_ctx.ering_rp_local += offset;
+ ipa3_ctx->uc_ctx.ering_rp_local %=
+ ipa3_ctx->uc_ctx.event_ring.size;
+ /* update wp */
+ ipa3_ctx->uc_ctx.ering_wp_local += offset;
+ ipa3_ctx->uc_ctx.ering_wp_local %=
+ ipa3_ctx->uc_ctx.event_ring.size;
+ ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n, IPA_UC_ERING_m,
+ IPA_UC_ERING_n_w, ipa3_ctx->uc_ctx.ering_wp_local);
+ }
+}
+
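
The mul factor above converts the per-sample throughput reported by the uC into a per-second rate; a worked sketch of the scaling (illustrative helper, derived from the 5 ms sampling interval defined by IPA_UC_MON_INTERVAL):

/* Illustrative: the uC samples every IPA_UC_MON_INTERVAL (5) ms, so a
 * per-sample byte count scales by 1000 / 5 = 200 to bytes per second.
 */
static u64 scale_to_per_second(u64 bytes_per_sample)
{
	return bytes_per_sample * (1000 / IPA_UC_MON_INTERVAL);
}
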
/**
* ipa3_uc_state_check() - Check the status of the uC interface
*
@@ -441,6 +523,11 @@ static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
IPADBG("uC evt log info ofst=0x%x\n",
ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams);
ipa3_log_evt_hdlr();
+ } else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+ IPA_HW_2_CPU_EVNT_RING_NOTIFY) {
+ IPADBG("uC evt log info ofst=0x%x\n",
+ ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams);
+ ipa3_event_ring_hdlr();
} else {
IPADBG("unsupported uC evt opcode=%u\n",
ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
@@ -1099,3 +1186,254 @@ int ipa3_uc_debug_stats_dealloc(uint32_t protocol)
IPADBG("exit\n");
return result;
}
+
+int ipa3_uc_setup_event_ring(void)
+{
+ int res = 0;
+ struct ipa_mem_buffer cmd, *ring;
+ union IpaSetupEventRingCmdData_t *ring_info;
+
+ ring = &ipa3_ctx->uc_ctx.event_ring;
+ /* Allocate event ring */
+ ring->size = sizeof(struct eventElement_t) * IPA_UC_EVENT_RING_SIZE;
+ ring->base = dma_alloc_coherent(ipa3_ctx->uc_pdev, ring->size,
+ &ring->phys_base, GFP_KERNEL);
+ if (ring->base == NULL)
+ return -ENOMEM;
+
+ cmd.size = sizeof(*ring_info);
+ cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL) {
+ dma_free_coherent(ipa3_ctx->uc_pdev, ring->size,
+ ring->base, ring->phys_base);
+ return -ENOMEM;
+ }
+
+ ring_info = (union IpaSetupEventRingCmdData_t *) cmd.base;
+ ring_info->event.ring_base_pa = (u32) (ring->phys_base & 0xFFFFFFFF);
+ ring_info->event.ring_base_pa_hi =
+ (u32) ((ring->phys_base & 0xFFFFFFFF00000000) >> 32);
+ ring_info->event.ring_size = IPA_UC_EVENT_RING_SIZE;
+
+ res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_SETUP_EVENT_RING, 0,
+ false, 10 * HZ);
+
+ if (res) {
+ IPAERR(" faile to setup event ring 0x%x 0x%x, size %d\n",
+ ring_info->event.ring_base_pa,
+ ring_info->event.ring_base_pa_hi,
+ ring_info->event.ring_size);
+ goto free_cmd;
+ }
+
+ ipa3_ctx->uc_ctx.uc_event_ring_valid = true;
+ /* write wp/rp values */
+ ipa3_ctx->uc_ctx.ering_rp_local = 0;
+ ipa3_ctx->uc_ctx.ering_wp_local =
+ ring->size - sizeof(struct eventElement_t);
+ ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+ IPA_UC_ERING_m, IPA_UC_ERING_n_r, 0);
+ ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+ IPA_UC_ERING_m, IPA_UC_ERING_n_w,
+ ipa3_ctx->uc_ctx.ering_wp_local);
+ ipa3_ctx->uc_ctx.ering_wp =
+ ipa3_ctx->uc_ctx.ering_wp_local;
+ ipa3_ctx->uc_ctx.ering_rp = 0;
+
+free_cmd:
+ dma_free_coherent(ipa3_ctx->uc_pdev,
+ cmd.size, cmd.base, cmd.phys_base);
+ return res;
+}
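
The rp/wp values held in uc_ctx are byte offsets into the ring that advance one element at a time and wrap at the ring size; a minimal sketch of that arithmetic (illustrative helper name, sizes taken from the definitions in this patch):

/* Illustrative: with IPA_UC_EVENT_RING_SIZE (10) elements of
 * sizeof(struct eventElement_t) (16 bytes) each, offsets wrap at
 * 160 bytes, e.g. 144 + 16 -> 0.
 */
static u32 ering_advance(u32 byte_ofst, u32 ring_bytes)
{
	return (byte_ofst + (u32)sizeof(struct eventElement_t)) % ring_bytes;
}
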
+
+int ipa3_uc_quota_monitor(uint64_t quota)
+{
+ int ind, res = 0;
+ struct ipa_mem_buffer cmd;
+ struct IpaQuotaMonitoring_t *quota_info;
+
+ cmd.size = sizeof(*quota_info);
+ cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL)
+ return -ENOMEM;
+
+ quota_info = (struct IpaQuotaMonitoring_t *)cmd.base;
+ quota_info->protocol = IPA_HW_PROTOCOL_WDI3;
+ quota_info->params.WdiQM.Quota = quota;
+ quota_info->params.WdiQM.info.Num = 4;
+ ind = ipa3_ctx->fnr_info.hw_counter_offset +
+ UL_HW - 1;
+ quota_info->params.WdiQM.info.Offset[0] =
+ IPA_MEM_PART(stats_fnr_ofst) +
+ sizeof(struct ipa_flt_rt_stats) * ind + 8;
+ ind = ipa3_ctx->fnr_info.hw_counter_offset +
+ DL_ALL - 1;
+ quota_info->params.WdiQM.info.Offset[1] =
+ IPA_MEM_PART(stats_fnr_ofst) +
+ sizeof(struct ipa_flt_rt_stats) * ind + 8;
+ ind = ipa3_ctx->fnr_info.sw_counter_offset +
+ UL_HW_CACHE - 1;
+ quota_info->params.WdiQM.info.Offset[2] =
+ IPA_MEM_PART(stats_fnr_ofst) +
+ sizeof(struct ipa_flt_rt_stats) * ind + 8;
+ ind = ipa3_ctx->fnr_info.sw_counter_offset +
+ UL_WLAN_TX - 1;
+ quota_info->params.WdiQM.info.Offset[3] =
+ IPA_MEM_PART(stats_fnr_ofst) +
+ sizeof(struct ipa_flt_rt_stats) * ind + 8;
+ quota_info->params.WdiQM.info.Interval =
+ IPA_UC_MON_INTERVAL;
+
+ res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_QUOTA_MONITORING,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10 * HZ);
+
+ if (res) {
+ IPAERR(" faile to set quota %d, number offset %d\n",
+ quota_info->params.WdiQM.Quota,
+ quota_info->params.WdiQM.info.Num);
+ goto free_cmd;
+ }
+
+ IPADBG(" offest1 %d offest2 %d offest3 %d offest4 %d\n",
+ quota_info->params.WdiQM.info.Offset[0],
+ quota_info->params.WdiQM.info.Offset[1],
+ quota_info->params.WdiQM.info.Offset[2],
+ quota_info->params.WdiQM.info.Offset[3]);
+
+free_cmd:
+ dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+
+ return res;
+}
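
Each monitored address above is the same expression repeated per counter; a sketch of the shared offset math (illustrative helper; it assumes, as the +8 displacement implies, that the byte-count field sits 8 bytes into struct ipa_flt_rt_stats):

/* Illustrative helper equivalent to the repeated offset math above.
 * Counter indices are 1-based; the +8 is assumed to select the
 * byte-count field inside struct ipa_flt_rt_stats.
 */
static u32 fnr_counter_sram_ofst(u8 counter_base, u8 counter_id)
{
	int ind = counter_base + counter_id - 1;

	return IPA_MEM_PART(stats_fnr_ofst) +
	       sizeof(struct ipa_flt_rt_stats) * ind + 8;
}
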
+
+int ipa3_uc_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+ int i, ind, res = 0;
+ struct ipa_mem_buffer cmd;
+ struct IpaBwMonitoring_t *bw_info;
+
+ if (!info)
+ return -EINVAL;
+
+ /* check max entry */
+ if (info->num > BW_MONITORING_MAX_THRESHOLD) {
+ IPAERR("%d, support max %d bw monitor\n", info->num,
+ BW_MONITORING_MAX_THRESHOLD);
+ return -EINVAL;
+ }
+
+ cmd.size = sizeof(*bw_info);
+ cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+ &cmd.phys_base, GFP_KERNEL);
+ if (cmd.base == NULL)
+ return -ENOMEM;
+
+ bw_info = (struct IpaBwMonitoring_t *)cmd.base;
+ bw_info->protocol = IPA_HW_PROTOCOL_WDI3;
+ bw_info->params.WdiBw.NumThresh = info->num;
+ bw_info->params.WdiBw.Stop = info->stop;
+ IPADBG("stop bw-monitor? %d\n", bw_info->params.WdiBw.Stop);
+
+ for (i = 0; i < info->num; i++) {
+ bw_info->params.WdiBw.BwThreshold[i] = info->threshold[i];
+ IPADBG("%d-st, %lu\n", i, bw_info->params.WdiBw.BwThreshold[i]);
+ }
+
+ bw_info->params.WdiBw.info.Num = 8;
+ ind = ipa3_ctx->fnr_info.hw_counter_offset +
+ UL_HW - 1;
+ bw_info->params.WdiBw.info.Offset[0] =
+ IPA_MEM_PART(stats_fnr_ofst) +
+ sizeof(struct ipa_flt_rt_stats) * ind + 8;
+ ind = ipa3_ctx->fnr_info.hw_counter_offset +
+ DL_HW - 1;
+ bw_info->params.WdiBw.info.Offset[1] =
+ IPA_MEM_PART(stats_fnr_ofst) +
+ sizeof(struct ipa_flt_rt_stats) * ind + 8;
+ ind = ipa3_ctx->fnr_info.hw_counter_offset +
+ DL_ALL - 1;
+ bw_info->params.WdiBw.info.Offset[2] =
+ IPA_MEM_PART(stats_fnr_ofst) +
+ sizeof(struct ipa_flt_rt_stats) * ind + 8;
+ ind = ipa3_ctx->fnr_info.hw_counter_offset +
+ UL_ALL - 1;
+ bw_info->params.WdiBw.info.Offset[3] =
+ IPA_MEM_PART(stats_fnr_ofst) +
+ sizeof(struct ipa_flt_rt_stats) * ind + 8;
+ ind = ipa3_ctx->fnr_info.sw_counter_offset +
+ UL_HW_CACHE - 1;
+ bw_info->params.WdiBw.info.Offset[4] =
+ IPA_MEM_PART(stats_fnr_ofst) +
+ sizeof(struct ipa_flt_rt_stats) * ind + 8;
+ ind = ipa3_ctx->fnr_info.sw_counter_offset +
+ DL_HW_CACHE - 1;
+ bw_info->params.WdiBw.info.Offset[5] =
+ IPA_MEM_PART(stats_fnr_ofst) +
+ sizeof(struct ipa_flt_rt_stats) * ind + 8;
+ ind = ipa3_ctx->fnr_info.sw_counter_offset +
+ UL_WLAN_TX - 1;
+ bw_info->params.WdiBw.info.Offset[6] =
+ IPA_MEM_PART(stats_fnr_ofst) +
+ sizeof(struct ipa_flt_rt_stats) * ind + 8;
+ ind = ipa3_ctx->fnr_info.sw_counter_offset +
+ DL_WLAN_TX - 1;
+ bw_info->params.WdiBw.info.Offset[7] =
+ IPA_MEM_PART(stats_fnr_ofst) +
+ sizeof(struct ipa_flt_rt_stats) * ind + 8;
+ bw_info->params.WdiBw.info.Interval =
+ IPA_UC_MON_INTERVAL;
+
+ res = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+ IPA_CPU_2_HW_CMD_BW_MONITORING,
+ IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+ false, 10 * HZ);
+
+ if (res) {
+ IPAERR(" faile to set bw %d level with %d coutners\n",
+ bw_info->params.WdiBw.NumThresh,
+ bw_info->params.WdiBw.info.Num);
+ goto free_cmd;
+ }
+
+free_cmd:
+ dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+
+ return res;
+}
+
+int ipa3_set_wlan_tx_info(struct ipa_wdi_tx_info *info)
+{
+ struct ipa_flt_rt_stats stats;
+ struct ipacm_fnr_info fnr_info;
+
+ memset(&fnr_info, 0, sizeof(struct ipacm_fnr_info));
+ if (!ipa_get_fnr_info(&fnr_info)) {
+ IPAERR("FNR counter haven't configured\n");
+ return -EINVAL;
+ }
+
+ /* update sw counters */
+ memset(&stats, 0, sizeof(struct ipa_flt_rt_stats));
+ stats.num_bytes = info->sta_tx;
+ if (ipa_set_flt_rt_stats(fnr_info.sw_counter_offset +
+ UL_WLAN_TX, stats)) {
+ IPAERR("Failed to set stats to ul_wlan_tx %d\n",
+ fnr_info.sw_counter_offset + UL_WLAN_TX);
+ return -EINVAL;
+ }
+
+ stats.num_bytes = info->ap_tx;
+ if (ipa_set_flt_rt_stats(fnr_info.sw_counter_offset +
+ DL_WLAN_TX, stats)) {
+ IPAERR("Failed to set stats to dl_wlan_tx %d\n",
+ fnr_info.sw_counter_offset + DL_WLAN_TX);
+ return -EINVAL;
+ }
+
+ return 0;
+}
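
A caller on the WLAN side would populate ipa_wdi_tx_info and hand it over, e.g. via the api_ctrl->ipa_set_wlan_tx_info hook registered later in this patch; a minimal sketch (field names taken from the usage above, the caller itself is hypothetical):

/* Hypothetical caller reporting WLAN TX byte counts to IPA. */
static int report_wlan_tx_bytes(u64 sta_bytes, u64 ap_bytes)
{
	struct ipa_wdi_tx_info info;

	memset(&info, 0, sizeof(info));
	info.sta_tx = sta_bytes;	/* STA interface TX bytes */
	info.ap_tx = ap_bytes;		/* SAP interface TX bytes */

	return ipa3_set_wlan_tx_info(&info);
}
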
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
index 4d296ec..865b436 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -30,6 +30,8 @@
#define MAX_MHIP_CHANNELS 4
#define MAX_USB_CHANNELS 2
+#define BW_QUOTA_MONITORING_MAX_ADDR_OFFSET 8
+#define BW_MONITORING_MAX_THRESHOLD 3
/**
* @brief Enum value determined based on the feature it
* corresponds to
@@ -98,6 +100,7 @@ enum ipa4_hw_protocol {
* @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
* device
* @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
+ * @IPA_HW_2_CPU_EVNT_RING_NOTIFY : Event ring notification to APPS
*/
enum ipa3_hw_2_cpu_events {
IPA_HW_2_CPU_EVENT_NO_OP =
@@ -106,6 +109,8 @@ enum ipa3_hw_2_cpu_events {
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
IPA_HW_2_CPU_EVENT_LOG_INFO =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+ IPA_HW_2_CPU_EVNT_RING_NOTIFY =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
};
/**
@@ -447,6 +452,8 @@ struct Ipa3HwStatsNTNInfoData_t {
* uC stats calculation for a particular protocol
* @IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC: Command to stop the
* uC stats calculation for a particular protocol
+ * @IPA_CPU_2_HW_CMD_QUOTA_MONITORING : Command to start the Quota monitoring
+ * @IPA_CPU_2_HW_CMD_BW_MONITORING : Command to start the BW monitoring
*/
enum ipa_cpu_2_hw_offload_commands {
IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP =
@@ -461,6 +468,10 @@ enum ipa_cpu_2_hw_offload_commands {
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+ IPA_CPU_2_HW_CMD_QUOTA_MONITORING =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+ IPA_CPU_2_HW_CMD_BW_MONITORING =
+ FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
};
/**
@@ -623,6 +634,48 @@ struct IpaHwOffloadSetUpCmdData_t {
union IpaHwSetUpCmd SetupCh_params;
} __packed;
+struct IpaCommonMonitoringParams_t {
+ /* max 8 */
+ uint8_t Num;
+ /* Sampling interval in ms */
+ uint8_t Interval;
+ uint16_t Offset[BW_QUOTA_MONITORING_MAX_ADDR_OFFSET];
+} __packed; /* 18 bytes */
+
+struct IpaWdiQuotaMonitoringParams_t {
+ uint64_t Quota;
+ struct IpaCommonMonitoringParams_t info;
+} __packed;
+
+struct IpaWdiBwMonitoringParams_t {
+ uint64_t BwThreshold[BW_MONITORING_MAX_THRESHOLD];
+ struct IpaCommonMonitoringParams_t info;
+ uint8_t NumThresh;
+ /*Variable to Start Stop Bw Monitoring*/
+ uint8_t Stop;
+} __packed;
+
+union IpaQuotaMonitoringParams_t {
+ struct IpaWdiQuotaMonitoringParams_t WdiQM;
+} __packed;
+
+union IpaBwMonitoringParams_t {
+ struct IpaWdiBwMonitoringParams_t WdiBw;
+} __packed;
+
+struct IpaQuotaMonitoring_t {
+ /* selects how the union below is interpreted */
+ uint32_t protocol;
+ union IpaQuotaMonitoringParams_t params;
+} __packed;
+
+struct IpaBwMonitoring_t {
+ /* selects how the union below is interpreted */
+ uint32_t protocol;
+ union IpaBwMonitoringParams_t params;
+} __packed;
+
+
struct IpaHwOffloadSetUpCmdData_t_v4_0 {
u32 protocol;
union IpaHwSetUpCmd SetupCh_params;
@@ -644,6 +697,46 @@ struct IpaHwOffloadCommonChCmdData_t {
union IpaHwCommonChCmd CommonCh_params;
} __packed;
+enum EVENT_2_CPU_OPCODE {
+ BW_NOTIFY = 0x0,
+ QUOTA_NOTIFY = 0x1,
+};
+
+struct EventStructureBwMonitoring_t {
+ uint32_t ThresholdIndex;
+ uint64_t throughput;
+} __packed;
+
+struct EventStructureQuotaMonitoring_t {
+ /* indicate threshold has reached */
+ uint32_t ThreasholdReached;
+ uint64_t usage;
+} __packed;
+
+union EventParamFormat_t {
+ struct EventStructureBwMonitoring_t bw_param;
+ struct EventStructureQuotaMonitoring_t quota_param;
+} __packed;
+
+/* EVT RING STRUCTURE
+ * | Word | Bits  | Field     |
+ * ----------------------------
+ * |  0   | 0-7   | Protocol  |
+ * |      | 8-15  | Reserved0 |
+ * |      | 16-23 | Opcode    |
+ * |      | 24-31 | Reserved1 |
+ * |  1   | 0-31  | Word1     |
+ * |  2   | 0-31  | Word2     |
+ * |  3   | 0-31  | Word3     |
+ */
+struct eventElement_t {
+ uint8_t Protocol;
+ uint8_t Reserved0;
+ uint8_t Opcode;
+ uint8_t Reserved1;
+ union EventParamFormat_t Value;
+} __packed;
+
struct IpaHwOffloadCommonChCmdData_t_v4_0 {
u32 protocol;
union IpaHwCommonChCmd CommonCh_params;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index 983ff16..d6a057b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -3050,7 +3050,7 @@ int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
}
for (i = 0; i < num_buffers; i++) {
- IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+ IPADBG_LOW("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
&info[i].pa, info[i].iova, info[i].size);
info[i].result = ipa3_iommu_map(cb->iommu_domain,
rounddown(info[i].iova, PAGE_SIZE),
@@ -3080,7 +3080,7 @@ int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
}
for (i = 0; i < num_buffers; i++) {
- IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+ IPADBG_LOW("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
&info[i].pa, info[i].iova, info[i].size);
info[i].result = iommu_unmap(cb->iommu_domain,
rounddown(info[i].iova, PAGE_SIZE),
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 197e99b..d17ff30 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -39,7 +39,7 @@
#define IPA_V4_0_CLK_RATE_NOMINAL (220 * 1000 * 1000UL)
#define IPA_V4_0_CLK_RATE_TURBO (250 * 1000 * 1000UL)
-#define IPA_V3_0_MAX_HOLB_TMR_VAL (4294967296 - 1)
+#define IPA_MAX_HOLB_TMR_VAL (4294967296 - 1)
#define IPA_V3_0_BW_THRESHOLD_TURBO_MBPS (1000)
#define IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS (600)
@@ -3898,8 +3898,8 @@ static void ipa_cfg_qtime(void)
/* Configure timestamp resolution */
memset(&ts_cfg, 0, sizeof(ts_cfg));
- ts_cfg.dpl_timestamp_lsb = 0;
- ts_cfg.dpl_timestamp_sel = false; /* DPL: use legacy 1ms resolution */
+ ts_cfg.dpl_timestamp_lsb = IPA_TAG_TIMER_TIMESTAMP_SHFT;
+ ts_cfg.dpl_timestamp_sel = true;
ts_cfg.tag_timestamp_lsb = IPA_TAG_TIMER_TIMESTAMP_SHFT;
ts_cfg.nat_timestamp_lsb = IPA_NAT_TIMER_TIMESTAMP_SHFT;
val = ipahal_read_reg(IPA_QTIME_TIMESTAMP_CFG);
@@ -4037,18 +4037,18 @@ int ipa3_get_ep_mapping(enum ipa_client_type client)
int ipa_ep_idx;
u8 hw_idx = ipa3_get_hw_type_index();
- if (client >= IPA_CLIENT_MAX || client < 0) {
+ if (unlikely(client >= IPA_CLIENT_MAX || client < 0)) {
IPAERR_RL("Bad client number! client =%d\n", client);
return IPA_EP_NOT_ALLOCATED;
}
- if (!ipa3_ep_mapping[hw_idx][client].valid)
+ if (unlikely(!ipa3_ep_mapping[hw_idx][client].valid))
return IPA_EP_NOT_ALLOCATED;
ipa_ep_idx =
ipa3_ep_mapping[hw_idx][client].ipa_gsi_ep_info.ipa_ep_num;
- if (ipa_ep_idx < 0 || (ipa_ep_idx >= IPA3_MAX_NUM_PIPES
- && client != IPA_CLIENT_DUMMY_CONS))
+ if (unlikely(ipa_ep_idx < 0 || (ipa_ep_idx >= IPA3_MAX_NUM_PIPES
+ && client != IPA_CLIENT_DUMMY_CONS)))
return IPA_EP_NOT_ALLOCATED;
return ipa_ep_idx;
@@ -4066,7 +4066,7 @@ const struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info
int ep_idx;
ep_idx = ipa3_get_ep_mapping(client);
- if (ep_idx == IPA_EP_NOT_ALLOCATED)
+ if (unlikely(ep_idx == IPA_EP_NOT_ALLOCATED))
return NULL;
if (!ipa3_ep_mapping[ipa3_get_hw_type_index()][client].valid)
@@ -4115,7 +4115,8 @@ u8 ipa3_get_qmb_master_sel(enum ipa_client_type client)
[client].qmb_master_sel;
}
-/* ipa3_set_client() - provide client mapping
+/**
+ * ipa3_set_client() - provide client mapping
* @client: client type
*
* Return value: none
@@ -4132,8 +4133,8 @@ void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink)
ipa3_ctx->ipacm_client[index].uplink = uplink;
}
}
-
-/* ipa3_get_wlan_stats() - get ipa wifi stats
+/**
+ * ipa3_get_wlan_stats() - get ipa wifi stats
*
* Return value: success or failure
*/
@@ -4149,6 +4150,12 @@ int ipa3_get_wlan_stats(struct ipa_get_wdi_sap_stats *wdi_sap_stats)
return 0;
}
+/**
+ * ipa3_set_wlan_quota() - set ipa wifi quota
+ * @wdi_quota: quota requirement
+ *
+ * Return value: success or failure
+ */
int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
{
if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
@@ -4162,6 +4169,23 @@ int ipa3_set_wlan_quota(struct ipa_set_wifi_quota *wdi_quota)
}
/**
+ * ipa3_inform_wlan_bw() - inform wlan bw-index
+ *
+ * Return value: success or failure
+ */
+int ipa3_inform_wlan_bw(struct ipa_inform_wlan_bw *wdi_bw)
+{
+ if (ipa3_ctx->uc_wdi_ctx.stats_notify) {
+ ipa3_ctx->uc_wdi_ctx.stats_notify(IPA_INFORM_WLAN_BW,
+ wdi_bw);
+ } else {
+ IPAERR("uc_wdi_ctx.stats_notify NULL\n");
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
* ipa3_get_client() - provide client mapping
* @client: client type
*
@@ -5986,6 +6010,7 @@ int ipa3_controller_static_bind(struct ipa3_controller *ctrl,
ctrl->ipa_init_sram = _ipa_init_sram_v3;
ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
+ ctrl->max_holb_tmr_val = IPA_MAX_HOLB_TMR_VAL;
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v4_0;
@@ -6877,6 +6902,8 @@ int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
api_ctrl->ipa_resume_wdi_pipe = ipa3_resume_wdi_pipe;
api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats;
+ api_ctrl->ipa_uc_bw_monitor = ipa3_uc_bw_monitor;
+ api_ctrl->ipa_set_wlan_tx_info = ipa3_set_wlan_tx_info;
api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes;
api_ctrl->ipa_broadcast_wdi_quota_reach_ind =
ipa3_broadcast_wdi_quota_reach_ind;
@@ -7485,12 +7512,10 @@ void ipa3_set_resorce_groups_min_max_limits(void)
IPADBG("EXIT\n");
}
-static void ipa3_gsi_poll_after_suspend(struct ipa3_ep_context *ep)
+static bool ipa3_gsi_channel_is_quiet(struct ipa3_ep_context *ep)
{
bool empty;
- IPADBG("switch ch %ld to poll\n", ep->gsi_chan_hdl);
- gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL);
gsi_is_channel_empty(ep->gsi_chan_hdl, &empty);
if (!empty) {
IPADBG("ch %ld not empty\n", ep->gsi_chan_hdl);
@@ -7499,6 +7524,7 @@ static void ipa3_gsi_poll_after_suspend(struct ipa3_ep_context *ep)
if (!atomic_read(&ep->sys->curr_polling_state))
__ipa_gsi_irq_rx_scedule_poll(ep->sys);
}
+ return empty;
}
static int __ipa3_stop_gsi_channel(u32 clnt_hdl)
@@ -7624,141 +7650,168 @@ int ipa3_stop_gsi_channel(u32 clnt_hdl)
return res;
}
-void ipa3_suspend_apps_pipes(bool suspend)
+static int _ipa_suspend_resume_pipe(enum ipa_client_type client, bool suspend)
{
- struct ipa_ep_cfg_ctrl cfg;
int ipa_ep_idx;
struct ipa3_ep_context *ep;
int res;
- memset(&cfg, 0, sizeof(cfg));
- cfg.ipa_ep_suspend = suspend;
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
+ IPAERR("not supported\n");
+ return -EPERM;
+ }
- ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
if (ipa_ep_idx < 0) {
- IPAERR("IPA client mapping failed\n");
+ IPADBG("client %d not configued\n", client);
+ return 0;
+ }
+
+ ep = &ipa3_ctx->ep[ipa_ep_idx];
+ if (!ep->valid)
+ return 0;
+
+ IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend", ipa_ep_idx);
+ /*
+ * move the channel to callback mode.
+ * This needs to happen before starting the channel to make
+ * sure we don't loose any interrupt
+ */
+ if (!suspend && !atomic_read(&ep->sys->curr_polling_state) &&
+ !IPA_CLIENT_IS_APPS_PROD(client))
+ gsi_config_channel_mode(ep->gsi_chan_hdl,
+ GSI_CHAN_MODE_CALLBACK);
+
+ if (suspend) {
+ res = __ipa3_stop_gsi_channel(ipa_ep_idx);
+ if (res) {
+ IPAERR("failed to stop LAN channel\n");
+ ipa_assert();
+ }
+ } else {
+ res = gsi_start_channel(ep->gsi_chan_hdl);
+ if (res) {
+ IPAERR("failed to start LAN channel\n");
+ ipa_assert();
+ }
+ }
+
+ /* Apps prod pipes use the common event ring, so the mode cannot be configured */
+ if (IPA_CLIENT_IS_APPS_PROD(client))
+ return 0;
+
+ if (suspend) {
+ IPADBG("switch ch %ld to poll\n", ep->gsi_chan_hdl);
+ gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL);
+ if (!ipa3_gsi_channel_is_quiet(ep))
+ return -EAGAIN;
+ } else if (!atomic_read(&ep->sys->curr_polling_state)) {
+ IPADBG("switch ch %ld to callback\n", ep->gsi_chan_hdl);
+ gsi_config_channel_mode(ep->gsi_chan_hdl,
+ GSI_CHAN_MODE_CALLBACK);
+ }
+
+ return 0;
+}
+
+void ipa3_force_close_coal(void)
+{
+ struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+ struct ipahal_imm_cmd_register_write reg_write_cmd = { 0 };
+ struct ipahal_reg_valmask valmask;
+ struct ipa3_desc desc;
+ int ep_idx;
+
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+ if (ep_idx == IPA_EP_NOT_ALLOCATED || (!ipa3_ctx->ep[ep_idx].valid))
+ return;
+
+ reg_write_cmd.skip_pipeline_clear = false;
+ reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+ reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
+ ipahal_get_aggr_force_close_valmask(ep_idx, &valmask);
+ reg_write_cmd.value = valmask.val;
+ reg_write_cmd.value_mask = valmask.mask;
+ cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+ &reg_write_cmd, false);
+ if (!cmd_pyld) {
+ IPAERR("fail construct register_write imm cmd\n");
ipa_assert();
return;
}
- ep = &ipa3_ctx->ep[ipa_ep_idx];
- if (ep->valid) {
- IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
- ipa_ep_idx);
- /*
- * move the channel to callback mode.
- * This needs to happen before starting the channel to make
- * sure we don't loose any interrupt
- */
- if (!suspend && !atomic_read(&ep->sys->curr_polling_state))
- gsi_config_channel_mode(ep->gsi_chan_hdl,
- GSI_CHAN_MODE_CALLBACK);
+ ipa3_init_imm_cmd_desc(&desc, cmd_pyld);
- if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
- if (suspend) {
- res = __ipa3_stop_gsi_channel(ipa_ep_idx);
- if (res) {
- IPAERR("failed to stop LAN channel\n");
- ipa_assert();
- }
- } else {
- res = gsi_start_channel(ep->gsi_chan_hdl);
- if (res) {
- IPAERR("failed to start LAN channel\n");
- ipa_assert();
- }
- }
- } else {
- ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+ IPADBG("Sending 1 descriptor for coal force close\n");
+ if (ipa3_send_cmd_timeout(1, &desc,
+ IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC)) {
+ IPAERR("ipa3_send_cmd failed\n");
+ ipa_assert();
+ }
+ ipahal_destroy_imm_cmd(cmd_pyld);
+}
+
+int ipa3_suspend_apps_pipes(bool suspend)
+{
+ int res;
+ enum ipa_client_type client;
+
+ if (suspend)
+ ipa3_force_close_coal();
+
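+ /* handle all APPS consumer pipes first, producer pipes afterwards */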
+ for (client = 0; client < IPA_CLIENT_MAX; client++) {
+ if (IPA_CLIENT_IS_APPS_CONS(client)) {
+ res = _ipa_suspend_resume_pipe(client, suspend);
+ if (res)
+ goto undo_cons;
}
- if (suspend)
- ipa3_gsi_poll_after_suspend(ep);
}
- ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
- /* Considering the case for SSR. */
- if (ipa_ep_idx == -1) {
- IPADBG("Invalid mapping for IPA_CLIENT_APPS_WAN_CONS\n");
- return;
- }
- ep = &ipa3_ctx->ep[ipa_ep_idx];
- if (ep->valid) {
- IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
- ipa_ep_idx);
- /*
- * move the channel to callback mode.
- * This needs to happen before starting the channel to make
- * sure we don't loose any interrupt
- */
- if (!suspend && !atomic_read(&ep->sys->curr_polling_state))
- gsi_config_channel_mode(ep->gsi_chan_hdl,
- GSI_CHAN_MODE_CALLBACK);
- if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
- if (suspend) {
- res = __ipa3_stop_gsi_channel(ipa_ep_idx);
- if (res) {
- IPAERR("failed to stop WAN channel\n");
- ipa_assert();
- }
- } else if (!atomic_read(&ipa3_ctx->is_ssr)) {
- /* If SSR was alreday started not required to
- * start WAN channel,Because in SSR will stop
- * channel and reset the channel.
- */
- res = gsi_start_channel(ep->gsi_chan_hdl);
- if (res) {
- IPAERR("failed to start WAN channel\n");
- ipa_assert();
- }
- }
- } else {
- ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+ if (suspend) {
+ struct ipahal_reg_tx_wrapper tx;
+ int ep_idx;
+
+ ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_COAL_CONS);
+ if (ep_idx == IPA_EP_NOT_ALLOCATED ||
+ (!ipa3_ctx->ep[ep_idx].valid))
+ goto do_prod;
+
+ ipahal_read_reg_fields(IPA_STATE_TX_WRAPPER, &tx);
+ if (tx.coal_slave_open_frame != 0) {
+ IPADBG("COAL frame is open 0x%x\n",
+ tx.coal_slave_open_frame);
+ res = -EAGAIN;
+ goto undo_cons;
}
- if (suspend)
- ipa3_gsi_poll_after_suspend(ep);
+
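+ /*
+ * Give any in-flight SUSPEND interrupt a short window to latch
+ * before sampling it below; a pending interrupt means traffic
+ * arrived and the suspend has to be rolled back.
+ */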
+ usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
+
+ res = ipahal_read_reg_n(IPA_SUSPEND_IRQ_INFO_EE_n,
+ ipa3_ctx->ee);
+ if (res) {
+ IPADBG("suspend irq is pending 0x%x\n", res);
+ goto undo_cons;
+ }
+ }
+do_prod:
+ for (client = 0; client < IPA_CLIENT_MAX; client++) {
+ if (IPA_CLIENT_IS_APPS_PROD(client)) {
+ res = _ipa_suspend_resume_pipe(client, suspend);
+ if (res)
+ goto undo_prod;
+ }
}
- ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_ODL_DPL_CONS);
- /* Considering the case for SSR. */
- if (ipa_ep_idx == -1) {
- IPADBG("Invalid mapping for IPA_CLIENT_ODL_DPL_CONS\n");
- return;
- }
- ep = &ipa3_ctx->ep[ipa_ep_idx];
- if (ep->valid) {
- IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
- ipa_ep_idx);
- /*
- * move the channel to callback mode.
- * This needs to happen before starting the channel to make
- * sure we don't loose any interrupt
- */
- if (!suspend && !atomic_read(&ep->sys->curr_polling_state))
- gsi_config_channel_mode(ep->gsi_chan_hdl,
- GSI_CHAN_MODE_CALLBACK);
- if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
- if (suspend) {
- res = __ipa3_stop_gsi_channel(ipa_ep_idx);
- if (res) {
- IPAERR("failed to stop ODL channel\n");
- ipa_assert();
- }
- } else if (!atomic_read(&ipa3_ctx->is_ssr)) {
- /* If SSR was alreday started not required to
- * start WAN channel,Because in SSR will stop
- * channel and reset the channel.
- */
- res = gsi_start_channel(ep->gsi_chan_hdl);
- if (res) {
- IPAERR("failed to start ODL channel\n");
- ipa_assert();
- }
- }
- } else {
- ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
- }
- if (suspend)
- ipa3_gsi_poll_after_suspend(ep);
- }
+ return 0;
+undo_prod:
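+ /* walk back through the pipes already toggled and restore their state */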
+ for (; client <= IPA_CLIENT_MAX && client >= 0; client--)
+ if (IPA_CLIENT_IS_APPS_PROD(client))
+ _ipa_suspend_resume_pipe(client, !suspend);
+ client = IPA_CLIENT_MAX;
+undo_cons:
+ for (; client <= IPA_CLIENT_MAX && client >= 0; client--)
+ if (IPA_CLIENT_IS_APPS_CONS(client))
+ _ipa_suspend_resume_pipe(client, !suspend);
+ return res;
}
int ipa3_allocate_dma_task_for_gsi(void)
@@ -8345,6 +8398,30 @@ bool ipa3_is_apq(void)
}
/**
+ * ipa_get_fnr_info() - retrieve the cached FNR counter info
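+ * @fnr_info: caller-supplied buffer; receives the hw/sw counter offsets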
+ *
+ * Return value: true if set, false if not set
+ *
+ */
+bool ipa_get_fnr_info(struct ipacm_fnr_info *fnr_info)
+{
+ bool res = false;
+
+ if (ipa3_ctx->fnr_info.valid) {
+ fnr_info->valid = ipa3_ctx->fnr_info.valid;
+ fnr_info->hw_counter_offset =
+ ipa3_ctx->fnr_info.hw_counter_offset;
+ fnr_info->sw_counter_offset =
+ ipa3_ctx->fnr_info.sw_counter_offset;
+ res = true;
+ } else {
+ IPAERR("fnr_info not valid!\n");
+ res = false;
+ }
+ return res;
+}
+
+/**
* ipa3_disable_prefetch() - disable\enable tx prefetch
*
* @client: the client which is related to the TX where prefetch will be
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
index f213178..91cef82 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
@@ -725,6 +725,20 @@ int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
+ /* start uC event ring */
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+ if (ipa3_ctx->uc_ctx.uc_loaded &&
+ !ipa3_ctx->uc_ctx.uc_event_ring_valid) {
+ if (ipa3_uc_setup_event_ring()) {
+ IPAERR("failed to set uc_event ring\n");
+ return -EFAULT;
+ }
+ } else {
+ IPAERR("uc-loaded %d, ring-valid %d\n",
+ ipa3_ctx->uc_ctx.uc_loaded,
+ ipa3_ctx->uc_ctx.uc_event_ring_valid);
+ }
+ }
+
/* enable data path */
result = ipa3_enable_data_path(ipa_ep_idx_rx);
if (result) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 6bf24c86..440788f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -1180,6 +1180,112 @@ static void ipareg_parse_comp_cfg_v4_5(
IPA_COMP_CFG_IPA_FULL_FLUSH_WAIT_RSC_CLOSURE_EN_BMSK_v4_5);
}
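+/*
+ * Unpack IPA_STATE_TX_WRAPPER into struct ipahal_reg_tx_wrapper. Each
+ * field is extracted by masking and shifting the raw register value, so
+ * every SHFT constant is expected to match the least-significant set
+ * bit of its BMSK.
+ */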
+static void ipareg_parse_state_tx_wrapper_v4_5(
+ enum ipahal_reg_name reg, void *fields, u32 val)
+{
+ struct ipahal_reg_tx_wrapper *tx =
+ (struct ipahal_reg_tx_wrapper *)fields;
+
+ tx->tx0_idle = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT,
+ IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK);
+
+ tx->tx1_idle = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT,
+ IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK);
+
+ tx->ipa_prod_ackmngr_db_empty = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT,
+ IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK);
+
+ tx->ipa_prod_ackmngr_state_idle = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT,
+ IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK);
+
+ tx->ipa_prod_prod_bresp_empty = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT,
+ IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK);
+
+ tx->ipa_prod_prod_bresp_toggle_idle = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_SHFT,
+ IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_BMSK);
+
+ tx->ipa_mbim_pkt_fms_idle = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_SHFT,
+ IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_BMSK);
+
+ tx->mbim_direct_dma = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_SHFT,
+ IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_BMSK);
+
+ tx->trnseq_force_valid = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_SHFT,
+ IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_BMSK);
+
+ tx->pkt_drop_cnt_idle = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_SHFT,
+ IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_BMSK);
+
+ tx->nlo_direct_dma = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_SHFT,
+ IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_BMSK);
+
+ tx->coal_direct_dma = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_SHFT,
+ IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_BMSK);
+
+ tx->coal_slave_idle = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT,
+ IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK);
+
+ tx->coal_slave_ctx_idle = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT,
+ IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK);
+
+ tx->coal_slave_open_frame = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT,
+ IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK);
+}
+
+static void ipareg_parse_state_tx_wrapper_v4_7(
+ enum ipahal_reg_name reg, void *fields, u32 val)
+{
+ struct ipahal_reg_tx_wrapper *tx =
+ (struct ipahal_reg_tx_wrapper *)fields;
+
+ tx->tx0_idle = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT_v4_7,
+ IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK_v4_7);
+
+ tx->tx1_idle = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT_v4_7,
+ IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK_v4_7);
+
+ tx->ipa_prod_ackmngr_db_empty = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT_v4_7,
+ IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK_v4_7);
+
+ tx->ipa_prod_ackmngr_state_idle = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT_v4_7,
+ IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK_v4_7);
+
+ tx->ipa_prod_prod_bresp_empty = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT_v4_7,
+ IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK_v4_7);
+
+ tx->coal_slave_idle = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT_v4_7,
+ IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK_v4_7);
+
+ tx->coal_slave_ctx_idle = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT_v4_7,
+ IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK_v4_7);
+
+ tx->coal_slave_open_frame = IPA_GETFIELD_FROM_REG(val,
+ IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT_v4_7,
+ IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK_v4_7);
+}
+
static void ipareg_construct_qcncm(
enum ipahal_reg_name reg, const void *fields, u32 *val)
{
@@ -2968,6 +3074,9 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
[IPA_HW_v4_5][IPA_COMP_CFG] = {
ipareg_construct_comp_cfg_v4_5, ipareg_parse_comp_cfg_v4_5,
0x0000003C, 0, 0, 0, 0},
+ [IPA_HW_v4_5][IPA_STATE_TX_WRAPPER] = {
+ ipareg_construct_dummy, ipareg_parse_state_tx_wrapper_v4_5,
+ 0x00000090, 0, 0, 0, 1 },
[IPA_HW_v4_5][IPA_STATE_FETCHER_MASK] = {
ipareg_construct_dummy, ipareg_parse_dummy,
-1, 0, 0, 0, 0},
@@ -3167,6 +3276,9 @@ static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
[IPA_HW_v4_5][IPA_COAL_QMAP_CFG] = {
ipareg_construct_coal_qmap_cfg, ipareg_parse_coal_qmap_cfg,
0x00001810, 0, 0, 0, 0},
+ [IPA_HW_v4_7][IPA_STATE_TX_WRAPPER] = {
+ ipareg_construct_dummy, ipareg_parse_state_tx_wrapper_v4_7,
+ 0x00000090, 0, 0, 0, 1 },
};
/*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
index 9fa862c..5191d3b1 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -380,6 +380,27 @@ struct ipahal_reg_comp_cfg {
};
/*
+ * struct ipahal_reg_tx_wrapper - IPA TX Wrapper state information
+ */
+struct ipahal_reg_tx_wrapper {
+ bool tx0_idle;
+ bool tx1_idle;
+ bool ipa_prod_ackmngr_db_empty;
+ bool ipa_prod_ackmngr_state_idle;
+ bool ipa_prod_prod_bresp_empty;
+ bool ipa_prod_prod_bresp_toggle_idle;
+ bool ipa_mbim_pkt_fms_idle;
+ u8 mbim_direct_dma;
+ bool trnseq_force_valid;
+ bool pkt_drop_cnt_idle;
+ u8 nlo_direct_dma;
+ u8 coal_direct_dma;
+ bool coal_slave_idle;
+ bool coal_slave_ctx_idle;
+ u8 coal_slave_open_frame;
+};
+
+/*
* struct ipa_hash_tuple - Hash tuple members for flt and rt
* the fields tells if to be masked or not
* @src_id: pipe number for flt, table index for rt
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
index 119ad8c..d934b91 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -626,5 +626,52 @@ int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
#define IPA_COAL_QMAP_CFG_BMSK 0x1
#define IPA_COAL_QMAP_CFG_SHFT 0
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK 0xf0000000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT 0x1c
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK 0x100000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT 0x14
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK 0x8000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT 0xf
+#define IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_BMSK 0x6000
+#define IPA_STATE_TX_WRAPPER_COAL_DIRECT_DMA_SHFT 0xd
+#define IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_BMSK 0x1800
+#define IPA_STATE_TX_WRAPPER_NLO_DIRECT_DMA_SHFT 0xb
+#define IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_BMSK 0x400
+#define IPA_STATE_TX_WRAPPER_PKT_DROP_CNT_IDLE_SHFT 0xa
+#define IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_BMSK 0x200
+#define IPA_STATE_TX_WRAPPER_TRNSEQ_FORCE_VALID_SHFT 0x9
+#define IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_BMSK 0x180
+#define IPA_STATE_TX_WRAPPER_MBIM_DIRECT_DMA_SHFT 0x7
+#define IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_BMSK 0x40
+#define IPA_STATE_TX_WRAPPER_IPA_MBIM_PKT_FMS_IDLE_SHFT 0x6
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_BMSK 0x20
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_TOGGLE_IDLE_SHFT 0x5
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK 0x10
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT 0x4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK 0x8
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT 0x3
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK 0x4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT 0x2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK 0x2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT 0x1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK 0x1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT 0x0
+
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_BMSK_v4_7 0xf0000000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_OPEN_FRAME_SHFT_v4_7 28
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_BMSK_v4_7 0x80000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_CTX_IDLE_SHFT_v4_7 19
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_BMSK_v4_7 0x40000
+#define IPA_STATE_TX_WRAPPER_COAL_SLAVE_IDLE_SHFT_v4_7 18
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_BMSK_v4_7 0x10
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_BRESP_EMPTY_SHFT_v4_7 4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_BMSK_v4_7 0x8
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_STATE_IDLE_SHFT_v4_7 3
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_BMSK_v4_7 0x4
+#define IPA_STATE_TX_WRAPPER_IPA_PROD_ACKMNGR_DB_EMPTY_SHFT_v4_7 2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_BMSK_v4_7 0x2
+#define IPA_STATE_TX_WRAPPER_TX1_IDLE_SHFT_v4_7 1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_BMSK_v4_7 0x1
+#define IPA_STATE_TX_WRAPPER_TX0_IDLE_SHFT_v4_7 0
#endif /* _IPAHAL_REG_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 37246e1..11c520f 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1228,7 +1228,7 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
unsigned long flags;
- if (rmnet_ipa3_ctx->ipa_config_is_apq) {
+ if (unlikely(rmnet_ipa3_ctx->ipa_config_is_apq)) {
IPAWANERR_RL("IPA embedded data on APQ platform\n");
dev_kfree_skb_any(skb);
dev->stats.tx_dropped++;
@@ -1295,7 +1295,8 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&wwan_ptr->lock, flags);
return NETDEV_TX_BUSY;
}
- if (ret) {
+
+ if (unlikely(ret)) {
IPAWANERR("[%s] fatal: ipa pm activate failed %d\n",
dev->name, ret);
dev_kfree_skb_any(skb);
@@ -1318,7 +1319,7 @@ static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
* IPA_CLIENT_Q6_WAN_CONS based on status configuration
*/
ret = ipa3_tx_dp(IPA_CLIENT_APPS_WAN_PROD, skb, NULL);
- if (ret) {
+ if (unlikely(ret)) {
atomic_dec(&wwan_ptr->outstanding_pkts);
if (ret == -EPIPE) {
IPAWANERR_RL("[%s] fatal: pipe is not valid\n",
@@ -1419,7 +1420,7 @@ static void apps_ipa_packet_receive_notify(void *priv,
{
struct net_device *dev = (struct net_device *)priv;
- if (evt == IPA_RECEIVE) {
+ if (likely(evt == IPA_RECEIVE)) {
struct sk_buff *skb = (struct sk_buff *)data;
int result;
unsigned int packet_len = skb->len;
@@ -1442,7 +1443,7 @@ static void apps_ipa_packet_receive_notify(void *priv,
}
}
- if (result) {
+ if (unlikely(result)) {
pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
__func__, __LINE__);
dev->stats.rx_dropped++;
@@ -3103,10 +3104,15 @@ static int rmnet_ipa3_set_data_quota_wifi(struct wan_ioctl_set_data_quota *data)
IPAWANERR("iface name %s, quota %lu\n",
data->interface_name, (unsigned long) data->quota_mbytes);
- rc = ipa3_set_wlan_quota(&wifi_quota);
- /* check if wlan-fw takes this quota-set */
- if (!wifi_quota.set_valid)
- rc = -EFAULT;
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+ IPADBG("use ipa-uc for quota\n");
+ rc = ipa3_uc_quota_monitor(data->set_quota);
+ } else {
+ rc = ipa3_set_wlan_quota(&wifi_quota);
+ /* check if wlan-fw takes this quota-set */
+ if (!wifi_quota.set_valid)
+ rc = -EFAULT;
+ }
return rc;
}
@@ -3588,36 +3594,42 @@ static int rmnet_ipa3_query_tethering_stats_hw(
data->ipv6_tx_bytes +=
con_stats->client[index].num_ipv6_bytes;
- /* query WIGIG UL stats */
- memset(con_stats, 0, sizeof(struct ipa_quota_stats_all));
- rc = ipa_query_teth_stats(IPA_CLIENT_WIGIG_PROD, con_stats, reset);
- if (rc) {
- IPAERR("IPA_CLIENT_WIGIG_PROD query failed %d\n", rc);
- kfree(con_stats);
- return rc;
+ if (ipa3_get_ep_mapping(IPA_CLIENT_WIGIG_PROD) !=
+ IPA_EP_NOT_ALLOCATED) {
+ /* query WIGIG UL stats */
+ memset(con_stats, 0, sizeof(struct ipa_quota_stats_all));
+ rc = ipa_query_teth_stats(IPA_CLIENT_WIGIG_PROD, con_stats,
+ reset);
+ if (rc) {
+ IPAERR("IPA_CLIENT_WIGIG_PROD query failed %d\n", rc);
+ kfree(con_stats);
+ return rc;
+ }
+
+ if (rmnet_ipa3_ctx->ipa_config_is_apq)
+ index = IPA_CLIENT_MHI_PRIME_TETH_CONS;
+ else
+ index = IPA_CLIENT_Q6_WAN_CONS;
+
+ IPAWANDBG("wigig: v4_tx_p(%d) b(%lld) v6_tx_p(%d) b(%lld)\n",
+ con_stats->client[index].num_ipv4_pkts,
+ con_stats->client[index].num_ipv4_bytes,
+ con_stats->client[index].num_ipv6_pkts,
+ con_stats->client[index].num_ipv6_bytes);
+
+ /* update the WIGIG UL stats */
+ data->ipv4_tx_packets +=
+ con_stats->client[index].num_ipv4_pkts;
+ data->ipv6_tx_packets +=
+ con_stats->client[index].num_ipv6_pkts;
+ data->ipv4_tx_bytes +=
+ con_stats->client[index].num_ipv4_bytes;
+ data->ipv6_tx_bytes +=
+ con_stats->client[index].num_ipv6_bytes;
+ } else {
+ IPAWANDBG("IPA_CLIENT_WIGIG_PROD client not supported\n");
}
- if (rmnet_ipa3_ctx->ipa_config_is_apq)
- index = IPA_CLIENT_MHI_PRIME_TETH_CONS;
- else
- index = IPA_CLIENT_Q6_WAN_CONS;
-
- IPAWANDBG("wigig: v4_tx_p(%d) b(%lld) v6_tx_p(%d) b(%lld)\n",
- con_stats->client[index].num_ipv4_pkts,
- con_stats->client[index].num_ipv4_bytes,
- con_stats->client[index].num_ipv6_pkts,
- con_stats->client[index].num_ipv6_bytes);
-
- /* update the WIGIG UL stats */
- data->ipv4_tx_packets +=
- con_stats->client[index].num_ipv4_pkts;
- data->ipv6_tx_packets +=
- con_stats->client[index].num_ipv6_pkts;
- data->ipv4_tx_bytes +=
- con_stats->client[index].num_ipv4_bytes;
- data->ipv6_tx_bytes +=
- con_stats->client[index].num_ipv6_bytes;
-
IPAWANDBG("v4_tx_p(%lu) v6_tx_p(%lu) v4_tx_b(%lu) v6_tx_b(%lu)\n",
(unsigned long) data->ipv4_tx_packets,
(unsigned long) data->ipv6_tx_packets,
@@ -3627,6 +3639,144 @@ static int rmnet_ipa3_query_tethering_stats_hw(
return rc;
}
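+/*
+ * Query the UL/DL FNR hardware counters for the wifi backhaul, report
+ * the hw byte totals, then accumulate the hw reads into the matching
+ * sw-cache counters and write the cache back.
+ */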
+static int rmnet_ipa3_query_tethering_stats_fnr(
+ struct wan_ioctl_query_tether_stats_all *data)
+{
+ int rc = 0;
+ int num_counters;
+ struct ipa_ioc_flt_rt_query fnr_stats, fnr_stats_sw;
+ struct ipacm_fnr_info fnr_info;
+
+ memset(&fnr_stats, 0, sizeof(struct ipa_ioc_flt_rt_query));
+ memset(&fnr_stats_sw, 0, sizeof(struct ipa_ioc_flt_rt_query));
+ memset(&fnr_info, 0, sizeof(struct ipacm_fnr_info));
+
+ if (!ipa_get_fnr_info(&fnr_info)) {
+ IPAERR("FNR counter haven't configured\n");
+ return -EINVAL;
+ }
+
+ fnr_stats.start_id = fnr_info.hw_counter_offset + UL_HW;
+ fnr_stats.end_id = fnr_info.hw_counter_offset + DL_HW;
+ fnr_stats.reset = data->reset_stats;
+ num_counters = fnr_stats.end_id - fnr_stats.start_id + 1;
+
+ if (num_counters != 2) {
+ IPAWANERR("Only query 2 counters\n");
+ return -EINVAL;
+ }
+
+ fnr_stats.stats = (uint64_t)kcalloc(
+ num_counters,
+ sizeof(struct ipa_flt_rt_stats),
+ GFP_KERNEL);
+ if (!fnr_stats.stats) {
+ IPAERR("Failed to allocate memory for query hw-stats\n");
+ return -ENOMEM;
+ }
+
+ if (ipa_get_flt_rt_stats(&fnr_stats)) {
+ IPAERR("Failed to request stats from h/w\n");
+ rc = -EINVAL;
+ goto free_stats;
+ }
+
+ IPAWANDBG("ul: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+ ((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_bytes,
+ ((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_pkts,
+ ((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_pkts_hash);
+ IPAWANDBG("dl: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+ ((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_bytes,
+ ((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_pkts,
+ ((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_pkts_hash);
+
+ data->tx_bytes =
+ ((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_bytes;
+ data->rx_bytes =
+ ((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_bytes;
+
+ /* get the sw stats */
+ fnr_stats_sw.start_id = fnr_info.sw_counter_offset + UL_HW_CACHE;
+ fnr_stats_sw.end_id = fnr_info.sw_counter_offset + DL_HW_CACHE;
+ fnr_stats_sw.reset = data->reset_stats;
+ num_counters = fnr_stats_sw.end_id - fnr_stats_sw.start_id + 1;
+
+ if (num_counters != 2) {
+ IPAWANERR("Only 2 counters supported\n");
+ rc = -EINVAL;
+ goto free_stats;
+ }
+
+ fnr_stats_sw.stats = (uint64_t)kcalloc(
+ num_counters,
+ sizeof(struct ipa_flt_rt_stats),
+ GFP_KERNEL);
+ if (!fnr_stats_sw.stats) {
+ IPAERR("Failed to allocate memory for query sw-stats\n");
+ rc = -ENOMEM;
+ goto free_stats;
+ }
+
+ if (ipa_get_flt_rt_stats(&fnr_stats_sw)) {
+ IPAERR("Failed to request stats from h/w\n");
+ rc = -EINVAL;
+ goto free_stats2;
+ }
+
+ IPAWANDBG("ul sw: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_bytes,
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts,
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts_hash);
+ IPAWANDBG("dl sw: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_bytes,
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts,
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts_hash);
+
+ /* update the sw-cache */
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_bytes +=
+ ((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_bytes;
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts +=
+ ((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_pkts;
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts_hash +=
+ ((struct ipa_flt_rt_stats *)fnr_stats.stats)[0].num_pkts_hash;
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_bytes +=
+ ((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_bytes;
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts +=
+ ((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_pkts;
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts_hash +=
+ ((struct ipa_flt_rt_stats *)fnr_stats.stats)[1].num_pkts_hash;
+
+ IPAWANDBG("ul sw: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_bytes,
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts,
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0].num_pkts_hash);
+ IPAWANDBG("dl sw: bytes = %llu, pkts = %u, pkts_hash = %u\n",
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_bytes,
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts,
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1].num_pkts_hash);
+ /* write to the sw cache */
+ if (ipa_set_flt_rt_stats(fnr_info.sw_counter_offset +
+ UL_HW_CACHE,
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[0])) {
+ IPAERR("Failed to set stats to sw-cache %d\n",
+ fnr_info.sw_counter_offset + UL_HW_CACHE);
+ rc = -EINVAL;
+ goto free_stats2;
+ }
+
+ if (ipa_set_flt_rt_stats(fnr_info.sw_counter_offset +
+ DL_HW_CACHE,
+ ((struct ipa_flt_rt_stats *)fnr_stats_sw.stats)[1])) {
+ IPAERR("Failed to set stats to sw-cache %d\n",
+ fnr_info.sw_counter_offset + DL_HW_CACHE);
+ rc = -EINVAL;
+ goto free_stats2;
+ }
+
+free_stats2:
+ kfree((void *)fnr_stats_sw.stats);
+free_stats:
+ kfree((void *)fnr_stats.stats);
+ return rc;
+}
int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
bool reset)
@@ -3684,17 +3834,30 @@ int rmnet_ipa3_query_tethering_stats_all(
data->upstreamIface);
} else if (upstream_type == IPA_UPSTEAM_WLAN) {
IPAWANDBG_LOW(" query wifi-backhaul stats\n");
- rc = rmnet_ipa3_query_tethering_stats_wifi(
- &tether_stats, data->reset_stats);
- if (rc) {
- IPAWANERR_RL(
- "wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
- return rc;
+ if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5 ||
+ !ipa3_ctx->hw_stats.enabled) {
+ IPAWANDBG("hw version %d,hw_stats.enabled %d\n",
+ ipa3_ctx->ipa_hw_type,
+ ipa3_ctx->hw_stats.enabled);
+ rc = rmnet_ipa3_query_tethering_stats_wifi(
+ &tether_stats, data->reset_stats);
+ if (rc) {
+ IPAWANERR_RL(
+ "wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+ return rc;
+ }
+ data->tx_bytes = tether_stats.ipv4_tx_bytes
+ + tether_stats.ipv6_tx_bytes;
+ data->rx_bytes = tether_stats.ipv4_rx_bytes
+ + tether_stats.ipv6_rx_bytes;
+ } else {
+ rc = rmnet_ipa3_query_tethering_stats_fnr(data);
+ if (rc) {
+ IPAWANERR_RL(
+ "wlan WAN_IOC_QUERY_TETHER_STATS failed\n");
+ return rc;
+ }
}
- data->tx_bytes = tether_stats.ipv4_tx_bytes
- + tether_stats.ipv6_tx_bytes;
- data->rx_bytes = tether_stats.ipv4_rx_bytes
- + tether_stats.ipv6_rx_bytes;
} else {
IPAWANDBG_LOW(" query modem-backhaul stats\n");
tether_stats.ipa_client = data->ipa_client;
diff --git a/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
index c1e450d..caae2ba 100644
--- a/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
+++ b/drivers/platform/msm/ipa/test/ipa_test_hw_stats.c
@@ -728,6 +728,191 @@ static int ipa_test_hw_stats_set_sw_stats(void *priv)
return ret;
}
+static int ipa_test_hw_stats_set_uc_event_ring(void *priv)
+{
+ struct ipa_ioc_flt_rt_counter_alloc *counter = NULL;
+ int ret = 0;
+
+ /* set uc event ring */
+ IPA_UT_INFO("========set uc event ring ========\n");
+
+ if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
+ if (ipa3_ctx->uc_ctx.uc_loaded &&
+ !ipa3_ctx->uc_ctx.uc_event_ring_valid) {
+ if (ipa3_uc_setup_event_ring()) {
+ IPA_UT_ERR("failed to set uc_event ring\n");
+ ret = -EFAULT;
+ }
+ } else {
+ IPA_UT_ERR("uc-loaded %d, ring-valid %d\n",
+ ipa3_ctx->uc_ctx.uc_loaded,
+ ipa3_ctx->uc_ctx.uc_event_ring_valid);
+ }
+ }
+ IPA_UT_INFO("================ done ============\n");
+
+ /* allocate counters */
+ IPA_UT_INFO("========set hw counter ========\n");
+
+ counter = kzalloc(sizeof(struct ipa_ioc_flt_rt_counter_alloc),
+ GFP_KERNEL);
+ if (!counter)
+ return -ENOMEM;
+
+ counter->hw_counter.num_counters = 4;
+ counter->sw_counter.num_counters = 4;
+
+ /* allocate counters */
+ ret = ipa3_alloc_counter_id(counter);
+ if (ret < 0) {
+ IPA_UT_ERR("ipa3_alloc_counter_id fails\n");
+ kfree(counter);
+ return -ENOMEM;
+ }
+ ipa3_ctx->fnr_info.hw_counter_offset = counter->hw_counter.start_id;
+ ipa3_ctx->fnr_info.sw_counter_offset = counter->sw_counter.start_id;
+ IPA_UT_INFO("hw-offset %d, sw-offset %d\n",
+ ipa3_ctx->fnr_info.hw_counter_offset,
+ ipa3_ctx->fnr_info.sw_counter_offset);
+
+ kfree(counter);
+ return ret;
+}
+
+static int ipa_test_hw_stats_set_quota(void *priv)
+{
+ int ret;
+ uint64_t quota = 500;
+
+ IPA_UT_INFO("========set quota ========\n");
+ ret = ipa3_uc_quota_monitor(quota);
+ if (ret < 0) {
+ IPA_UT_ERR("ipa3_uc_quota_monitor fails\n");
+ ret = -ENOMEM;
+ }
+ IPA_UT_INFO("================ done ============\n");
+ return ret;
+}
+
+static int ipa_test_hw_stats_set_bw(void *priv)
+{
+ int ret;
+ struct ipa_wdi_bw_info *info = NULL;
+
+ IPA_UT_INFO("========set BW voting ========\n");
+ info = kzalloc(sizeof(struct ipa_wdi_bw_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->num = 3;
+ info->threshold[0] = 200;
+ info->threshold[1] = 400;
+ info->threshold[2] = 600;
+
+ ret = ipa3_uc_bw_monitor(info);
+ if (ret < 0) {
+ IPA_UT_ERR("ipa3_uc_bw_monitor fails\n");
+ ret = -ENOMEM;
+ }
+
+ IPA_UT_INFO("================ done ============\n");
+
+ kfree(info);
+ return ret;
+}
+
+static int ipa_test_hw_stats_hit_quota(void *priv)
+{
+ int ret = 0, counter_index, i, j;
+ struct ipa_flt_rt_stats stats;
+ static const u64 byte_cnt[] = { 100, 200, 300, 500, 600, 1000 };
+
+ /* set sw counters, walking the same byte counts on every pass */
+ IPA_UT_INFO("========set quota hit========\n");
+ counter_index = ipa3_ctx->fnr_info.sw_counter_offset + UL_WLAN_TX;
+
+ for (i = 0; i < 5; i++) {
+ for (j = 0; j < ARRAY_SIZE(byte_cnt); j++) {
+ IPA_UT_INFO("========set %llu ========\n", byte_cnt[j]);
+ memset(&stats, 0, sizeof(struct ipa_flt_rt_stats));
+ stats.num_bytes = byte_cnt[j];
+ stats.num_pkts = 69;
+ IPA_UT_INFO(
+ "set counter %u pkt_cnt %u bytes cnt %llu\n",
+ counter_index, stats.num_pkts, stats.num_bytes);
+ ret = ipa_set_flt_rt_stats(counter_index, stats);
+ if (ret < 0) {
+ IPA_UT_ERR("ipa_set_flt_rt_stats fails\n");
+ ret = -ENOMEM;
+ }
+ msleep(1000);
+ }
+ }
+ IPA_UT_INFO("================ done ============\n");
+ return ret;
+}
+
/* Suite definition block */
IPA_UT_DEFINE_SUITE_START(hw_stats, "HW stats test",
ipa_test_hw_stats_suite_setup, ipa_test_hw_stats_suite_teardown)
@@ -758,4 +943,20 @@ IPA_UT_DEFINE_SUITE_START(hw_stats, "HW stats test",
ipa_test_hw_stats_set_sw_stats, false,
IPA_HW_v4_5, IPA_HW_MAX),
+ IPA_UT_ADD_TEST(set_uc_evtring, "Set uc event ring",
+ ipa_test_hw_stats_set_uc_event_ring, false,
+ IPA_HW_v4_5, IPA_HW_MAX),
+
+ IPA_UT_ADD_TEST(set_quota, "Set quota",
+ ipa_test_hw_stats_set_quota, false,
+ IPA_HW_v4_5, IPA_HW_MAX),
+
+ IPA_UT_ADD_TEST(set_bw_voting, "Set bw_voting",
+ ipa_test_hw_stats_set_bw, false,
+ IPA_HW_v4_5, IPA_HW_MAX),
+
+ IPA_UT_ADD_TEST(hit_quota, "quota hits",
+ ipa_test_hw_stats_hit_quota, false,
+ IPA_HW_v4_5, IPA_HW_MAX),
+
} IPA_UT_DEFINE_SUITE_END(hw_stats);
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 528b461..b485eb9 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -126,6 +126,7 @@ struct msm11ad_ctx {
struct cpumask boost_cpu_1;
bool keep_radio_on_during_sleep;
+ bool use_ap_ps;
int features;
};
@@ -1054,6 +1055,7 @@ static int msm_11ad_probe(struct platform_device *pdev)
* qcom,msm-bus,vectors-KBps =
* <100 512 0 0>,
* <100 512 600000 800000>;
+ * qcom,use-ap-power-save; (ctx->use_ap_ps)
*};
* rc_node stands for "qcom,pcie", selected entries:
* cell-index = <1>; (ctx->rc_index)
@@ -1094,6 +1096,8 @@ static int msm_11ad_probe(struct platform_device *pdev)
rc = -EINVAL;
goto out_module;
}
+ ctx->use_ap_ps = of_property_read_bool(of_node,
+ "qcom,use-ap-power-save");
/*== execute ==*/
/* turn device on */
@@ -1558,6 +1562,12 @@ static int ops_get_capa(void *handle)
if (!ctx->smmu_s1_bypass)
capa |= BIT(WIL_PLATFORM_CAPA_SMMU);
+ pr_debug("%s: use AP power save is %s\n", __func__, ctx->use_ap_ps ?
+ "allowed" : "not allowed");
+
+ if (ctx->use_ap_ps)
+ capa |= BIT(WIL_PLATFORM_CAPA_AP_PS);
+
return capa;
}
diff --git a/drivers/platform/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index 1fbd8fc..33ff3d8 100644
--- a/drivers/platform/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -108,8 +108,6 @@ static int msm_ext_disp_add_intf_data(struct msm_ext_disp *ext_disp,
list_for_each(pos, &ext_disp->display_list)
count++;
- data->codec.stream_id = count;
-
list_add(&node->list, &ext_disp->display_list);
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index acfecd1..2750425 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -3,7 +3,6 @@
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
-#include <asm/dma-iommu.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/ipc_logging.h>
@@ -1365,56 +1364,6 @@ EXPORT_SYMBOL(geni_se_qupv3_hw_version);
static int geni_se_iommu_map_and_attach(struct geni_se_device *geni_se_dev)
{
- dma_addr_t va_start = GENI_SE_IOMMU_VA_START;
- size_t va_size = GENI_SE_IOMMU_VA_SIZE;
- int bypass = 1;
- struct device *cb_dev = geni_se_dev->cb_dev;
-
- /*Don't proceed if IOMMU node is disabled*/
- if (!iommu_present(&platform_bus_type))
- return 0;
-
- mutex_lock(&geni_se_dev->iommu_lock);
- if (likely(geni_se_dev->iommu_map)) {
- mutex_unlock(&geni_se_dev->iommu_lock);
- return 0;
- }
-
- geni_se_dev->iommu_map =
- __depr_arm_iommu_create_mapping(&platform_bus_type,
- va_start, va_size);
- if (IS_ERR(geni_se_dev->iommu_map)) {
- GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
- "%s:%s iommu_create_mapping failure\n",
- __func__, dev_name(cb_dev));
- mutex_unlock(&geni_se_dev->iommu_lock);
- return PTR_ERR(geni_se_dev->iommu_map);
- }
-
- if (geni_se_dev->iommu_s1_bypass &&
- iommu_domain_set_attr(geni_se_dev->iommu_map->domain,
- DOMAIN_ATTR_S1_BYPASS, &bypass)) {
- GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
- "%s:%s Couldn't bypass s1 translation\n",
- __func__, dev_name(cb_dev));
- __depr_arm_iommu_release_mapping(geni_se_dev->iommu_map);
- geni_se_dev->iommu_map = NULL;
- mutex_unlock(&geni_se_dev->iommu_lock);
- return -EIO;
- }
-
- if (__depr_arm_iommu_attach_device(cb_dev, geni_se_dev->iommu_map)) {
- GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
- "%s:%s couldn't arm_iommu_attach_device\n",
- __func__, dev_name(cb_dev));
- __depr_arm_iommu_release_mapping(geni_se_dev->iommu_map);
- geni_se_dev->iommu_map = NULL;
- mutex_unlock(&geni_se_dev->iommu_lock);
- return -EIO;
- }
- mutex_unlock(&geni_se_dev->iommu_lock);
- GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL, "%s:%s successful\n",
- __func__, dev_name(cb_dev));
return 0;
}
@@ -1856,10 +1805,6 @@ static int geni_se_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct geni_se_device *geni_se_dev = dev_get_drvdata(dev);
- if (likely(!IS_ERR_OR_NULL(geni_se_dev->iommu_map))) {
- __depr_arm_iommu_detach_device(geni_se_dev->cb_dev);
- __depr_arm_iommu_release_mapping(geni_se_dev->iommu_map);
- }
ipc_log_context_destroy(geni_se_dev->log_ctx);
devm_iounmap(dev, geni_se_dev->base);
devm_kfree(dev, geni_se_dev);
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 12cc4f6..a0ae712 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -103,6 +103,7 @@ enum fg_debug_flag {
FG_BUS_READ = BIT(6), /* Show REGMAP reads */
FG_CAP_LEARN = BIT(7), /* Show capacity learning */
FG_TTF = BIT(8), /* Show time to full */
+ FG_FVSS = BIT(9), /* Show FVSS */
};
/* SRAM access */
diff --git a/drivers/power/supply/qcom/qg-battery-profile.c b/drivers/power/supply/qcom/qg-battery-profile.c
index cc46ea5..69fff08 100644
--- a/drivers/power/supply/qcom/qg-battery-profile.c
+++ b/drivers/power/supply/qcom/qg-battery-profile.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "QG-K: %s: " fmt, __func__
@@ -57,6 +57,8 @@ static struct tables table[] = {
static struct qg_battery_data *the_battery;
+static void qg_battery_profile_free(void);
+
static int qg_battery_data_open(struct inode *inode, struct file *file)
{
struct qg_battery_data *battery = container_of(inode->i_cdev,
@@ -100,7 +102,8 @@ static long qg_battery_data_ioctl(struct file *file, unsigned int cmd,
rc = -EINVAL;
} else {
/* OCV is passed as deci-uV - 10^-4 V */
- soc = interpolate_soc(&battery->profile[bp.table_index],
+ soc = qg_interpolate_soc(
+ &battery->profile[bp.table_index],
bp.batt_temp, UV_TO_DECIUV(bp.ocv_uv));
soc = CAP(QG_MIN_SOC, QG_MAX_SOC, soc);
rc = put_user(soc, &bp_user->soc);
@@ -120,7 +123,7 @@ static long qg_battery_data_ioctl(struct file *file, unsigned int cmd,
bp.table_index);
rc = -EINVAL;
} else {
- ocv_uv = interpolate_var(
+ ocv_uv = qg_interpolate_var(
&battery->profile[bp.table_index],
bp.batt_temp, bp.soc);
ocv_uv = DECIUV_TO_UV(ocv_uv);
@@ -142,7 +145,7 @@ static long qg_battery_data_ioctl(struct file *file, unsigned int cmd,
bp.table_index);
rc = -EINVAL;
} else {
- fcc_mah = interpolate_single_row_lut(
+ fcc_mah = qg_interpolate_single_row_lut(
&battery->profile[bp.table_index],
bp.batt_temp, DEGC_SCALE);
fcc_mah = CAP(QG_MIN_FCC_MAH, QG_MAX_FCC_MAH, fcc_mah);
@@ -162,7 +165,8 @@ static long qg_battery_data_ioctl(struct file *file, unsigned int cmd,
bp.table_index);
rc = -EINVAL;
} else {
- var = interpolate_var(&battery->profile[bp.table_index],
+ var = qg_interpolate_var(
+ &battery->profile[bp.table_index],
bp.batt_temp, bp.soc);
var = CAP(QG_MIN_VAR, QG_MAX_VAR, var);
rc = put_user(var, &bp_user->var);
@@ -182,7 +186,7 @@ static long qg_battery_data_ioctl(struct file *file, unsigned int cmd,
bp.table_index);
rc = -EINVAL;
} else {
- slope = interpolate_slope(
+ slope = qg_interpolate_slope(
&battery->profile[bp.table_index],
bp.batt_temp, bp.soc);
slope = CAP(QG_MIN_SLOPE, QG_MAX_SLOPE, slope);
@@ -394,7 +398,7 @@ int lookup_soc_ocv(u32 *soc, u32 ocv_uv, int batt_temp, bool charging)
if (!the_battery || !the_battery->profile_node)
return -ENODEV;
- *soc = interpolate_soc(&the_battery->profile[table_index],
+ *soc = qg_interpolate_soc(&the_battery->profile[table_index],
batt_temp, UV_TO_DECIUV(ocv_uv));
*soc = CAP(0, 100, DIV_ROUND_CLOSEST(*soc, 100));
@@ -410,7 +414,7 @@ int qg_get_nominal_capacity(u32 *nom_cap_uah, int batt_temp, bool charging)
if (!the_battery || !the_battery->profile_node)
return -ENODEV;
- fcc_mah = interpolate_single_row_lut(
+ fcc_mah = qg_interpolate_single_row_lut(
&the_battery->profile[table_index],
batt_temp, DEGC_SCALE);
fcc_mah = CAP(QG_MIN_FCC_MAH, QG_MAX_FCC_MAH, fcc_mah);
@@ -425,42 +429,56 @@ int qg_batterydata_init(struct device_node *profile_node)
int rc = 0;
struct qg_battery_data *battery;
- battery = kzalloc(sizeof(*battery), GFP_KERNEL);
- if (!battery)
- return -ENOMEM;
+ /*
+ * If a battery profile is already initialized, free the existing
+ * profile data and re-allocate and load the new profile. This is
+ * required for multi-profile load support.
+ */
+ if (the_battery) {
+ battery = the_battery;
+ battery->profile_node = NULL;
+ qg_battery_profile_free();
+ } else {
+ battery = kzalloc(sizeof(*battery), GFP_KERNEL);
+ if (!battery)
+ return -ENOMEM;
+ /* char device to access battery-profile data */
+ rc = alloc_chrdev_region(&battery->dev_no, 0, 1,
+ "qg_battery");
+ if (rc < 0) {
+ pr_err("Failed to allocate chrdev rc=%d\n", rc);
+ goto free_battery;
+ }
+
+ cdev_init(&battery->battery_cdev, &qg_battery_data_fops);
+ rc = cdev_add(&battery->battery_cdev,
+ battery->dev_no, 1);
+ if (rc) {
+ pr_err("Failed to add battery_cdev rc=%d\n", rc);
+ goto unregister_chrdev;
+ }
+
+ battery->battery_class = class_create(THIS_MODULE,
+ "qg_battery");
+ if (IS_ERR_OR_NULL(battery->battery_class)) {
+ pr_err("Failed to create qg-battery class\n");
+ rc = -ENODEV;
+ goto delete_cdev;
+ }
+
+ battery->battery_device = device_create(
+ battery->battery_class,
+ NULL, battery->dev_no,
+ NULL, "qg_battery");
+ if (IS_ERR_OR_NULL(battery->battery_device)) {
+ pr_err("Failed to create battery_device device\n");
+ rc = -ENODEV;
+ goto delete_cdev;
+ }
+ the_battery = battery;
+ }
battery->profile_node = profile_node;
-
- /* char device to access battery-profile data */
- rc = alloc_chrdev_region(&battery->dev_no, 0, 1, "qg_battery");
- if (rc < 0) {
- pr_err("Failed to allocate chrdev rc=%d\n", rc);
- goto free_battery;
- }
-
- cdev_init(&battery->battery_cdev, &qg_battery_data_fops);
- rc = cdev_add(&battery->battery_cdev, battery->dev_no, 1);
- if (rc) {
- pr_err("Failed to add battery_cdev rc=%d\n", rc);
- goto unregister_chrdev;
- }
-
- battery->battery_class = class_create(THIS_MODULE, "qg_battery");
- if (IS_ERR_OR_NULL(battery->battery_class)) {
- pr_err("Failed to create qg-battery class\n");
- rc = -ENODEV;
- goto delete_cdev;
- }
-
- battery->battery_device = device_create(battery->battery_class,
- NULL, battery->dev_no,
- NULL, "qg_battery");
- if (IS_ERR_OR_NULL(battery->battery_device)) {
- pr_err("Failed to create battery_device device\n");
- rc = -ENODEV;
- goto delete_cdev;
- }
-
/* parse the battery profile */
rc = qg_parse_battery_profile(battery);
if (rc < 0) {
@@ -468,9 +486,7 @@ int qg_batterydata_init(struct device_node *profile_node)
goto destroy_device;
}
- the_battery = battery;
-
- pr_info("QG Battery-profile loaded, '/dev/qg_battery' created!\n");
+ pr_info("QG Battery-profile loaded\n");
return 0;
@@ -485,27 +501,31 @@ int qg_batterydata_init(struct device_node *profile_node)
return rc;
}
-void qg_batterydata_exit(void)
+static void qg_battery_profile_free(void)
{
int i, j;
+ /* delete all the battery profile memory */
+ for (i = 0; i < TABLE_MAX; i++) {
+ kfree(the_battery->profile[i].name);
+ kfree(the_battery->profile[i].row_entries);
+ kfree(the_battery->profile[i].col_entries);
+ for (j = 0; j < the_battery->profile[i].rows; j++) {
+ if (the_battery->profile[i].data)
+ kfree(the_battery->profile[i].data[j]);
+ }
+ kfree(the_battery->profile[i].data);
+ }
+}
+
+void qg_batterydata_exit(void)
+{
if (the_battery) {
/* unregister the device node */
device_destroy(the_battery->battery_class, the_battery->dev_no);
cdev_del(&the_battery->battery_cdev);
unregister_chrdev_region(the_battery->dev_no, 1);
-
- /* delete all the battery profile memory */
- for (i = 0; i < TABLE_MAX; i++) {
- kfree(the_battery->profile[i].name);
- kfree(the_battery->profile[i].row_entries);
- kfree(the_battery->profile[i].col_entries);
- for (j = 0; j < the_battery->profile[i].rows; j++) {
- if (the_battery->profile[i].data)
- kfree(the_battery->profile[i].data[j]);
- }
- kfree(the_battery->profile[i].data);
- }
+ qg_battery_profile_free();
}
kfree(the_battery);
diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h
index 7e0b20c..b011c86 100644
--- a/drivers/power/supply/qcom/qg-core.h
+++ b/drivers/power/supply/qcom/qg-core.h
@@ -57,6 +57,7 @@ struct qg_dt {
int shutdown_soc_threshold;
int min_sleep_time_secs;
int sys_min_volt_mv;
+ int fvss_vbat_mv;
bool hold_soc_while_full;
bool linearize_soc;
bool cl_disable;
@@ -67,6 +68,8 @@ struct qg_dt {
bool use_s7_ocv;
bool qg_sleep_config;
bool qg_fast_chg_cfg;
+ bool fvss_enable;
+ bool multi_profile_load;
};
struct qg_esr_data {
@@ -83,10 +86,12 @@ struct qpnp_qg {
struct pmic_revid_data *pmic_rev_id;
struct regmap *regmap;
struct qpnp_vadc_chip *vadc_dev;
+ struct soh_profile *sp;
struct power_supply *qg_psy;
struct class *qg_class;
struct device *qg_device;
struct cdev qg_cdev;
+ struct device_node *batt_node;
struct dentry *dfs_root;
dev_t dev_no;
struct work_struct udata_work;
@@ -129,6 +134,7 @@ struct qpnp_qg {
bool dc_present;
bool charge_full;
bool force_soc;
+ bool fvss_active;
int charge_status;
int charge_type;
int chg_iterm_ma;
@@ -137,6 +143,8 @@ struct qpnp_qg {
int esr_nominal;
int soh;
int soc_reporting_ready;
+ int last_fifo_v_uv;
+ int last_fifo_i_ua;
u32 fifo_done_count;
u32 wa_flags;
u32 seq_no;
@@ -145,6 +153,8 @@ struct qpnp_qg {
u32 esr_last;
u32 s2_state;
u32 s2_state_mask;
+ u32 soc_fvss_entry;
+ u32 vbat_fvss_entry;
ktime_t last_user_update_time;
ktime_t last_fifo_update_time;
unsigned long last_maint_soc_update_time;
@@ -163,6 +173,7 @@ struct qpnp_qg {
int sys_soc;
int last_adj_ssoc;
int recharge_soc;
+ int batt_age_level;
struct alarm alarm_timer;
u32 sdam_data[SDAM_MAX];
diff --git a/drivers/power/supply/qcom/qg-profile-lib.c b/drivers/power/supply/qcom/qg-profile-lib.c
index 7585dad..bf8efc9 100644
--- a/drivers/power/supply/qcom/qg-profile-lib.c
+++ b/drivers/power/supply/qcom/qg-profile-lib.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
@@ -9,7 +9,7 @@
#include "qg-profile-lib.h"
#include "qg-defs.h"
-static int linear_interpolate(int y0, int x0, int y1, int x1, int x)
+int qg_linear_interpolate(int y0, int x0, int y1, int x1, int x)
{
if (y0 == y1 || x == x0)
return y0;
@@ -19,7 +19,7 @@ static int linear_interpolate(int y0, int x0, int y1, int x1, int x)
return y0 + ((y1 - y0) * (x - x0) / (x1 - x0));
}
-int interpolate_single_row_lut(struct profile_table_data *lut,
+int qg_interpolate_single_row_lut(struct profile_table_data *lut,
int x, int scale)
{
int i, result;
@@ -45,7 +45,7 @@ int interpolate_single_row_lut(struct profile_table_data *lut,
if (x == lut->col_entries[i] * scale) {
result = lut->data[0][i];
} else {
- result = linear_interpolate(
+ result = qg_linear_interpolate(
lut->data[0][i-1],
lut->col_entries[i-1] * scale,
lut->data[0][i],
@@ -56,7 +56,7 @@ int interpolate_single_row_lut(struct profile_table_data *lut,
return result;
}
-int interpolate_soc(struct profile_table_data *lut,
+int qg_interpolate_soc(struct profile_table_data *lut,
int batt_temp, int ocv)
{
int i, j, soc_high, soc_low, soc;
@@ -87,7 +87,7 @@ int interpolate_soc(struct profile_table_data *lut,
if (ocv >= lut->data[i][j]) {
if (ocv == lut->data[i][j])
return lut->row_entries[i];
- soc = linear_interpolate(
+ soc = qg_linear_interpolate(
lut->row_entries[i],
lut->data[i][j],
lut->row_entries[i - 1],
@@ -108,7 +108,7 @@ int interpolate_soc(struct profile_table_data *lut,
for (i = 0; i < rows-1; i++) {
if (soc_high == 0 && is_between(lut->data[i][j],
lut->data[i+1][j], ocv)) {
- soc_high = linear_interpolate(
+ soc_high = qg_linear_interpolate(
lut->row_entries[i],
lut->data[i][j],
lut->row_entries[i + 1],
@@ -118,7 +118,7 @@ int interpolate_soc(struct profile_table_data *lut,
if (soc_low == 0 && is_between(lut->data[i][j-1],
lut->data[i+1][j-1], ocv)) {
- soc_low = linear_interpolate(
+ soc_low = qg_linear_interpolate(
lut->row_entries[i],
lut->data[i][j-1],
lut->row_entries[i + 1],
@@ -127,7 +127,7 @@ int interpolate_soc(struct profile_table_data *lut,
}
if (soc_high && soc_low) {
- soc = linear_interpolate(
+ soc = qg_linear_interpolate(
soc_low,
lut->col_entries[j-1] * DEGC_SCALE,
soc_high,
@@ -148,7 +148,7 @@ int interpolate_soc(struct profile_table_data *lut,
return 10000;
}
-int interpolate_var(struct profile_table_data *lut,
+int qg_interpolate_var(struct profile_table_data *lut,
int batt_temp, int soc)
{
int i, var1, var2, var, rows, cols;
@@ -192,7 +192,7 @@ int interpolate_var(struct profile_table_data *lut,
break;
if (batt_temp == lut->col_entries[i] * DEGC_SCALE) {
- var = linear_interpolate(
+ var = qg_linear_interpolate(
lut->data[row1][i],
lut->row_entries[row1],
lut->data[row2][i],
@@ -201,21 +201,21 @@ int interpolate_var(struct profile_table_data *lut,
return var;
}
- var1 = linear_interpolate(
+ var1 = qg_linear_interpolate(
lut->data[row1][i - 1],
lut->col_entries[i - 1] * DEGC_SCALE,
lut->data[row1][i],
lut->col_entries[i] * DEGC_SCALE,
batt_temp);
- var2 = linear_interpolate(
+ var2 = qg_linear_interpolate(
lut->data[row2][i - 1],
lut->col_entries[i - 1] * DEGC_SCALE,
lut->data[row2][i],
lut->col_entries[i] * DEGC_SCALE,
batt_temp);
- var = linear_interpolate(
+ var = qg_linear_interpolate(
var1,
lut->row_entries[row1],
var2,
@@ -225,7 +225,7 @@ int interpolate_var(struct profile_table_data *lut,
return var;
}
-int interpolate_slope(struct profile_table_data *lut,
+int qg_interpolate_slope(struct profile_table_data *lut,
int batt_temp, int soc)
{
int i, ocvrow1, ocvrow2, rows, cols;
@@ -277,14 +277,14 @@ int interpolate_slope(struct profile_table_data *lut,
lut->row_entries[row2]);
return slope;
}
- ocvrow1 = linear_interpolate(
+ ocvrow1 = qg_linear_interpolate(
lut->data[row1][i - 1],
lut->col_entries[i - 1] * DEGC_SCALE,
lut->data[row1][i],
lut->col_entries[i] * DEGC_SCALE,
batt_temp);
- ocvrow2 = linear_interpolate(
+ ocvrow2 = qg_linear_interpolate(
lut->data[row2][i - 1],
lut->col_entries[i - 1] * DEGC_SCALE,
lut->data[row2][i],
diff --git a/drivers/power/supply/qcom/qg-profile-lib.h b/drivers/power/supply/qcom/qg-profile-lib.h
index 5585239..ba7e03e 100644
--- a/drivers/power/supply/qcom/qg-profile-lib.h
+++ b/drivers/power/supply/qcom/qg-profile-lib.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#ifndef __QG_PROFILE_LIB_H__
@@ -15,13 +15,14 @@ struct profile_table_data {
int **data;
};
-int interpolate_single_row_lut(struct profile_table_data *lut,
+int qg_linear_interpolate(int y0, int x0, int y1, int x1, int x);
+int qg_interpolate_single_row_lut(struct profile_table_data *lut,
int x, int scale);
-int interpolate_soc(struct profile_table_data *lut,
+int qg_interpolate_soc(struct profile_table_data *lut,
int batt_temp, int ocv);
-int interpolate_var(struct profile_table_data *lut,
+int qg_interpolate_var(struct profile_table_data *lut,
int batt_temp, int soc);
-int interpolate_slope(struct profile_table_data *lut,
+int qg_interpolate_slope(struct profile_table_data *lut,
int batt_temp, int soc);
#endif /*__QG_PROFILE_LIB_H__ */
diff --git a/drivers/power/supply/qcom/qg-reg.h b/drivers/power/supply/qcom/qg-reg.h
index 6e001de..0af5993 100644
--- a/drivers/power/supply/qcom/qg-reg.h
+++ b/drivers/power/supply/qcom/qg-reg.h
@@ -121,6 +121,7 @@
#define QG_SDAM_ESR_DISCHARGE_DELTA_OFFSET 0x6E /* 4-byte 0x6E-0x71 */
#define QG_SDAM_ESR_CHARGE_SF_OFFSET 0x72 /* 2-byte 0x72-0x73 */
#define QG_SDAM_ESR_DISCHARGE_SF_OFFSET 0x74 /* 2-byte 0x74-0x75 */
+#define QG_SDAM_BATT_AGE_LEVEL_OFFSET 0x76 /* 1-byte 0x76 */
#define QG_SDAM_MAGIC_OFFSET 0x80 /* 4-byte 0x80-0x83 */
#define QG_SDAM_MAX_OFFSET 0xA4
diff --git a/drivers/power/supply/qcom/qg-sdam.c b/drivers/power/supply/qcom/qg-sdam.c
index e641d11..aa357e5 100644
--- a/drivers/power/supply/qcom/qg-sdam.c
+++ b/drivers/power/supply/qcom/qg-sdam.c
@@ -81,6 +81,11 @@ static struct qg_sdam_info sdam_info[] = {
.offset = QG_SDAM_ESR_DISCHARGE_SF_OFFSET,
.length = 2,
},
+ [SDAM_BATT_AGE_LEVEL] = {
+ .name = "SDAM_BATT_AGE_LEVEL_OFFSET",
+ .offset = QG_SDAM_BATT_AGE_LEVEL_OFFSET,
+ .length = 1,
+ },
[SDAM_MAGIC] = {
.name = "SDAM_MAGIC_OFFSET",
.offset = QG_SDAM_MAGIC_OFFSET,
diff --git a/drivers/power/supply/qcom/qg-sdam.h b/drivers/power/supply/qcom/qg-sdam.h
index b0e48ed..32ce6f8 100644
--- a/drivers/power/supply/qcom/qg-sdam.h
+++ b/drivers/power/supply/qcom/qg-sdam.h
@@ -24,6 +24,7 @@ enum qg_sdam_param {
SDAM_ESR_CHARGE_SF,
SDAM_ESR_DISCHARGE_SF,
SDAM_MAGIC,
+ SDAM_BATT_AGE_LEVEL,
SDAM_MAX,
};
diff --git a/drivers/power/supply/qcom/qg-soc.c b/drivers/power/supply/qcom/qg-soc.c
index d8d0c6e..3ecd32b 100644
--- a/drivers/power/supply/qcom/qg-soc.c
+++ b/drivers/power/supply/qcom/qg-soc.c
@@ -17,6 +17,7 @@
#include "qg-reg.h"
#include "qg-util.h"
#include "qg-defs.h"
+#include "qg-profile-lib.h"
#include "qg-soc.h"
#define DEFAULT_UPDATE_TIME_MS 64000
@@ -45,6 +46,11 @@ static ssize_t soc_interval_ms_store(struct device *dev,
}
DEVICE_ATTR_RW(soc_interval_ms);
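+/* delta-SOC update period (ms) while FVSS scaling is active */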
+static int qg_fvss_delta_soc_interval_ms = 10000;
+module_param_named(
+ fvss_soc_interval_ms, qg_fvss_delta_soc_interval_ms, int, 0600
+);
+
static int qg_delta_soc_cold_interval_ms = 4000;
static ssize_t soc_cold_interval_ms_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -87,6 +93,84 @@ static ssize_t maint_soc_update_ms_store(struct device *dev,
}
DEVICE_ATTR_RW(maint_soc_update_ms);
+/* FVSS scaling only based on VBAT */
+static int qg_fvss_vbat_scaling = 1;
+module_param_named(
+ fvss_vbat_scaling, qg_fvss_vbat_scaling, int, 0600
+);
+
+static int qg_process_fvss_soc(struct qpnp_qg *chip, int sys_soc)
+{
+ int rc, vbat_uv = 0, vbat_cutoff_uv = chip->dt.vbatt_cutoff_mv * 1000;
+ int soc_vbat = 0, wt_vbat = 0, wt_sys = 0, soc_fvss = 0;
+
+ if (!chip->dt.fvss_enable)
+ goto exit_soc_scale;
+
+ if (chip->charge_status == POWER_SUPPLY_STATUS_CHARGING)
+ goto exit_soc_scale;
+
+ rc = qg_get_battery_voltage(chip, &vbat_uv);
+ if (rc < 0)
+ goto exit_soc_scale;
+
+ if (!chip->last_fifo_v_uv)
+ chip->last_fifo_v_uv = vbat_uv;
+
+ if (chip->last_fifo_v_uv > (chip->dt.fvss_vbat_mv * 1000)) {
+ qg_dbg(chip, QG_DEBUG_SOC, "FVSS: last_fifo_v=%d fvss_entry_uv=%d - exit\n",
+ chip->last_fifo_v_uv, chip->dt.fvss_vbat_mv * 1000);
+ goto exit_soc_scale;
+ }
+
+ /* Enter FVSS */
+ if (!chip->fvss_active) {
+ chip->vbat_fvss_entry = CAP(vbat_cutoff_uv,
+ chip->dt.fvss_vbat_mv * 1000,
+ chip->last_fifo_v_uv);
+ chip->soc_fvss_entry = sys_soc;
+ chip->fvss_active = true;
+ } else if (chip->last_fifo_v_uv > chip->vbat_fvss_entry) {
+ /* VBAT has gone beyond the entry voltage */
+ chip->vbat_fvss_entry = chip->last_fifo_v_uv;
+ chip->soc_fvss_entry = sys_soc;
+ }
+
+ soc_vbat = qg_linear_interpolate(chip->soc_fvss_entry,
+ chip->vbat_fvss_entry,
+ 0,
+ vbat_cutoff_uv,
+ chip->last_fifo_v_uv);
+ soc_vbat = CAP(0, 100, soc_vbat);
+
+ if (qg_fvss_vbat_scaling) {
+ wt_vbat = 100;
+ wt_sys = 0;
+ } else {
+ wt_sys = qg_linear_interpolate(100,
+ chip->soc_fvss_entry,
+ 0,
+ 0,
+ sys_soc);
+ wt_sys = CAP(0, 100, wt_sys);
+ wt_vbat = 100 - wt_sys;
+ }
+
+ soc_fvss = ((soc_vbat * wt_vbat) + (sys_soc * wt_sys)) / 100;
+ soc_fvss = CAP(0, 100, soc_fvss);
+
+ qg_dbg(chip, QG_DEBUG_SOC, "FVSS: vbat_fvss_entry=%d soc_fvss_entry=%d cutoff_uv=%d vbat_uv=%d fifo_avg_v=%d soc_vbat=%d sys_soc=%d wt_vbat=%d wt_sys=%d soc_fvss=%d\n",
+ chip->vbat_fvss_entry, chip->soc_fvss_entry,
+ vbat_cutoff_uv, vbat_uv, chip->last_fifo_v_uv,
+ soc_vbat, sys_soc, wt_vbat, wt_sys, soc_fvss);
+
+ return soc_fvss;
+
+exit_soc_scale:
+ chip->fvss_active = false;
+ return sys_soc;
+}
+
int qg_adjust_sys_soc(struct qpnp_qg *chip)
{
int soc, vbat_uv, rc;
@@ -94,7 +178,7 @@ int qg_adjust_sys_soc(struct qpnp_qg *chip)
chip->sys_soc = CAP(QG_MIN_SOC, QG_MAX_SOC, chip->sys_soc);
- if (chip->sys_soc == QG_MIN_SOC) {
+ if (chip->sys_soc <= 50) { /* 0.5% */
/* Hold SOC to 1% if VBAT has not dropped below cutoff */
rc = qg_get_battery_voltage(chip, &vbat_uv);
if (!rc && vbat_uv >= (vcutoff_uv + VBAT_LOW_HYST_UV))
@@ -113,8 +197,11 @@ int qg_adjust_sys_soc(struct qpnp_qg *chip)
soc = DIV_ROUND_CLOSEST(chip->sys_soc, 100);
}
- qg_dbg(chip, QG_DEBUG_SOC, "last_adj_sys_soc=%d adj_sys_soc=%d\n",
- chip->last_adj_ssoc, soc);
+ qg_dbg(chip, QG_DEBUG_SOC, "sys_soc=%d adjusted sys_soc=%d\n",
+ chip->sys_soc, soc);
+
+ soc = qg_process_fvss_soc(chip, soc);
+
chip->last_adj_ssoc = soc;
return soc;
@@ -144,6 +231,8 @@ static void get_next_update_time(struct qpnp_qg *chip)
else if (chip->maint_soc > 0 && chip->maint_soc >= chip->recharge_soc)
/* if in maintenance mode scale slower */
min_delta_soc_interval_ms = qg_maint_soc_update_ms;
+ else if (chip->fvss_active)
+ min_delta_soc_interval_ms = qg_fvss_delta_soc_interval_ms;
if (!min_delta_soc_interval_ms)
min_delta_soc_interval_ms = 1000; /* 1 second */
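For readers tracing the new qg_process_fvss_soc() above: the scaling step reduces to two linear interpolations and one weighted average. Below is a minimal userspace sketch of that arithmetic, assuming qg_linear_interpolate(y0, x0, y1, x1, x) from qg-profile-lib does plain two-point interpolation (as its use here implies); the sample values are illustrative only, not taken from the driver.

/*
 * Hedged model of the FVSS arithmetic; not driver code.
 * Build standalone: cc -o fvss fvss.c && ./fvss
 */
#include <stdio.h>

#define CAP(min, max, value) \
	((value) < (min) ? (min) : ((value) > (max) ? (max) : (value)))

/* y0 at x0, y1 at x1, evaluated at x; integer math as in the driver */
static int linear_interpolate(int y0, int x0, int y1, int x1, int x)
{
	if (x1 == x0)
		return y0;
	return y0 + (y1 - y0) * (x - x0) / (x1 - x0);
}

int main(void)
{
	int soc_fvss_entry = 10;	/* SOC (%) when FVSS engaged */
	int vbat_fvss_entry = 3500000;	/* uV at entry */
	int vbat_cutoff_uv = 3300000;	/* uV mapping to 0% */
	int last_fifo_v_uv = 3400000;	/* latest FIFO-averaged VBAT */
	int sys_soc = 8;		/* system SOC (%) */

	/* VBAT-only SOC: entry voltage -> entry SOC, cutoff -> 0 */
	int soc_vbat = CAP(0, 100,
		linear_interpolate(soc_fvss_entry, vbat_fvss_entry,
				   0, vbat_cutoff_uv, last_fifo_v_uv));

	/* blend weight shifts toward VBAT as SOC approaches 0 */
	int wt_sys = CAP(0, 100,
		linear_interpolate(100, soc_fvss_entry, 0, 0, sys_soc));
	int wt_vbat = 100 - wt_sys;
	int soc_fvss = CAP(0, 100,
		(soc_vbat * wt_vbat + sys_soc * wt_sys) / 100);

	/* prints soc_vbat=5 wt_vbat=20 wt_sys=80 soc_fvss=7 */
	printf("soc_vbat=%d wt_vbat=%d wt_sys=%d soc_fvss=%d\n",
	       soc_vbat, wt_vbat, wt_sys, soc_fvss);
	return 0;
}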
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c
index 24e8319..4bfa42f 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen4.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c
@@ -296,6 +296,7 @@ struct fg_gen4_chip {
int soc_scale_msoc;
int prev_soc_scale_msoc;
int soc_scale_slope;
+ int msoc_actual;
int vbatt_avg;
int vbatt_now;
int vbatt_res;
@@ -1122,7 +1123,7 @@ static int fg_gen4_get_prop_soc_scale(struct fg_gen4_chip *chip)
chip->vbatt_now = DIV_ROUND_CLOSEST(chip->vbatt_now, 1000);
chip->vbatt_avg = DIV_ROUND_CLOSEST(chip->vbatt_avg, 1000);
chip->vbatt_res = chip->vbatt_avg - chip->dt.cutoff_volt_mv;
- pr_debug("FVSS: Vbatt now=%d Vbatt avg=%d Vbatt res=%d\n",
+ fg_dbg(fg, FG_FVSS, "Vbatt now=%d Vbatt avg=%d Vbatt res=%d\n",
chip->vbatt_now, chip->vbatt_avg, chip->vbatt_res);
return rc;
@@ -2493,8 +2494,8 @@ static void profile_load_work(struct work_struct *work)
out:
if (!chip->esr_fast_calib || is_debug_batt_id(fg)) {
/* If it is debug battery, then disable ESR fast calibration */
- chip->esr_fast_calib = false;
fg_gen4_esr_fast_calib_config(chip, false);
+ chip->esr_fast_calib = false;
}
if (chip->dt.multi_profile_load && rc < 0)
@@ -3215,7 +3216,7 @@ static int fg_gen4_enter_soc_scale(struct fg_gen4_chip *chip)
}
chip->soc_scale_mode = true;
- pr_debug("FVSS: Enter FVSS mode, SOC=%d slope=%d timer=%d\n", soc,
+ fg_dbg(fg, FG_FVSS, "Enter FVSS mode, SOC=%d slope=%d timer=%d\n", soc,
chip->soc_scale_slope, chip->scale_timer);
alarm_start_relative(&chip->soc_scale_alarm_timer,
ms_to_ktime(chip->scale_timer));
@@ -3245,6 +3246,8 @@ static void fg_gen4_write_scale_msoc(struct fg_gen4_chip *chip)
static void fg_gen4_exit_soc_scale(struct fg_gen4_chip *chip)
{
+ struct fg_dev *fg = &chip->fg;
+
if (chip->soc_scale_mode) {
alarm_cancel(&chip->soc_scale_alarm_timer);
cancel_work_sync(&chip->soc_scale_work);
@@ -3253,13 +3256,13 @@ static void fg_gen4_exit_soc_scale(struct fg_gen4_chip *chip)
}
chip->soc_scale_mode = false;
- pr_debug("FVSS: Exit FVSS mode\n");
+ fg_dbg(fg, FG_FVSS, "Exit FVSS mode\n");
}
static int fg_gen4_validate_soc_scale_mode(struct fg_gen4_chip *chip)
{
struct fg_dev *fg = &chip->fg;
- int rc, msoc_actual;
+ int rc;
if (!chip->dt.soc_scale_mode)
return 0;
@@ -3270,7 +3273,7 @@ static int fg_gen4_validate_soc_scale_mode(struct fg_gen4_chip *chip)
goto fail_soc_scale;
}
- rc = fg_get_msoc(fg, &msoc_actual);
+ rc = fg_get_msoc(fg, &chip->msoc_actual);
if (rc < 0) {
pr_err("Failed to get msoc rc=%d\n", rc);
goto fail_soc_scale;
@@ -3289,7 +3292,7 @@ static int fg_gen4_validate_soc_scale_mode(struct fg_gen4_chip *chip)
* Stay in SOC scale mode till H/W SOC catch scaled SOC
* while charging.
*/
- if (msoc_actual >= chip->soc_scale_msoc)
+ if (chip->msoc_actual >= chip->soc_scale_msoc)
fg_gen4_exit_soc_scale(chip);
}
@@ -3966,8 +3969,24 @@ static void soc_scale_work(struct work_struct *work)
mutex_lock(&chip->soc_scale_lock);
soc = DIV_ROUND_CLOSEST(chip->vbatt_res,
chip->soc_scale_slope);
- /* If calculated SOC is higher than current SOC, report current SOC */
- if (soc > chip->prev_soc_scale_msoc) {
+ chip->soc_scale_msoc = soc;
+ chip->scale_timer = chip->dt.scale_timer_ms;
+
+ fg_dbg(fg, FG_FVSS, "soc: %d last soc: %d msoc_actual: %d\n", soc,
+ chip->prev_soc_scale_msoc, chip->msoc_actual);
+ if ((chip->prev_soc_scale_msoc - chip->msoc_actual) > soc_thr_percent) {
+ /*
+ * If the difference between the previous SW-calculated SOC and
+ * the HW SOC exceeds the SOC threshold, report the previous
+ * SW SOC minus the SOC threshold.
+ */
+ chip->soc_scale_msoc = chip->prev_soc_scale_msoc -
+ soc_thr_percent;
+ } else if (soc > chip->prev_soc_scale_msoc) {
+ /*
+ * If the calculated SOC is higher than the current SOC, report
+ * the current SOC.
+ */
chip->soc_scale_msoc = chip->prev_soc_scale_msoc;
chip->scale_timer = chip->dt.scale_timer_ms;
} else if ((chip->prev_soc_scale_msoc - soc) > soc_thr_percent) {
@@ -3982,9 +4001,6 @@ static void soc_scale_work(struct work_struct *work)
soc_thr_percent;
chip->scale_timer = chip->dt.scale_timer_ms /
(chip->prev_soc_scale_msoc - soc);
- } else {
- chip->soc_scale_msoc = soc;
- chip->scale_timer = chip->dt.scale_timer_ms;
}
if (chip->soc_scale_msoc < 0)
@@ -3997,7 +4013,7 @@ static void soc_scale_work(struct work_struct *work)
}
chip->prev_soc_scale_msoc = chip->soc_scale_msoc;
- pr_debug("FVSS: Calculated SOC=%d SOC reported=%d timer resolution=%d\n",
+ fg_dbg(fg, FG_FVSS, "Calculated SOC=%d SOC reported=%d timer resolution=%d\n",
soc, chip->soc_scale_msoc, chip->scale_timer);
alarm_start_relative(&chip->soc_scale_alarm_timer,
ms_to_ktime(chip->scale_timer));
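The reordered branches in soc_scale_work() above amount to a priority rule applied to the freshly computed SOC. A hedged restatement as a pure function, detached from the driver state (the driver additionally shortens scale_timer in the large-drop case):

/* Sketch of the reporting rule; soc_thr_percent is the per-step limit. */
static int scaled_msoc(int soc, int prev, int msoc_actual, int soc_thr_percent)
{
	if ((prev - msoc_actual) > soc_thr_percent)
		/* SW SOC ran ahead of HW SOC: step down by one threshold */
		return prev - soc_thr_percent;
	if (soc > prev)
		/* never report a rising SOC while in scale mode */
		return prev;
	if ((prev - soc) > soc_thr_percent)
		/* large drop: limit to one threshold per timer cycle */
		return prev - soc_thr_percent;
	return soc;	/* small drop: report as calculated */
}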
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 6c85dd9..dd72e9b 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -91,6 +91,7 @@ static struct attribute *qg_attrs[] = {
ATTRIBUTE_GROUPS(qg);
static int qg_process_rt_fifo(struct qpnp_qg *chip);
+static int qg_load_battery_profile(struct qpnp_qg *chip);
static bool is_battery_present(struct qpnp_qg *chip)
{
@@ -493,6 +494,9 @@ static int qg_process_fifo(struct qpnp_qg *chip, u32 fifo_length)
chip->kdata.fifo[j].interval = sample_interval;
chip->kdata.fifo[j].count = sample_count;
+ chip->last_fifo_v_uv = chip->kdata.fifo[j].v;
+ chip->last_fifo_i_ua = chip->kdata.fifo[j].i;
+
qg_dbg(chip, QG_DEBUG_FIFO, "FIFO %d raw_v=%d uV=%d raw_i=%d uA=%d interval=%d count=%d\n",
j, fifo_v,
chip->kdata.fifo[j].v,
@@ -557,6 +561,9 @@ static int qg_process_accumulator(struct qpnp_qg *chip)
if (chip->kdata.fifo_length == MAX_FIFO_LENGTH)
chip->kdata.fifo_length = MAX_FIFO_LENGTH - 1;
+ chip->last_fifo_v_uv = chip->kdata.fifo[index].v;
+ chip->last_fifo_i_ua = chip->kdata.fifo[index].i;
+
if (chip->kdata.fifo_length == 1) /* Only accumulator data */
chip->kdata.seq_no = chip->seq_no++ % U32_MAX;
@@ -1534,8 +1541,6 @@ static int qg_get_learned_capacity(void *data, int64_t *learned_cap_uah)
}
*learned_cap_uah = cc_mah * 1000;
- qg_dbg(chip, QG_DEBUG_ALG_CL, "Retrieved learned capacity %llduah\n",
- *learned_cap_uah);
return 0;
}
@@ -1564,6 +1569,47 @@ static int qg_store_learned_capacity(void *data, int64_t learned_cap_uah)
return 0;
}
+static int qg_get_batt_age_level(void *data, u32 *batt_age_level)
+{
+ struct qpnp_qg *chip = data;
+ int rc;
+
+ if (!chip)
+ return -ENODEV;
+
+ if (chip->battery_missing || is_debug_batt_id(chip))
+ return -ENODEV;
+
+ *batt_age_level = 0;
+ rc = qg_sdam_read(SDAM_BATT_AGE_LEVEL, batt_age_level);
+ if (rc < 0) {
+ pr_err("Error in reading batt_age_level, rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int qg_store_batt_age_level(void *data, u32 batt_age_level)
+{
+ struct qpnp_qg *chip = data;
+ int rc;
+
+ if (!chip)
+ return -ENODEV;
+
+ if (chip->battery_missing)
+ return -ENODEV;
+
+ rc = qg_sdam_write(SDAM_BATT_AGE_LEVEL, batt_age_level);
+ if (rc < 0) {
+ pr_err("Error in writing batt_age_level, rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
static int qg_get_cc_soc(void *data, int *cc_soc)
{
struct qpnp_qg *chip = data;
@@ -1571,6 +1617,11 @@ static int qg_get_cc_soc(void *data, int *cc_soc)
if (!chip)
return -ENODEV;
+ if (is_debug_batt_id(chip) || chip->battery_missing) {
+ *cc_soc = -EINVAL;
+ return 0;
+ }
+
if (chip->cc_soc == INT_MIN)
return -EINVAL;
@@ -1699,6 +1750,11 @@ static int qg_get_charge_counter(struct qpnp_qg *chip, int *charge_counter)
int rc, cc_soc = 0;
int64_t temp = 0;
+ if (is_debug_batt_id(chip) || chip->battery_missing) {
+ *charge_counter = -EINVAL;
+ return 0;
+ }
+
rc = qg_get_learned_capacity(chip, &temp);
if (rc < 0 || !temp)
rc = qg_get_nominal_capacity((int *)&temp, 250, true);
@@ -1934,6 +1990,40 @@ static int qg_reset(struct qpnp_qg *chip)
return rc;
}
+static int qg_setprop_batt_age_level(struct qpnp_qg *chip, int batt_age_level)
+{
+ int rc = 0;
+
+ if (!chip->dt.multi_profile_load)
+ return 0;
+
+ if (batt_age_level < 0) {
+ pr_err("Invalid age-level %d\n", batt_age_level);
+ return -EINVAL;
+ }
+
+ if (chip->batt_age_level == batt_age_level) {
+ qg_dbg(chip, QG_DEBUG_PROFILE, "Same age-level %d\n",
+ chip->batt_age_level);
+ return 0;
+ }
+
+ chip->batt_age_level = batt_age_level;
+ rc = qg_load_battery_profile(chip);
+ if (rc < 0) {
+ pr_err("failed to load profile\n");
+ } else {
+ rc = qg_store_batt_age_level(chip, batt_age_level);
+ if (rc < 0)
+ pr_err("error in storing batt_age_level rc =%d\n", rc);
+ }
+
+ qg_dbg(chip, QG_DEBUG_PROFILE, "Profile with batt_age_level = %d loaded\n",
+ chip->batt_age_level);
+
+ return rc;
+}
+
static int qg_psy_set_property(struct power_supply *psy,
enum power_supply_property psp,
const union power_supply_propval *pval)
@@ -1965,6 +2055,8 @@ static int qg_psy_set_property(struct power_supply *psy,
chip->soh = pval->intval;
qg_dbg(chip, QG_DEBUG_STATUS, "SOH update: SOH=%d esr_actual=%d esr_nominal=%d\n",
chip->soh, chip->esr_actual, chip->esr_nominal);
+ if (chip->sp)
+ soh_profile_update(chip->sp, chip->soh);
break;
case POWER_SUPPLY_PROP_ESR_ACTUAL:
chip->esr_actual = pval->intval;
@@ -1975,6 +2067,9 @@ static int qg_psy_set_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_FG_RESET:
qg_reset(chip);
break;
+ case POWER_SUPPLY_PROP_BATT_AGE_LEVEL:
+ rc = qg_setprop_batt_age_level(chip, pval->intval);
+ break;
default:
break;
}
@@ -2101,6 +2196,12 @@ static int qg_psy_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_POWER_AVG:
rc = qg_get_power(chip, &pval->intval, true);
break;
+ case POWER_SUPPLY_PROP_SCALE_MODE_EN:
+ pval->intval = chip->fvss_active;
+ break;
+ case POWER_SUPPLY_PROP_BATT_AGE_LEVEL:
+ pval->intval = chip->batt_age_level;
+ break;
default:
pr_debug("Unsupported property %d\n", psp);
break;
@@ -2118,6 +2219,7 @@ static int qg_property_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_ESR_NOMINAL:
case POWER_SUPPLY_PROP_SOH:
case POWER_SUPPLY_PROP_FG_RESET:
+ case POWER_SUPPLY_PROP_BATT_AGE_LEVEL:
return 1;
default:
break;
@@ -2159,6 +2261,8 @@ static enum power_supply_property qg_psy_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_AVG,
POWER_SUPPLY_PROP_POWER_AVG,
POWER_SUPPLY_PROP_POWER_NOW,
+ POWER_SUPPLY_PROP_SCALE_MODE_EN,
+ POWER_SUPPLY_PROP_BATT_AGE_LEVEL,
};
static const struct power_supply_desc qg_psy_desc = {
@@ -2578,6 +2682,12 @@ static ssize_t qg_device_read(struct file *file, char __user *buf, size_t count,
struct qpnp_qg *chip = file->private_data;
unsigned long data_size = sizeof(chip->kdata);
+ if (count < data_size) {
+ pr_err("Invalid datasize %lu, expected lesser then %zu\n",
+ data_size, count);
+ return -EINVAL;
+ }
+
/* non-blocking access, return */
if (!chip->data_ready && (file->f_flags & O_NONBLOCK))
return 0;
@@ -2791,17 +2901,39 @@ static int get_batt_id_ohm(struct qpnp_qg *chip, u32 *batt_id_ohm)
static int qg_load_battery_profile(struct qpnp_qg *chip)
{
struct device_node *node = chip->dev->of_node;
- struct device_node *batt_node, *profile_node;
- int rc, tuple_len, len, i;
+ struct device_node *profile_node;
+ int rc, tuple_len, len, i, avail_age_level = 0;
- batt_node = of_find_node_by_name(node, "qcom,battery-data");
- if (!batt_node) {
+ chip->batt_node = of_find_node_by_name(node, "qcom,battery-data");
+ if (!chip->batt_node) {
pr_err("Batterydata not available\n");
return -ENXIO;
}
- profile_node = of_batterydata_get_best_profile(batt_node,
+ if (chip->dt.multi_profile_load) {
+ if (chip->batt_age_level == -EINVAL) {
+ rc = qg_get_batt_age_level(chip, &chip->batt_age_level);
+ if (rc < 0) {
+ pr_err("error in retrieving batt age level rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+ profile_node = of_batterydata_get_best_aged_profile(
+ chip->batt_node,
+ chip->batt_id_ohm / 1000,
+ chip->batt_age_level,
+ &avail_age_level);
+ if (chip->batt_age_level != avail_age_level) {
+ qg_dbg(chip, QG_DEBUG_PROFILE, "Batt_age_level %d doesn't exist, using %d\n",
+ chip->batt_age_level, avail_age_level);
+ chip->batt_age_level = avail_age_level;
+ }
+ } else {
+ profile_node = of_batterydata_get_best_profile(chip->batt_node,
chip->batt_id_ohm / 1000, NULL);
+ }
+
if (IS_ERR(profile_node)) {
rc = PTR_ERR(profile_node);
pr_err("Failed to detect valid QG battery profile %d\n", rc);
@@ -3418,9 +3550,44 @@ static int qg_hw_init(struct qpnp_qg *chip)
return 0;
}
+static int qg_soh_batt_profile_init(struct qpnp_qg *chip)
+{
+ int rc = 0;
+
+ if (!chip->dt.multi_profile_load)
+ return 0;
+
+ if (is_debug_batt_id(chip) || chip->battery_missing)
+ return 0;
+
+ if (!chip->sp)
+ chip->sp = devm_kzalloc(chip->dev, sizeof(*chip->sp),
+ GFP_KERNEL);
+ if (!chip->sp)
+ return -ENOMEM;
+
+ if (!chip->sp->initialized) {
+ chip->sp->batt_id_kohms = chip->batt_id_ohm / 1000;
+ chip->sp->bp_node = chip->batt_node;
+ chip->sp->last_batt_age_level = chip->batt_age_level;
+ chip->sp->bms_psy = chip->qg_psy;
+ rc = soh_profile_init(chip->dev, chip->sp);
+ if (rc < 0) {
+ devm_kfree(chip->dev, chip->sp);
+ chip->sp = NULL;
+ } else {
+ qg_dbg(chip, QG_DEBUG_PROFILE, "SOH profile count: %d\n",
+ chip->sp->profile_count);
+ }
+ }
+
+ return rc;
+}
+
static int qg_post_init(struct qpnp_qg *chip)
{
u8 status = 0;
+ int rc = 0;
/* disable all IRQs if profile is not loaded */
if (!chip->profile_loaded) {
@@ -3439,6 +3606,14 @@ static int qg_post_init(struct qpnp_qg *chip)
/* read STATUS2 register to clear its last state */
qg_read(chip, chip->qg_base + QG_STATUS2_REG, &status, 1);
+ /* SOH-based multi-profile init */
+ rc = qg_soh_batt_profile_init(chip);
+ if (rc < 0) {
+ pr_err("Failed to initialize battery based on soh rc=%d\n",
+ rc);
+ return rc;
+ }
+
return 0;
}
@@ -3812,6 +3987,7 @@ static int qg_parse_cl_dt(struct qpnp_qg *chip)
#define ESR_CHG_MIN_IBAT_UA (-450000)
#define DEFAULT_SLEEP_TIME_SECS 1800 /* 30 mins */
#define DEFAULT_SYS_MIN_VOLT_MV 2800
+#define DEFAULT_FVSS_VBAT_MV 3500
static int qg_parse_dt(struct qpnp_qg *chip)
{
int rc = 0;
@@ -4043,6 +4219,21 @@ static int qg_parse_dt(struct qpnp_qg *chip)
else
chip->dt.min_sleep_time_secs = temp;
+ if (of_property_read_bool(node, "qcom,fvss-enable")) {
+ chip->dt.fvss_enable = true;
+
+ rc = of_property_read_u32(node,
+ "qcom,fvss-vbatt-mv", &temp);
+ if (rc < 0)
+ chip->dt.fvss_vbat_mv = DEFAULT_FVSS_VBAT_MV;
+ else
+ chip->dt.fvss_vbat_mv = temp;
+ }
+
+ chip->dt.multi_profile_load = of_property_read_bool(node,
+ "qcom,multi-profile-load");
+
qg_dbg(chip, QG_DEBUG_PON, "DT: vbatt_empty_mv=%dmV vbatt_low_mv=%dmV delta_soc=%d ext-sns=%d\n",
chip->dt.vbatt_empty_mv, chip->dt.vbatt_low_mv,
chip->dt.delta_soc, chip->dt.qg_ext_sense);
@@ -4345,6 +4536,7 @@ static int qpnp_qg_probe(struct platform_device *pdev)
chip->soh = -EINVAL;
chip->esr_actual = -EINVAL;
chip->esr_nominal = -EINVAL;
+ chip->batt_age_level = -EINVAL;
qg_create_debugfs(chip);
@@ -4366,6 +4558,12 @@ static int qpnp_qg_probe(struct platform_device *pdev)
return rc;
}
+ rc = qg_sdam_init(chip->dev);
+ if (rc < 0) {
+ pr_err("Failed to initialize QG SDAM, rc=%d\n", rc);
+ return rc;
+ }
+
rc = qg_setup_battery(chip);
if (rc < 0) {
pr_err("Failed to setup battery, rc=%d\n", rc);
@@ -4378,12 +4576,6 @@ static int qpnp_qg_probe(struct platform_device *pdev)
return rc;
}
- rc = qg_sdam_init(chip->dev);
- if (rc < 0) {
- pr_err("Failed to initialize QG SDAM, rc=%d\n", rc);
- return rc;
- }
-
rc = qg_sanitize_sdam(chip);
if (rc < 0) {
pr_err("Failed to sanitize SDAM, rc=%d\n", rc);
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index a3f192b..9a00920 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -2115,6 +2115,10 @@ static int smb5_configure_micro_usb(struct smb_charger *chg)
}
}
+ /* Enable HVDCP detection and authentication */
+ if (!chg->hvdcp_disable)
+ smblib_hvdcp_detect_enable(chg, true);
+
return rc;
}
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 2cf5cdb..30f1787 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -6128,8 +6128,9 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data)
/*
* Remove USB's CP ILIM vote - inapplicable for wireless
- * parallel charging.
+ * parallel charging. Also undo FCC STEPPER's 1.5 A vote.
*/
+ vote(chg->fcc_votable, FCC_STEPPER_VOTER, false, 0);
if (chg->cp_ilim_votable)
vote(chg->cp_ilim_votable, ICL_CHANGE_VOTER, false, 0);
@@ -6182,6 +6183,10 @@ irqreturn_t dc_plugin_irq_handler(int irq, void *data)
vote(chg->dc_suspend_votable, CHG_TERMINATION_VOTER, false, 0);
vote(chg->fcc_main_votable, WLS_PL_CHARGING_VOTER, false, 0);
+ /* Force 1500mA FCC on WLS removal if fcc stepper is enabled */
+ if (chg->fcc_stepper_enable)
+ vote(chg->fcc_votable, FCC_STEPPER_VOTER,
+ true, 1500000);
chg->last_wls_vout = 0;
}
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 8febacb..0951564 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -166,6 +166,14 @@ static long pps_cdev_ioctl(struct file *file,
pps->params.mode |= PPS_CANWAIT;
pps->params.api_version = PPS_API_VERS;
+ /*
+ * Clear unused fields of pps_kparams to avoid leaking
+ * uninitialized data of the PPS_SETPARAMS caller via
+ * PPS_GETPARAMS
+ */
+ pps->params.assert_off_tu.flags = 0;
+ pps->params.clear_off_tu.flags = 0;
+
spin_unlock_irq(&pps->lock);
break;
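The two cleared fields above close a cross-caller information leak: PPS_GETPARAMS returns the whole stored pps_kparams, so fields that PPS_SETPARAMS accepted but never sanitized would hand one caller's uninitialized bytes to another. A generic sketch of the pattern, using a hypothetical ioctl rather than the pps structures:

/* Hedged illustration only; demo_params and demo_get are hypothetical. */
struct demo_params {
	int mode;
	int spare;	/* never consumed by SET: must be scrubbed */
};

static long demo_get(struct demo_params *state, void __user *uarg)
{
	struct demo_params out = *state;

	out.spare = 0;	/* don't echo back whatever SET's caller passed */
	if (copy_to_user(uarg, &out, sizeof(out)))
		return -EFAULT;
	return 0;
}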
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 212d99d..4d851df 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -313,7 +313,6 @@ static int pwm_export_child(struct device *parent, struct pwm_device *pwm)
export->pwm = pwm;
mutex_init(&export->lock);
- export->child.class = parent->class;
export->child.release = pwm_export_release;
export->child.parent = parent;
export->child.devt = MKDEV(0, 0);
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index cbe467f..fa0bbda 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -1688,6 +1688,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
return -EFAULT;
+ dev_info.name[sizeof(dev_info.name) - 1] = '\0';
rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
dev_info.comptag, dev_info.destid, dev_info.hopcount);
@@ -1819,6 +1820,7 @@ static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
return -EFAULT;
+ dev_info.name[sizeof(dev_info.name) - 1] = '\0';
mport = priv->md->mport;
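Both rio_mport hunks apply the standard hardening for fixed-size strings copied from userspace: copy_from_user() gives no NUL guarantee, so the final byte is forced to '\0' before the buffer reaches %s formatting. The idiom in isolation, with a hypothetical struct:

/* Hedged sketch; rio_dev_info_demo is illustrative, not the real ABI. */
struct rio_dev_info_demo {
	char name[48];
	u32 comptag;
};

static int demo_copy_dev_info(struct rio_dev_info_demo *dst,
			      const void __user *arg)
{
	if (copy_from_user(dst, arg, sizeof(*dst)))
		return -EFAULT;
	/* userspace may fill name[] completely; terminate before use */
	dst->name[sizeof(dst->name) - 1] = '\0';
	return 0;
}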
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index d859315..c996be2 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -1607,9 +1607,7 @@ static const struct rpmsg_endpoint_ops glink_endpoint_ops = {
static void qcom_glink_rpdev_release(struct device *dev)
{
struct rpmsg_device *rpdev = to_rpmsg_device(dev);
- struct glink_channel *channel = to_glink_channel(rpdev->ept);
- channel->rpdev = NULL;
kfree(rpdev);
}
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index b9ce93e..99f8661 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -383,6 +383,20 @@ suborder_not_supported(struct dasd_ccw_req *cqr)
char msg_format;
char msg_no;
+ /*
+ * intrc values ENODEV, ENOLINK and EPERM
+ * will be obtained from sleep_on to indicate that no
+ * I/O operation can be started
+ */
+ if (cqr->intrc == -ENODEV)
+ return 1;
+
+ if (cqr->intrc == -ENOLINK)
+ return 1;
+
+ if (cqr->intrc == -EPERM)
+ return 1;
+
sense = dasd_get_sense(&cqr->irb);
if (!sense)
return 0;
@@ -447,12 +461,8 @@ static int read_unit_address_configuration(struct dasd_device *device,
lcu->flags &= ~NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
- do {
- rc = dasd_sleep_on(cqr);
- if (rc && suborder_not_supported(cqr))
- return -EOPNOTSUPP;
- } while (rc && (cqr->retries > 0));
- if (rc) {
+ rc = dasd_sleep_on(cqr);
+ if (rc && !suborder_not_supported(cqr)) {
spin_lock_irqsave(&lcu->lock, flags);
lcu->flags |= NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 4ac4a73..4b7cc8d 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -1569,13 +1569,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
rc = qdio_kick_outbound_q(q, phys_aob);
} else if (need_siga_sync(q)) {
rc = qdio_siga_sync_q(q);
+ } else if (count < QDIO_MAX_BUFFERS_PER_Q &&
+ get_buf_state(q, prev_buf(bufnr), &state, 0) > 0 &&
+ state == SLSB_CU_OUTPUT_PRIMED) {
+ /* The previous buffer is not processed yet, tack on. */
+ qperf_inc(q, fast_requeue);
} else {
- /* try to fast requeue buffers */
- get_buf_state(q, prev_buf(bufnr), &state, 0);
- if (state != SLSB_CU_OUTPUT_PRIMED)
- rc = qdio_kick_outbound_q(q, 0);
- else
- qperf_inc(q, fast_requeue);
+ rc = qdio_kick_outbound_q(q, 0);
}
/* in case of SIGA errors we must process the error immediately */
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index 70a006ba..4fe06ff 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -89,8 +89,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
sizeof(*pa->pa_iova_pfn) +
sizeof(*pa->pa_pfn),
GFP_KERNEL);
- if (unlikely(!pa->pa_iova_pfn))
+ if (unlikely(!pa->pa_iova_pfn)) {
+ pa->pa_nr = 0;
return -ENOMEM;
+ }
pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index ebdbc45..332701d 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -11,6 +11,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kthread.h>
+#include <linux/bug.h>
#include "zfcp_ext.h"
#include "zfcp_reqlist.h"
@@ -238,6 +239,12 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
struct zfcp_erp_action *erp_action;
struct zfcp_scsi_dev *zfcp_sdev;
+ if (WARN_ON_ONCE(need != ZFCP_ERP_ACTION_REOPEN_LUN &&
+ need != ZFCP_ERP_ACTION_REOPEN_PORT &&
+ need != ZFCP_ERP_ACTION_REOPEN_PORT_FORCED &&
+ need != ZFCP_ERP_ACTION_REOPEN_ADAPTER))
+ return NULL;
+
switch (need) {
case ZFCP_ERP_ACTION_REOPEN_LUN:
zfcp_sdev = sdev_to_zfcp(sdev);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index d1154ba..9c21938 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -54,6 +54,7 @@
#define ALUA_FAILOVER_TIMEOUT 60
#define ALUA_FAILOVER_RETRIES 5
#define ALUA_RTPG_DELAY_MSECS 5
+#define ALUA_RTPG_RETRY_DELAY 2
/* device handler flags */
#define ALUA_OPTIMIZE_STPG 0x01
@@ -696,7 +697,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
case SCSI_ACCESS_STATE_TRANSITIONING:
if (time_before(jiffies, pg->expiry)) {
/* State transition, retry */
- pg->interval = 2;
+ pg->interval = ALUA_RTPG_RETRY_DELAY;
err = SCSI_DH_RETRY;
} else {
struct alua_dh_data *h;
@@ -821,6 +822,8 @@ static void alua_rtpg_work(struct work_struct *work)
spin_lock_irqsave(&pg->lock, flags);
pg->flags &= ~ALUA_PG_RUNNING;
pg->flags |= ALUA_PG_RUN_RTPG;
+ if (!pg->interval)
+ pg->interval = ALUA_RTPG_RETRY_DELAY;
spin_unlock_irqrestore(&pg->lock, flags);
queue_delayed_work(kaluad_wq, &pg->rtpg_work,
pg->interval * HZ);
@@ -832,6 +835,8 @@ static void alua_rtpg_work(struct work_struct *work)
spin_lock_irqsave(&pg->lock, flags);
if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
pg->flags &= ~ALUA_PG_RUNNING;
+ if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
+ pg->interval = ALUA_RTPG_RETRY_DELAY;
pg->flags |= ALUA_PG_RUN_RTPG;
spin_unlock_irqrestore(&pg->lock, flags);
queue_delayed_work(kaluad_wq, &pg->rtpg_work,
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 7dc4ffa..24cbd0a 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2017,7 +2017,7 @@ EXPORT_SYMBOL_GPL(fcoe_wwn_from_mac);
*/
static inline struct fcoe_rport *fcoe_ctlr_rport(struct fc_rport_priv *rdata)
{
- return (struct fcoe_rport *)(rdata + 1);
+ return container_of(rdata, struct fcoe_rport, rdata);
}
/**
@@ -2281,7 +2281,7 @@ static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
*/
static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
struct sk_buff *skb,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
struct fip_header *fiph;
struct fip_desc *desc = NULL;
@@ -2289,16 +2289,12 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
struct fip_wwn_desc *wwn = NULL;
struct fip_vn_desc *vn = NULL;
struct fip_size_desc *size = NULL;
- struct fcoe_rport *frport;
size_t rlen;
size_t dlen;
u32 desc_mask = 0;
u32 dtype;
u8 sub;
- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
- frport = fcoe_ctlr_rport(rdata);
-
fiph = (struct fip_header *)skb->data;
frport->flags = ntohs(fiph->fip_flags);
@@ -2361,15 +2357,17 @@ static int fcoe_ctlr_vn_parse(struct fcoe_ctlr *fip,
if (dlen != sizeof(struct fip_wwn_desc))
goto len_err;
wwn = (struct fip_wwn_desc *)desc;
- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+ frport->rdata.ids.node_name =
+ get_unaligned_be64(&wwn->fd_wwn);
break;
case FIP_DT_VN_ID:
if (dlen != sizeof(struct fip_vn_desc))
goto len_err;
vn = (struct fip_vn_desc *)desc;
memcpy(frport->vn_mac, vn->fd_mac, ETH_ALEN);
- rdata->ids.port_id = ntoh24(vn->fd_fc_id);
- rdata->ids.port_name = get_unaligned_be64(&vn->fd_wwpn);
+ frport->rdata.ids.port_id = ntoh24(vn->fd_fc_id);
+ frport->rdata.ids.port_name =
+ get_unaligned_be64(&vn->fd_wwpn);
break;
case FIP_DT_FC4F:
if (dlen != sizeof(struct fip_fc4_feat))
@@ -2750,10 +2748,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fip_header *fiph;
enum fip_vn2vn_subcode sub;
- struct {
- struct fc_rport_priv rdata;
- struct fcoe_rport frport;
- } buf;
+ struct fcoe_rport frport = { };
int rc, vlan_id = 0;
fiph = (struct fip_header *)skb->data;
@@ -2769,7 +2764,7 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
goto drop;
}
- rc = fcoe_ctlr_vn_parse(fip, skb, &buf.rdata);
+ rc = fcoe_ctlr_vn_parse(fip, skb, &frport);
if (rc) {
LIBFCOE_FIP_DBG(fip, "vn_recv vn_parse error %d\n", rc);
goto drop;
@@ -2778,19 +2773,19 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
mutex_lock(&fip->ctlr_mutex);
switch (sub) {
case FIP_SC_VN_PROBE_REQ:
- fcoe_ctlr_vn_probe_req(fip, &buf.rdata);
+ fcoe_ctlr_vn_probe_req(fip, &frport.rdata);
break;
case FIP_SC_VN_PROBE_REP:
- fcoe_ctlr_vn_probe_reply(fip, &buf.rdata);
+ fcoe_ctlr_vn_probe_reply(fip, &frport.rdata);
break;
case FIP_SC_VN_CLAIM_NOTIFY:
- fcoe_ctlr_vn_claim_notify(fip, &buf.rdata);
+ fcoe_ctlr_vn_claim_notify(fip, &frport.rdata);
break;
case FIP_SC_VN_CLAIM_REP:
- fcoe_ctlr_vn_claim_resp(fip, &buf.rdata);
+ fcoe_ctlr_vn_claim_resp(fip, &frport.rdata);
break;
case FIP_SC_VN_BEACON:
- fcoe_ctlr_vn_beacon(fip, &buf.rdata);
+ fcoe_ctlr_vn_beacon(fip, &frport.rdata);
break;
default:
LIBFCOE_FIP_DBG(fip, "vn_recv unknown subcode %d\n", sub);
@@ -2814,22 +2809,18 @@ static int fcoe_ctlr_vn_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
*/
static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
struct sk_buff *skb,
- struct fc_rport_priv *rdata)
+ struct fcoe_rport *frport)
{
struct fip_header *fiph;
struct fip_desc *desc = NULL;
struct fip_mac_desc *macd = NULL;
struct fip_wwn_desc *wwn = NULL;
- struct fcoe_rport *frport;
size_t rlen;
size_t dlen;
u32 desc_mask = 0;
u32 dtype;
u8 sub;
- memset(rdata, 0, sizeof(*rdata) + sizeof(*frport));
- frport = fcoe_ctlr_rport(rdata);
-
fiph = (struct fip_header *)skb->data;
frport->flags = ntohs(fiph->fip_flags);
@@ -2883,7 +2874,8 @@ static int fcoe_ctlr_vlan_parse(struct fcoe_ctlr *fip,
if (dlen != sizeof(struct fip_wwn_desc))
goto len_err;
wwn = (struct fip_wwn_desc *)desc;
- rdata->ids.node_name = get_unaligned_be64(&wwn->fd_wwn);
+ frport->rdata.ids.node_name =
+ get_unaligned_be64(&wwn->fd_wwn);
break;
default:
LIBFCOE_FIP_DBG(fip, "unexpected descriptor type %x "
@@ -2994,22 +2986,19 @@ static int fcoe_ctlr_vlan_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
{
struct fip_header *fiph;
enum fip_vlan_subcode sub;
- struct {
- struct fc_rport_priv rdata;
- struct fcoe_rport frport;
- } buf;
+ struct fcoe_rport frport = { };
int rc;
fiph = (struct fip_header *)skb->data;
sub = fiph->fip_subcode;
- rc = fcoe_ctlr_vlan_parse(fip, skb, &buf.rdata);
+ rc = fcoe_ctlr_vlan_parse(fip, skb, &frport);
if (rc) {
LIBFCOE_FIP_DBG(fip, "vlan_recv vlan_parse error %d\n", rc);
goto drop;
}
mutex_lock(&fip->ctlr_mutex);
if (sub == FIP_SC_VL_REQ)
- fcoe_ctlr_vlan_disc_reply(fip, &buf.rdata);
+ fcoe_ctlr_vlan_disc_reply(fip, &frport.rdata);
mutex_unlock(&fip->ctlr_mutex);
drop:
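The fcoe conversion above replaces the (rdata + 1) layout assumption with an explicit embedding: struct fcoe_rport now holds its fc_rport_priv as the member rdata, so container_of() recovers the outer object regardless of how it was allocated. In miniature, with simplified demo types:

/* Hedged sketch; demo types stand in for the real fcoe structures. */
struct fc_rport_priv_demo {
	int port_id;
};

struct fcoe_rport_demo {
	struct fc_rport_priv_demo rdata;	/* embedded, not appended */
	int flags;
};

static struct fcoe_rport_demo *to_frport(struct fc_rport_priv_demo *rdata)
{
	/* correct even if rdata is not the first member */
	return container_of(rdata, struct fcoe_rport_demo, rdata);
}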
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c43eccd..f570b8c 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2320,6 +2320,8 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
case IOACCEL2_SERV_RESPONSE_COMPLETE:
switch (c2->error_data.status) {
case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
+ if (cmd)
+ cmd->result = 0;
break;
case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
cmd->result |= SAM_STAT_CHECK_CONDITION;
@@ -2479,8 +2481,10 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
/* check for good status */
if (likely(c2->error_data.serv_response == 0 &&
- c2->error_data.status == 0))
+ c2->error_data.status == 0)) {
+ cmd->result = 0;
return hpsa_cmd_free_and_done(h, c, cmd);
+ }
/*
* Any RAID offload error results in retry which will use
@@ -5618,6 +5622,12 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
c = cmd_tagged_alloc(h, cmd);
/*
+ * This is necessary because the SML doesn't zero out this field during
+ * error recovery.
+ */
+ cmd->result = 0;
+
+ /*
* Call alternate submit routine for I/O accelerated commands.
* Retries always go down the normal I/O path.
*/
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index b64ca97..71d53bb 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4874,8 +4874,8 @@ static int ibmvfc_remove(struct vio_dev *vdev)
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_purge_requests(vhost, DID_ERROR);
- ibmvfc_free_event_pool(vhost);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_free_event_pool(vhost);
ibmvfc_free_mem(vhost);
spin_lock(&ibmvfc_driver_lock);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 3d51a93..90a7485 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -140,6 +140,7 @@ EXPORT_SYMBOL(fc_rport_lookup);
struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
{
struct fc_rport_priv *rdata;
+ size_t rport_priv_size = sizeof(*rdata);
lockdep_assert_held(&lport->disc.disc_mutex);
@@ -147,7 +148,9 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
if (rdata)
return rdata;
- rdata = kzalloc(sizeof(*rdata) + lport->rport_priv_size, GFP_KERNEL);
+ if (lport->rport_priv_size > 0)
+ rport_priv_size = lport->rport_priv_size;
+ rdata = kzalloc(rport_priv_size, GFP_KERNEL);
if (!rdata)
return NULL;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index e0c8722..806ceab 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -3025,6 +3025,7 @@ megasas_fw_crash_buffer_show(struct device *cdev,
u32 size;
unsigned long buff_addr;
unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+ unsigned long chunk_left_bytes;
unsigned long src_addr;
unsigned long flags;
u32 buff_offset;
@@ -3050,6 +3051,8 @@ megasas_fw_crash_buffer_show(struct device *cdev,
}
size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+ chunk_left_bytes = dmachunk - (buff_offset % dmachunk);
+ size = (size > chunk_left_bytes) ? chunk_left_bytes : size;
size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
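The megaraid clamp above keeps each sysfs read inside the DMA chunk that buff_offset falls in; without it, an offset near a chunk boundary could read past crash_buf[buff_offset / dmachunk]. The bounds math as a hedged standalone helper, with illustrative numbers:

/* Sketch of the added check; not the driver function itself. */
static unsigned long clamp_to_chunk(unsigned long size,
				    unsigned long buff_offset,
				    unsigned long dmachunk)
{
	unsigned long chunk_left_bytes = dmachunk - (buff_offset % dmachunk);

	/* e.g. dmachunk=0x100000, buff_offset=0xFFF00 -> 0x100 bytes left */
	return size > chunk_left_bytes ? chunk_left_bytes : size;
}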
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8776330..d2ab520 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2565,12 +2565,14 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
struct sysinfo s;
u64 consistent_dma_mask;
+ /* Set a 63-bit DMA mask for all SAS3 and SAS35 controllers */
+ int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
if (ioc->is_mcpu_endpoint)
goto try_32bit;
if (ioc->dma_mask)
- consistent_dma_mask = DMA_BIT_MASK(64);
+ consistent_dma_mask = DMA_BIT_MASK(dma_mask);
else
consistent_dma_mask = DMA_BIT_MASK(32);
@@ -2578,11 +2580,11 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
const uint64_t required_mask =
dma_get_required_mask(&pdev->dev);
if ((required_mask > DMA_BIT_MASK(32)) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_mask)) &&
!pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
ioc->base_add_sg_single = &_base_add_sg_single_64;
ioc->sge_size = sizeof(Mpi2SGESimple64_t);
- ioc->dma_mask = 64;
+ ioc->dma_mask = dma_mask;
goto out;
}
}
@@ -2609,7 +2611,7 @@ static int
_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
struct pci_dev *pdev)
{
- if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
return -ENODEV;
}
@@ -4545,7 +4547,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
total_sz += sz;
} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
- if (ioc->dma_mask == 64) {
+ if (ioc->dma_mask > 32) {
if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
pr_warn(MPT3SAS_FMT
"no suitable consistent DMA mask for %s\n",
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f84f9bf..ddce32f 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4732,7 +4732,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
ql_log(ql_log_warn, vha, 0xd049,
"Failed to allocate ct_sns request.\n");
kfree(fcport);
- fcport = NULL;
+ return NULL;
}
INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn);
INIT_LIST_HEAD(&fcport->gnl_entry);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9b2b3ba..648c717 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -3092,11 +3092,14 @@ scsi_device_quiesce(struct scsi_device *sdev)
*/
WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current);
- blk_set_preempt_only(q);
+ if (sdev->quiesced_by == current)
+ return 0;
+
+ blk_set_pm_only(q);
blk_mq_freeze_queue(q);
/*
- * Ensure that the effect of blk_set_preempt_only() will be visible
+ * Ensure that the effect of blk_set_pm_only() will be visible
* for percpu_ref_tryget() callers that occur after the queue
* unfreeze even if the queue was already frozen before this function
* was called. See also https://lwn.net/Articles/573497/.
@@ -3109,7 +3112,7 @@ scsi_device_quiesce(struct scsi_device *sdev)
if (err == 0)
sdev->quiesced_by = current;
else
- blk_clear_preempt_only(q);
+ blk_clear_pm_only(q);
mutex_unlock(&sdev->state_mutex);
return err;
@@ -3132,8 +3135,10 @@ void scsi_device_resume(struct scsi_device *sdev)
* device deleted during suspend)
*/
mutex_lock(&sdev->state_mutex);
- sdev->quiesced_by = NULL;
- blk_clear_preempt_only(sdev->request_queue);
+ if (sdev->quiesced_by) {
+ sdev->quiesced_by = NULL;
+ blk_clear_pm_only(sdev->request_queue);
+ }
if (sdev->sdev_state == SDEV_QUIESCE)
scsi_device_set_state(sdev, SDEV_RUNNING);
mutex_unlock(&sdev->state_mutex);
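The scsi_lib change above makes the quiesce/resume pair idempotent: a repeated quiesce by the same task returns early, and resume only clears the pm-only state that a quiesce actually set. The protected invariant as a hedged sketch (error handling from the real functions omitted):

/* Sketch only; the real functions also freeze/unfreeze the queue. */
static int demo_quiesce(struct scsi_device *sdev)
{
	if (sdev->quiesced_by == current)
		return 0;			/* already ours: no double set */
	blk_set_pm_only(sdev->request_queue);
	sdev->quiesced_by = current;
	return 0;
}

static void demo_resume(struct scsi_device *sdev)
{
	if (sdev->quiesced_by) {		/* undo only what quiesce did */
		sdev->quiesced_by = NULL;
		blk_clear_pm_only(sdev->request_queue);
	}
}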
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index f7b612f..a029804 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1806,7 +1806,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
ret = ufshcd_uic_hibern8_enter(hba);
if (ret)
/* link will be bad state so no need to scale_up_gear */
- return ret;
+ goto clk_scaling_unprepare;
ufshcd_custom_cmd_log(hba, "Hibern8-entered");
}
@@ -1819,7 +1819,7 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
ret = ufshcd_uic_hibern8_exit(hba);
if (ret)
/* link will be bad state so no need to scale_up_gear */
- return ret;
+ goto clk_scaling_unprepare;
ufshcd_custom_cmd_log(hba, "Hibern8-Exited");
}
@@ -7075,8 +7075,8 @@ static void ufshcd_err_handler(struct work_struct *work)
/*
* if host reset is required then skip clearing the pending
- * transfers forcefully because they will automatically get
- * cleared after link startup.
+ * transfers forcefully because they will get cleared during
+ * host reset and restore.
*/
if (needs_reset)
goto skip_pending_xfer_clear;
@@ -7647,7 +7647,6 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
hba = shost_priv(host);
tag = cmd->request->tag;
- ufshcd_print_cmd_log(hba);
lrbp = &hba->lrb[tag];
err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
@@ -7891,9 +7890,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
int err;
unsigned long flags;
- /* Reset the host controller */
+ /*
+ * Stop the host controller and complete the requests
+ * cleared by h/w
+ */
spin_lock_irqsave(hba->host->host_lock, flags);
ufshcd_hba_stop(hba, false);
+ hba->silence_err_logs = true;
+ ufshcd_complete_requests(hba);
+ hba->silence_err_logs = false;
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* scale up clocks to max frequency before full reinitialization */
diff --git a/drivers/slimbus/slim-msm-ngd.c b/drivers/slimbus/slim-msm-ngd.c
index 93e8457..0e754ff 100644
--- a/drivers/slimbus/slim-msm-ngd.c
+++ b/drivers/slimbus/slim-msm-ngd.c
@@ -2036,10 +2036,6 @@ static int ngd_slim_remove(struct platform_device *pdev)
struct msm_slim_ctrl *dev = platform_get_drvdata(pdev);
ngd_slim_enable(dev, false);
- if (!IS_ERR_OR_NULL(dev->iommu_desc.iommu_map)) {
- __depr_arm_iommu_detach_device(dev->iommu_desc.cb_dev);
- __depr_arm_iommu_release_mapping(dev->iommu_desc.iommu_map);
- }
if (dev->sysfs_created)
sysfs_remove_file(&dev->dev->kobj,
&dev_attr_debug_mask.attr);
diff --git a/drivers/slimbus/slim-msm.h b/drivers/slimbus/slim-msm.h
index 5baabc3..cd5b1ca 100644
--- a/drivers/slimbus/slim-msm.h
+++ b/drivers/slimbus/slim-msm.h
@@ -257,7 +257,6 @@ struct msm_slim_bulk_wr {
struct msm_slim_iommu {
struct device *cb_dev;
- struct dma_iommu_mapping *iommu_map;
bool s1_bypass;
};
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 1a36bc8..473e29d 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -24,6 +24,16 @@
helps reduce power consumption during idle mode of the system.
If unsure, say N
+config BUG_ON_HW_MEM_ONLINE_FAIL
+ bool "Trigger a BUG when HW memory online fails"
+ depends on QCOM_MEM_OFFLINE
+ help
+ Select this option if the kernel should BUG when the hardware
+ onlining of memory hotplug blocks fails. This helps to catch
+ online failures much more quickly and avoids the later side
+ effects of such memory online failures.
+ If unsure, say N
+
config QCOM_GENI_SE
tristate "QCOM GENI Serial Engine Driver"
depends on ARCH_QCOM || COMPILE_TEST
@@ -197,6 +207,8 @@
status indication and disables flows when the grant size is reached.
If unsure, or if not using burst mode flow control, say 'N'.
+source "drivers/soc/qcom/rmnet_ctl/Kconfig"
+
config QCOM_QMI_POWER_COLLAPSE
bool "Enable power save features"
depends on QCOM_QMI_RMNET
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 64024f4..ef36b9c 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -8,7 +8,7 @@
obj-$(CONFIG_QCOM_QMI_HELPERS) += qmi_helpers.o
qmi_helpers-y += qmi_encdec.o qmi_interface.o
obj-$(CONFIG_QCOM_QMI_RMNET) += qmi_rmnet.o
-obj-$(CONFIG_QCOM_QMI_DFC) += dfc_qmi.o
+obj-$(CONFIG_QCOM_QMI_DFC) += dfc_qmi.o dfc_qmap.o
obj-$(CONFIG_QCOM_QMI_POWER_COLLAPSE) += wda_qmi.o
obj-$(CONFIG_QCOM_RMTFS_MEM) += rmtfs_mem.o
obj-$(CONFIG_QCOM_RPMH) += qcom_rpmh.o
@@ -86,3 +86,4 @@
obj-$(CONFIG_QCOM_CDSP_RM) += cdsprm.o
obj-$(CONFIG_ICNSS) += icnss.o
obj-$(CONFIG_ICNSS_QMI) += icnss_qmi.o wlan_firmware_service_v01.o
+obj-$(CONFIG_RMNET_CTL) += rmnet_ctl/
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index d1241fe5..eb0c41c 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -564,13 +564,15 @@ static int dcc_valid_list(struct dcc_drvdata *drvdata, int curr_list)
return -EINVAL;
if (drvdata->enable[curr_list]) {
- dev_err(drvdata->dev, "DCC is already enabled\n");
+ dev_err(drvdata->dev, "List %d is already enabled\n",
+ curr_list);
return -EINVAL;
}
lock_reg = dcc_readl(drvdata, DCC_LL_LOCK(curr_list));
if (lock_reg & 0x1) {
- dev_err(drvdata->dev, "DCC is already enabled\n");
+ dev_err(drvdata->dev, "List %d is already locked\n",
+ curr_list);
return -EINVAL;
}
@@ -578,6 +580,21 @@ static int dcc_valid_list(struct dcc_drvdata *drvdata, int curr_list)
return 0;
}
+static bool is_dcc_enabled(struct dcc_drvdata *drvdata)
+{
+ bool dcc_enable = false;
+ int list;
+
+ for (list = 0; list < DCC_MAX_LINK_LIST; list++) {
+ if (drvdata->enable[list]) {
+ dcc_enable = true;
+ break;
+ }
+ }
+
+ return dcc_enable;
+}
+
static int dcc_enable(struct dcc_drvdata *drvdata)
{
int ret = 0;
@@ -586,7 +603,9 @@ static int dcc_enable(struct dcc_drvdata *drvdata)
mutex_lock(&drvdata->mutex);
- memset_io(drvdata->ram_base, 0xDE, drvdata->ram_size);
+ if (!is_dcc_enabled(drvdata))
+ memset_io(drvdata->ram_base, 0xDE, drvdata->ram_size);
for (list = 0; list < DCC_MAX_LINK_LIST; list++) {
@@ -667,21 +686,6 @@ static void dcc_disable(struct dcc_drvdata *drvdata)
mutex_unlock(&drvdata->mutex);
}
-static bool is_dcc_enabled(struct dcc_drvdata *drvdata)
-{
- bool dcc_enable = false;
- int list;
-
- for (list = 0; list < DCC_MAX_LINK_LIST; list++) {
- if (drvdata->enable[list]) {
- dcc_enable = true;
- break;
- }
- }
-
- return dcc_enable;
-}
-
static ssize_t curr_list_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
diff --git a/drivers/soc/qcom/ddr_stats.c b/drivers/soc/qcom/ddr_stats.c
index cb10342..49b0209 100644
--- a/drivers/soc/qcom/ddr_stats.c
+++ b/drivers/soc/qcom/ddr_stats.c
@@ -18,9 +18,10 @@
#include <linux/uaccess.h>
#include <asm/arch_timer.h>
+#include <clocksource/arm_arch_timer.h>
+
#define MAGIC_KEY1 0xA1157A75
#define MAX_NUM_MODES 0x14
-#define MSM_ARCH_TIMER_FREQ 19200000
#define GET_PDATA_OF_ATTR(attr) \
(container_of(attr, struct ddr_stats_kobj_attr, ka)->pd)
@@ -48,10 +49,9 @@ struct ddr_stats_kobj_attr {
struct ddr_stats_platform_data *pd;
};
-static inline u64 get_time_in_msec(u64 counter)
+static u64 get_time_in_msec(u64 counter)
{
- do_div(counter, MSM_ARCH_TIMER_FREQ);
- counter *= MSEC_PER_SEC;
+ do_div(counter, (arch_timer_get_rate()/MSEC_PER_SEC));
return counter;
}
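The rewritten helper both drops the hard-coded 19.2 MHz constant and repairs a precision loss: the old divide-by-frequency-then-multiply truncated to whole seconds before scaling. A hedged standalone form with a worked value:

/* Sketch of the conversion; rate comes from arch_timer_get_rate(). */
static u64 ticks_to_msec(u64 counter, u32 rate)
{
	/*
	 * rate/MSEC_PER_SEC = ticks per ms; at 19.2 MHz that is 19200,
	 * so 28800000 ticks -> 1500 ms. The old (counter / rate) * 1000
	 * form would have truncated the same input to 1000 ms.
	 */
	do_div(counter, rate / MSEC_PER_SEC);
	return counter;
}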
diff --git a/drivers/soc/qcom/dfc_defs.h b/drivers/soc/qcom/dfc_defs.h
new file mode 100644
index 0000000..7553707
--- /dev/null
+++ b/drivers/soc/qcom/dfc_defs.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DFC_DEFS_H
+#define _DFC_DEFS_H
+
+#include <linux/soc/qcom/qmi.h>
+#include "qmi_rmnet_i.h"
+
+#define DFC_ACK_TYPE_DISABLE 1
+#define DFC_ACK_TYPE_THRESHOLD 2
+
+#define DFC_MASK_TCP_BIDIR 0x1
+#define DFC_MASK_RAT_SWITCH 0x2
+#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
+#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)
+
+#define DFC_MAX_QOS_ID_V01 2
+
+struct dfc_qmi_data {
+ void *rmnet_port;
+ struct workqueue_struct *dfc_wq;
+ struct work_struct svc_arrive;
+ struct qmi_handle handle;
+ struct sockaddr_qrtr ssctl;
+ struct svc_info svc;
+ struct work_struct qmi_ind_work;
+ struct list_head qmi_ind_q;
+ spinlock_t qmi_ind_lock;
+ int index;
+ int restart_state;
+};
+
+enum dfc_ip_type_enum_v01 {
+ DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+ DFC_IPV4_TYPE_V01 = 0x4,
+ DFC_IPV6_TYPE_V01 = 0x6,
+ DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+};
+
+struct dfc_qos_id_type_v01 {
+ u32 qos_id;
+ enum dfc_ip_type_enum_v01 ip_type;
+};
+
+struct dfc_flow_status_info_type_v01 {
+ u8 subs_id;
+ u8 mux_id;
+ u8 bearer_id;
+ u32 num_bytes;
+ u16 seq_num;
+ u8 qos_ids_len;
+ struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
+};
+
+struct dfc_ancillary_info_type_v01 {
+ u8 subs_id;
+ u8 mux_id;
+ u8 bearer_id;
+ u32 reserved;
+};
+
+struct dfc_flow_status_ind_msg_v01 {
+ u8 flow_status_valid;
+ u8 flow_status_len;
+ struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
+ u8 eod_ack_reqd_valid;
+ u8 eod_ack_reqd;
+ u8 ancillary_info_valid;
+ u8 ancillary_info_len;
+ struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
+};
+
+struct dfc_bearer_info_type_v01 {
+ u8 subs_id;
+ u8 mux_id;
+ u8 bearer_id;
+ enum dfc_ip_type_enum_v01 ip_type;
+};
+
+struct dfc_tx_link_status_ind_msg_v01 {
+ u8 tx_status;
+ u8 bearer_info_valid;
+ u8 bearer_info_len;
+ struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
+};
+
+void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
+ struct dfc_flow_status_ind_msg_v01 *ind);
+
+void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
+ struct dfc_tx_link_status_ind_msg_v01 *ind);
+
+#endif /* _DFC_DEFS_H */
diff --git a/drivers/soc/qcom/dfc_qmap.c b/drivers/soc/qcom/dfc_qmap.c
new file mode 100644
index 0000000..a4b2095
--- /dev/null
+++ b/drivers/soc/qcom/dfc_qmap.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <net/pkt_sched.h>
+#include <soc/qcom/rmnet_qmi.h>
+#include <soc/qcom/qmi_rmnet.h>
+#include <trace/events/dfc.h>
+#include <soc/qcom/rmnet_ctl.h>
+#include "dfc_defs.h"
+
+#define QMAP_DFC_VER 1
+
+#define QMAP_CMD_DONE -1
+
+#define QMAP_CMD_REQUEST 0
+#define QMAP_CMD_ACK 1
+#define QMAP_CMD_UNSUPPORTED 2
+#define QMAP_CMD_INVALID 3
+
+#define QMAP_DFC_CONFIG 10
+#define QMAP_DFC_IND 11
+#define QMAP_DFC_QUERY 12
+#define QMAP_DFC_END_MARKER 13
+
+struct qmap_hdr {
+ u8 cd_pad;
+ u8 mux_id;
+ __be16 pkt_len;
+} __aligned(1);
+
+#define QMAP_HDR_LEN sizeof(struct qmap_hdr)
+
+struct qmap_cmd_hdr {
+ u8 pad_len:6;
+ u8 reserved_bit:1;
+ u8 cd_bit:1;
+ u8 mux_id;
+ __be16 pkt_len;
+ u8 cmd_name;
+ u8 cmd_type:2;
+ u8 reserved:6;
+ u16 reserved2;
+ __be32 tx_id;
+} __aligned(1);
+
+struct qmap_dfc_config {
+ struct qmap_cmd_hdr hdr;
+ u8 cmd_ver;
+ u8 cmd_id;
+ u8 reserved;
+ u8 tx_info:1;
+ u8 reserved2:7;
+ __be32 ep_type;
+ __be32 iface_id;
+ u32 reserved3;
+} __aligned(1);
+
+struct qmap_dfc_ind {
+ struct qmap_cmd_hdr hdr;
+ u8 cmd_ver;
+ u8 reserved;
+ __be16 seq_num;
+ u8 reserved2;
+ u8 tx_info_valid:1;
+ u8 tx_info:1;
+ u8 reserved3:6;
+ u8 bearer_id;
+ u8 tcp_bidir:1;
+ u8 bearer_status:3;
+ u8 reserved4:4;
+ __be32 grant;
+ u32 reserved5;
+ u32 reserved6;
+} __aligned(1);
+
+struct qmap_dfc_query {
+ struct qmap_cmd_hdr hdr;
+ u8 cmd_ver;
+ u8 reserved;
+ u8 bearer_id;
+ u8 reserved2;
+ u32 reserved3;
+} __aligned(1);
+
+struct qmap_dfc_query_resp {
+ struct qmap_cmd_hdr hdr;
+ u8 cmd_ver;
+ u8 bearer_id;
+ u8 tcp_bidir:1;
+ u8 reserved:7;
+ u8 invalid:1;
+ u8 reserved2:7;
+ __be32 grant;
+ u32 reserved3;
+ u32 reserved4;
+} __aligned(1);
+
+struct qmap_dfc_end_marker_req {
+ struct qmap_cmd_hdr hdr;
+ u8 cmd_ver;
+ u8 reserved;
+ u8 bearer_id;
+ u8 reserved2;
+ u16 reserved3;
+ __be16 seq_num;
+ u32 reserved4;
+} __aligned(1);
+
+struct qmap_dfc_end_marker_cnf {
+ struct qmap_cmd_hdr hdr;
+ u8 cmd_ver;
+ u8 reserved;
+ u8 bearer_id;
+ u8 reserved2;
+ u16 reserved3;
+ __be16 seq_num;
+ u32 reserved4;
+} __aligned(1);
+
+static struct dfc_flow_status_ind_msg_v01 qmap_flow_ind;
+static struct dfc_tx_link_status_ind_msg_v01 qmap_tx_ind;
+static struct dfc_qmi_data __rcu *qmap_dfc_data;
+static atomic_t qmap_txid;
+static void *rmnet_ctl_handle;
+
+static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
+ u8 bearer_id, u16 seq, u32 tx_id);
+
+static void dfc_qmap_send_cmd(struct sk_buff *skb)
+{
+ trace_dfc_qmap(skb->data, skb->len, false);
+
+ if (rmnet_ctl_send_client(rmnet_ctl_handle, skb)) {
+ pr_err("Failed to send to rmnet ctl\n");
+ kfree_skb(skb);
+ }
+}
+
+static void dfc_qmap_send_inband_ack(struct dfc_qmi_data *dfc,
+ struct sk_buff *skb)
+{
+ struct qmap_cmd_hdr *cmd;
+
+ cmd = (struct qmap_cmd_hdr *)skb->data;
+
+ skb->protocol = htons(ETH_P_MAP);
+ skb->dev = rmnet_get_real_dev(dfc->rmnet_port);
+
+ trace_dfc_qmap(skb->data, skb->len, false);
+ dev_queue_xmit(skb);
+}
+
+static int dfc_qmap_handle_ind(struct dfc_qmi_data *dfc,
+ struct sk_buff *skb)
+{
+ struct qmap_dfc_ind *cmd;
+
+ if (skb->len < sizeof(struct qmap_dfc_ind))
+ return QMAP_CMD_INVALID;
+
+ cmd = (struct qmap_dfc_ind *)skb->data;
+
+ if (cmd->tx_info_valid) {
+ memset(&qmap_tx_ind, 0, sizeof(qmap_tx_ind));
+ qmap_tx_ind.tx_status = cmd->tx_info;
+ qmap_tx_ind.bearer_info_valid = 1;
+ qmap_tx_ind.bearer_info_len = 1;
+ qmap_tx_ind.bearer_info[0].mux_id = cmd->hdr.mux_id;
+ qmap_tx_ind.bearer_info[0].bearer_id = cmd->bearer_id;
+
+ dfc_handle_tx_link_status_ind(dfc, &qmap_tx_ind);
+
+ /* Ignore grant since it is always 0 */
+ goto done;
+ }
+
+ memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
+ qmap_flow_ind.flow_status_valid = 1;
+ qmap_flow_ind.flow_status_len = 1;
+ qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
+ qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
+ qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
+ qmap_flow_ind.flow_status[0].seq_num = ntohs(cmd->seq_num);
+
+ if (cmd->tcp_bidir) {
+ qmap_flow_ind.ancillary_info_valid = 1;
+ qmap_flow_ind.ancillary_info_len = 1;
+ qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
+ qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
+ qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
+ }
+
+ dfc_do_burst_flow_control(dfc, &qmap_flow_ind);
+
+done:
+ return QMAP_CMD_ACK;
+}
+
+static int dfc_qmap_handle_query_resp(struct dfc_qmi_data *dfc,
+ struct sk_buff *skb)
+{
+ struct qmap_dfc_query_resp *cmd;
+
+ if (skb->len < sizeof(struct qmap_dfc_query_resp))
+ return QMAP_CMD_DONE;
+
+ cmd = (struct qmap_dfc_query_resp *)skb->data;
+
+ if (cmd->invalid)
+ return QMAP_CMD_DONE;
+
+ memset(&qmap_flow_ind, 0, sizeof(qmap_flow_ind));
+ qmap_flow_ind.flow_status_valid = 1;
+ qmap_flow_ind.flow_status_len = 1;
+
+ qmap_flow_ind.flow_status[0].mux_id = cmd->hdr.mux_id;
+ qmap_flow_ind.flow_status[0].bearer_id = cmd->bearer_id;
+ qmap_flow_ind.flow_status[0].num_bytes = ntohl(cmd->grant);
+ qmap_flow_ind.flow_status[0].seq_num = 0xFFFF;
+
+ if (cmd->tcp_bidir) {
+ qmap_flow_ind.ancillary_info_valid = 1;
+ qmap_flow_ind.ancillary_info_len = 1;
+ qmap_flow_ind.ancillary_info[0].mux_id = cmd->hdr.mux_id;
+ qmap_flow_ind.ancillary_info[0].bearer_id = cmd->bearer_id;
+ qmap_flow_ind.ancillary_info[0].reserved = DFC_MASK_TCP_BIDIR;
+ }
+
+ dfc_do_burst_flow_control(dfc, &qmap_flow_ind);
+
+ return QMAP_CMD_DONE;
+}
+
+static void dfc_qmap_set_end_marker(struct dfc_qmi_data *dfc, u8 mux_id,
+ u8 bearer_id, u16 seq_num, u32 tx_id)
+{
+ struct net_device *dev;
+ struct qos_info *qos;
+ struct rmnet_bearer_map *bearer;
+
+ dev = rmnet_get_rmnet_dev(dfc->rmnet_port, mux_id);
+ if (!dev)
+ return;
+
+ qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+ if (!qos)
+ return;
+
+ spin_lock_bh(&qos->qos_lock);
+
+ bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
+
+ if (bearer && bearer->last_seq == seq_num && bearer->grant_size) {
+ bearer->ack_req = 1;
+ bearer->ack_txid = tx_id;
+ } else {
+ dfc_qmap_send_end_marker_cnf(qos, bearer_id, seq_num, tx_id);
+ }
+
+ spin_unlock_bh(&qos->qos_lock);
+}
+
+static int dfc_qmap_handle_end_marker_req(struct dfc_qmi_data *dfc,
+ struct sk_buff *skb)
+{
+ struct qmap_dfc_end_marker_req *cmd;
+
+ if (skb->len < sizeof(struct qmap_dfc_end_marker_req))
+ return QMAP_CMD_INVALID;
+
+ cmd = (struct qmap_dfc_end_marker_req *)skb->data;
+
+ dfc_qmap_set_end_marker(dfc, cmd->hdr.mux_id, cmd->bearer_id,
+ ntohs(cmd->seq_num), ntohl(cmd->hdr.tx_id));
+
+ return QMAP_CMD_DONE;
+}
+
+static void dfc_qmap_cmd_handler(struct sk_buff *skb)
+{
+ struct qmap_cmd_hdr *cmd;
+ struct dfc_qmi_data *dfc;
+ int rc = QMAP_CMD_DONE;
+
+ if (!skb)
+ return;
+
+ trace_dfc_qmap(skb->data, skb->len, true);
+
+ if (skb->len < sizeof(struct qmap_cmd_hdr))
+ goto free_skb;
+
+ cmd = (struct qmap_cmd_hdr *)skb->data;
+ if (!cmd->cd_bit || skb->len != ntohs(cmd->pkt_len) + QMAP_HDR_LEN)
+ goto free_skb;
+
+ if (cmd->cmd_name == QMAP_DFC_QUERY) {
+ if (cmd->cmd_type != QMAP_CMD_ACK)
+ goto free_skb;
+ } else if (cmd->cmd_type != QMAP_CMD_REQUEST) {
+ goto free_skb;
+ }
+
+ rcu_read_lock();
+
+ dfc = rcu_dereference(qmap_dfc_data);
+ if (!dfc || READ_ONCE(dfc->restart_state)) {
+ rcu_read_unlock();
+ goto free_skb;
+ }
+
+ switch (cmd->cmd_name) {
+ case QMAP_DFC_IND:
+ rc = dfc_qmap_handle_ind(dfc, skb);
+ qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
+ break;
+
+ case QMAP_DFC_QUERY:
+ rc = dfc_qmap_handle_query_resp(dfc, skb);
+ break;
+
+ case QMAP_DFC_END_MARKER:
+ rc = dfc_qmap_handle_end_marker_req(dfc, skb);
+ break;
+
+ default:
+ rc = QMAP_CMD_UNSUPPORTED;
+ }
+
+ /* Send ack */
+ if (rc != QMAP_CMD_DONE) {
+ cmd->cmd_type = rc;
+ if (cmd->cmd_name == QMAP_DFC_IND)
+ dfc_qmap_send_inband_ack(dfc, skb);
+ else
+ dfc_qmap_send_cmd(skb);
+
+ rcu_read_unlock();
+ return;
+ }
+
+ rcu_read_unlock();
+
+free_skb:
+ kfree_skb(skb);
+}
+
+static void dfc_qmap_send_config(struct dfc_qmi_data *data)
+{
+ struct sk_buff *skb;
+ struct qmap_dfc_config *dfc_config;
+ unsigned int len = sizeof(struct qmap_dfc_config);
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ skb->protocol = htons(ETH_P_MAP);
+ dfc_config = (struct qmap_dfc_config *)skb_put(skb, len);
+ memset(dfc_config, 0, len);
+
+ dfc_config->hdr.cd_bit = 1;
+ dfc_config->hdr.mux_id = 0;
+ dfc_config->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
+ dfc_config->hdr.cmd_name = QMAP_DFC_CONFIG;
+ dfc_config->hdr.cmd_type = QMAP_CMD_REQUEST;
+ dfc_config->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));
+
+ dfc_config->cmd_ver = QMAP_DFC_VER;
+ dfc_config->cmd_id = QMAP_DFC_IND;
+ dfc_config->tx_info = 1;
+ dfc_config->ep_type = htonl(data->svc.ep_type);
+ dfc_config->iface_id = htonl(data->svc.iface_id);
+
+ dfc_qmap_send_cmd(skb);
+}
+
+static void dfc_qmap_send_query(u8 mux_id, u8 bearer_id)
+{
+ struct sk_buff *skb;
+ struct qmap_dfc_query *dfc_query;
+ unsigned int len = sizeof(struct qmap_dfc_query);
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ skb->protocol = htons(ETH_P_MAP);
+ dfc_query = (struct qmap_dfc_query *)skb_put(skb, len);
+ memset(dfc_query, 0, len);
+
+ dfc_query->hdr.cd_bit = 1;
+ dfc_query->hdr.mux_id = mux_id;
+ dfc_query->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
+ dfc_query->hdr.cmd_name = QMAP_DFC_QUERY;
+ dfc_query->hdr.cmd_type = QMAP_CMD_REQUEST;
+ dfc_query->hdr.tx_id = htonl(atomic_inc_return(&qmap_txid));
+
+ dfc_query->cmd_ver = QMAP_DFC_VER;
+ dfc_query->bearer_id = bearer_id;
+
+ dfc_qmap_send_cmd(skb);
+}
+
+static void dfc_qmap_send_end_marker_cnf(struct qos_info *qos,
+ u8 bearer_id, u16 seq, u32 tx_id)
+{
+ struct sk_buff *skb;
+ struct qmap_dfc_end_marker_cnf *em_cnf;
+ unsigned int len = sizeof(struct qmap_dfc_end_marker_cnf);
+
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ em_cnf = (struct qmap_dfc_end_marker_cnf *)skb_put(skb, len);
+ memset(em_cnf, 0, len);
+
+ em_cnf->hdr.cd_bit = 1;
+ em_cnf->hdr.mux_id = qos->mux_id;
+ em_cnf->hdr.pkt_len = htons(len - QMAP_HDR_LEN);
+ em_cnf->hdr.cmd_name = QMAP_DFC_END_MARKER;
+ em_cnf->hdr.cmd_type = QMAP_CMD_ACK;
+ em_cnf->hdr.tx_id = htonl(tx_id);
+
+ em_cnf->cmd_ver = QMAP_DFC_VER;
+ em_cnf->bearer_id = bearer_id;
+ em_cnf->seq_num = htons(seq);
+
+ skb->protocol = htons(ETH_P_MAP);
+ skb->dev = qos->real_dev;
+
+ /* This cmd needs to be sent in-band */
+ trace_dfc_qmap(skb->data, skb->len, false);
+ rmnet_map_tx_qmap_cmd(skb);
+}
+
+void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type)
+{
+ struct rmnet_bearer_map *bearer;
+
+ if (type == DFC_ACK_TYPE_DISABLE) {
+ bearer = qmi_rmnet_get_bearer_map(qos, bearer_id);
+ if (bearer)
+ dfc_qmap_send_end_marker_cnf(qos, bearer_id,
+ seq, bearer->ack_txid);
+ } else if (type == DFC_ACK_TYPE_THRESHOLD) {
+ dfc_qmap_send_query(qos->mux_id, bearer_id);
+ }
+}
+
+static struct rmnet_ctl_client_hooks cb = {
+ .ctl_dl_client_hook = dfc_qmap_cmd_handler,
+};
+
+int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
+ struct qmi_info *qmi)
+{
+ struct dfc_qmi_data *data;
+
+ if (!port || !qmi)
+ return -EINVAL;
+
+ data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->rmnet_port = port;
+ data->index = index;
+ memcpy(&data->svc, psvc, sizeof(data->svc));
+
+ qmi->dfc_clients[index] = (void *)data;
+ rcu_assign_pointer(qmap_dfc_data, data);
+
+ atomic_set(&qmap_txid, 0);
+
+ rmnet_ctl_handle = rmnet_ctl_register_client(&cb);
+ if (!rmnet_ctl_handle)
+ pr_err("Failed to register with rmnet ctl\n");
+
+ trace_dfc_client_state_up(data->index, data->svc.instance,
+ data->svc.ep_type, data->svc.iface_id);
+
+ pr_info("DFC QMAP init\n");
+
+ dfc_qmap_send_config(data);
+
+ return 0;
+}
+
+void dfc_qmap_client_exit(void *dfc_data)
+{
+ struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
+
+ if (!data) {
+ pr_err("%s() data is null\n", __func__);
+ return;
+ }
+
+ trace_dfc_client_state_down(data->index, 0);
+
+ rmnet_ctl_unregister_client(rmnet_ctl_handle);
+
+ WRITE_ONCE(data->restart_state, 1);
+ RCU_INIT_POINTER(qmap_dfc_data, NULL);
+ synchronize_rcu();
+
+ kfree(data);
+
+ pr_info("DFC QMAP exit\n");
+}
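/*
 * Editor's sketch (not part of the patch): dfc_qmap_client_init/_exit above
 * manage qmap_dfc_data with a standard RCU singleton pattern -- publish via
 * rcu_assign_pointer(), read under rcu_read_lock()/rcu_dereference(), and on
 * teardown NULL the pointer and synchronize_rcu() before kfree(). A minimal,
 * hypothetical stand-alone form of that pattern (all names illustrative):
 */
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_client { int id; };

static struct my_client __rcu *g_client;

static int my_client_init(int id)
{
	struct my_client *c = kzalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return -ENOMEM;
	c->id = id;
	rcu_assign_pointer(g_client, c);	/* publish to readers */
	return 0;
}

static void my_client_use(void)
{
	struct my_client *c;

	rcu_read_lock();
	c = rcu_dereference(g_client);
	if (c)
		pr_info("client %d\n", c->id);
	rcu_read_unlock();
}

static void my_client_exit(void)
{
	struct my_client *c = rcu_dereference_protected(g_client, 1);

	RCU_INIT_POINTER(g_client, NULL);
	synchronize_rcu();	/* wait for in-flight readers to drain */
	kfree(c);
}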
diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c
index 05a491c..f175881 100644
--- a/drivers/soc/qcom/dfc_qmi.c
+++ b/drivers/soc/qcom/dfc_qmi.c
@@ -3,26 +3,14 @@
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
-#include <linux/rtnetlink.h>
#include <net/pkt_sched.h>
-#include <linux/soc/qcom/qmi.h>
#include <soc/qcom/rmnet_qmi.h>
#include <soc/qcom/qmi_rmnet.h>
+#include "dfc_defs.h"
-#include "qmi_rmnet_i.h"
#define CREATE_TRACE_POINTS
#include <trace/events/dfc.h>
-#define DFC_MASK_TCP_BIDIR 0x1
-#define DFC_MASK_RAT_SWITCH 0x2
-#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
-#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)
-
-#define DFC_MAX_QOS_ID_V01 2
-
-#define DFC_ACK_TYPE_DISABLE 1
-#define DFC_ACK_TYPE_THRESHOLD 2
-
struct dfc_qmap_header {
u8 pad_len:6;
u8 reserved_bit:1;
@@ -47,20 +35,6 @@ struct dfc_ack_cmd {
u8 bearer_id;
} __aligned(1);
-struct dfc_qmi_data {
- void *rmnet_port;
- struct workqueue_struct *dfc_wq;
- struct work_struct svc_arrive;
- struct qmi_handle handle;
- struct sockaddr_qrtr ssctl;
- struct svc_info svc;
- struct work_struct qmi_ind_work;
- struct list_head qmi_ind_q;
- spinlock_t qmi_ind_lock;
- int index;
- int restart_state;
-};
-
static void dfc_svc_init(struct work_struct *work);
/* **************************************************** */
@@ -106,28 +80,6 @@ struct dfc_indication_register_resp_msg_v01 {
struct qmi_response_type_v01 resp;
};
-enum dfc_ip_type_enum_v01 {
- DFC_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
- DFC_IPV4_TYPE_V01 = 0x4,
- DFC_IPV6_TYPE_V01 = 0x6,
- DFC_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
-};
-
-struct dfc_qos_id_type_v01 {
- u32 qos_id;
- enum dfc_ip_type_enum_v01 ip_type;
-};
-
-struct dfc_flow_status_info_type_v01 {
- u8 subs_id;
- u8 mux_id;
- u8 bearer_id;
- u32 num_bytes;
- u16 seq_num;
- u8 qos_ids_len;
- struct dfc_qos_id_type_v01 qos_ids[DFC_MAX_QOS_ID_V01];
-};
-
static struct qmi_elem_info dfc_qos_id_type_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
@@ -241,13 +193,6 @@ static struct qmi_elem_info dfc_flow_status_info_type_v01_ei[] = {
},
};
-struct dfc_ancillary_info_type_v01 {
- u8 subs_id;
- u8 mux_id;
- u8 bearer_id;
- u32 reserved;
-};
-
static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
@@ -300,31 +245,6 @@ static struct qmi_elem_info dfc_ancillary_info_type_v01_ei[] = {
},
};
-struct dfc_flow_status_ind_msg_v01 {
- u8 flow_status_valid;
- u8 flow_status_len;
- struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
- u8 eod_ack_reqd_valid;
- u8 eod_ack_reqd;
- u8 ancillary_info_valid;
- u8 ancillary_info_len;
- struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
-};
-
-struct dfc_bearer_info_type_v01 {
- u8 subs_id;
- u8 mux_id;
- u8 bearer_id;
- enum dfc_ip_type_enum_v01 ip_type;
-};
-
-struct dfc_tx_link_status_ind_msg_v01 {
- u8 tx_status;
- u8 bearer_info_valid;
- u8 bearer_info_len;
- struct dfc_bearer_info_type_v01 bearer_info[DFC_MAX_BEARERS_V01];
-};
-
struct dfc_get_flow_status_req_msg_v01 {
u8 bearer_id_list_valid;
u8 bearer_id_list_len;
@@ -954,6 +874,11 @@ dfc_send_ack(struct net_device *dev, u8 bearer_id, u16 seq, u8 mux_id, u8 type)
if (!qos)
return;
+ if (dfc_qmap) {
+ dfc_qmap_send_ack(qos, bearer_id, seq, type);
+ return;
+ }
+
skb = alloc_skb(data_size, GFP_ATOMIC);
if (!skb)
return;
@@ -1083,6 +1008,11 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
(itm->grant_size > 0 && fc_info->num_bytes == 0))
action = true;
+ /* This is needed by qmap */
+ if (dfc_qmap && itm->ack_req && !ack_req && itm->grant_size)
+ dfc_qmap_send_ack(qos, itm->bearer_id,
+ itm->seq, DFC_ACK_TYPE_DISABLE);
+
itm->grant_size = fc_info->num_bytes;
itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size);
itm->seq = fc_info->seq_num;
@@ -1099,10 +1029,9 @@ static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
return rc;
}
-static void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
- struct dfc_svc_ind *svc_ind)
+void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
+ struct dfc_flow_status_ind_msg_v01 *ind)
{
- struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->d.dfc_info;
struct net_device *dev;
struct qos_info *qos;
struct dfc_flow_status_info_type_v01 *flow_status;
@@ -1176,13 +1105,17 @@ static void dfc_update_tx_link_status(struct net_device *dev,
if (!itm)
return;
+ /* If no change in tx status, ignore */
+ if (itm->tx_off == !tx_status)
+ return;
+
if (itm->grant_size && !tx_status) {
itm->grant_size = 0;
itm->tcp_bidir = false;
dfc_bearer_flow_ctl(dev, itm, qos);
} else if (itm->grant_size == 0 && tx_status && !itm->rat_switch) {
itm->grant_size = DEFAULT_GRANT;
- itm->grant_thresh = DEFAULT_GRANT;
+ itm->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
itm->seq = 0;
itm->ack_req = 0;
dfc_bearer_flow_ctl(dev, itm, qos);
@@ -1191,10 +1124,9 @@ static void dfc_update_tx_link_status(struct net_device *dev,
itm->tx_off = !tx_status;
}
-static void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
- struct dfc_svc_ind *svc_ind)
+void dfc_handle_tx_link_status_ind(struct dfc_qmi_data *dfc,
+ struct dfc_tx_link_status_ind_msg_v01 *ind)
{
- struct dfc_tx_link_status_ind_msg_v01 *ind = &svc_ind->d.tx_status;
struct net_device *dev;
struct qos_info *qos;
struct dfc_bearer_info_type_v01 *bearer_info;
@@ -1256,10 +1188,12 @@ static void dfc_qmi_ind_work(struct work_struct *work)
if (!dfc->restart_state) {
if (svc_ind->msg_id == QMI_DFC_FLOW_STATUS_IND_V01)
- dfc_do_burst_flow_control(dfc, svc_ind);
+ dfc_do_burst_flow_control(
+ dfc, &svc_ind->d.dfc_info);
else if (svc_ind->msg_id ==
QMI_DFC_TX_LINK_STATUS_IND_V01)
- dfc_handle_tx_link_status_ind(dfc, svc_ind);
+ dfc_handle_tx_link_status_ind(
+ dfc, &svc_ind->d.tx_status);
}
kfree(svc_ind);
} while (1);
@@ -1583,7 +1517,7 @@ void dfc_qmi_query_flow(void *dfc_data)
svc_ind->d.dfc_info.flow_status_len = resp->flow_status_len;
memcpy(&svc_ind->d.dfc_info.flow_status, resp->flow_status,
sizeof(resp->flow_status[0]) * resp->flow_status_len);
- dfc_do_burst_flow_control(data, svc_ind);
+ dfc_do_burst_flow_control(data, &svc_ind->d.dfc_info);
done:
kfree(svc_ind);
diff --git a/drivers/soc/qcom/glink_probe.c b/drivers/soc/qcom/glink_probe.c
index 721d80d..678efeb 100644
--- a/drivers/soc/qcom/glink_probe.c
+++ b/drivers/soc/qcom/glink_probe.c
@@ -187,6 +187,7 @@ static void glink_ssr_init_notify(struct glink_ssr *ssr)
nb->nb.notifier_call = glink_ssr_ssr_cb;
nb->nb.priority = GLINK_SSR_PRIORITY;
+ nb->ssr = ssr;
handle = subsys_notif_register_notifier(nb->ssr_label, &nb->nb);
if (IS_ERR_OR_NULL(handle)) {
@@ -195,7 +196,6 @@ static void glink_ssr_init_notify(struct glink_ssr *ssr)
continue;
}
- nb->ssr = ssr;
nb->ssr_register_handle = handle;
list_add_tail(&nb->list, &ssr->notify_list);
}
diff --git a/drivers/soc/qcom/mem-offline.c b/drivers/soc/qcom/mem-offline.c
index 4b0785a..82f0717 100644
--- a/drivers/soc/qcom/mem-offline.c
+++ b/drivers/soc/qcom/mem-offline.c
@@ -45,6 +45,8 @@ enum memory_states {
MAX_STATE,
};
+static enum memory_states *mem_sec_state;
+
static struct mem_offline_mailbox {
struct mbox_client cl;
struct mbox_chan *mbox;
@@ -134,6 +136,148 @@ static int aop_send_msg(unsigned long addr, bool online)
return (mbox_send_message(mailbox.mbox, &pkt) < 0);
}
+/*
+ * When offline_granule >= memory block size, this returns the number of
+ * sections in an offlineable segment.
+ * When offline_granule < memory block size, it returns sections_per_block.
+ */
+static unsigned long get_rounded_sections_per_segment(void)
+{
+ return max(((offline_granule * SZ_1M) / memory_block_size_bytes()) *
+ sections_per_block,
+ (unsigned long)sections_per_block);
+}
+
+static int send_msg(struct memory_notify *mn, bool online, int count)
+{
+ unsigned long segment_size = offline_granule * SZ_1M;
+ unsigned long start, base_sec_nr, sec_nr, sections_per_segment;
+ int ret, idx, i;
+
+ sections_per_segment = get_rounded_sections_per_segment();
+ sec_nr = pfn_to_section_nr(SECTION_ALIGN_DOWN(mn->start_pfn));
+ idx = (sec_nr - start_section_nr) / sections_per_segment;
+ base_sec_nr = start_section_nr + (idx * sections_per_segment);
+ start = section_nr_to_pfn(base_sec_nr);
+
+ for (i = 0; i < count; ++i) {
+ ret = aop_send_msg(__pfn_to_phys(start), online);
+ if (ret) {
+ pr_err("PASR: AOP %s request addr:0x%llx failed\n",
+ online ? "online" : "offline",
+ __pfn_to_phys(start));
+ goto undo;
+ }
+
+ start = __phys_to_pfn(__pfn_to_phys(start) + segment_size);
+ }
+
+ return 0;
+undo:
+ start = section_nr_to_pfn(base_sec_nr);
+ while (i-- > 0) {
+ int ret;
+
+ ret = aop_send_msg(__pfn_to_phys(start), !online);
+ if (ret)
+ panic("Failed to completely online/offline a hotpluggable segment. A quasi state of memblock can cause randomn system failures.");
+ start = __phys_to_pfn(__pfn_to_phys(start) + segment_size);
+ }
+
+ return ret;
+}
+
+static bool need_to_send_remote_request(struct memory_notify *mn,
+ enum memory_states request)
+{
+ int i, idx, cur_idx;
+ int base_sec_nr, sec_nr;
+ unsigned long sections_per_segment;
+
+ sections_per_segment = get_rounded_sections_per_segment();
+ sec_nr = pfn_to_section_nr(SECTION_ALIGN_DOWN(mn->start_pfn));
+ idx = (sec_nr - start_section_nr) / sections_per_segment;
+ cur_idx = (sec_nr - start_section_nr) / sections_per_block;
+ base_sec_nr = start_section_nr + (idx * sections_per_segment);
+
+ /*
+ * For MEM_OFFLINE, don't send the request if there are other online
+ * blocks in the segment.
+ * For MEM_ONLINE, don't send the request if there is already one
+ * online block in the segment.
+ */
+ if (request == MEMORY_OFFLINE || request == MEMORY_ONLINE) {
+ for (i = base_sec_nr;
+ i < (base_sec_nr + sections_per_segment);
+ i += sections_per_block) {
+ idx = (i - start_section_nr) / sections_per_block;
+ /* current operating block */
+ if (idx == cur_idx)
+ continue;
+ if (mem_sec_state[idx] == MEMORY_ONLINE)
+ goto out;
+ }
+ return true;
+ }
+out:
+ return false;
+}
+
+/*
+ * This returns the number of hotpluggable segments in a memory block.
+ */
+static int get_num_memblock_hotplug_segments(void)
+{
+ unsigned long segment_size = offline_granule * SZ_1M;
+ unsigned long block_size = memory_block_size_bytes();
+
+ if (segment_size < block_size) {
+ if (block_size % segment_size) {
+ pr_warn("PASR is unusable. Offline granule size should be in multiples for memory_block_size_bytes.\n");
+ return 0;
+ }
+ return block_size / segment_size;
+ }
+
+ return 1;
+}
+
+static int mem_change_refresh_state(struct memory_notify *mn,
+ enum memory_states state)
+{
+ unsigned long start = SECTION_ALIGN_DOWN(mn->start_pfn);
+ unsigned long sec_nr = pfn_to_section_nr(start);
+ bool online = (state == MEMORY_ONLINE) ? true : false;
+ unsigned long idx = (sec_nr - start_section_nr) / sections_per_block;
+ int ret, count;
+
+ if (mem_sec_state[idx] == state) {
+ /* we shouldn't be getting this request */
+ pr_warn("mem-offline: state of mem%d block already in %s state. Ignoring refresh state change request\n",
+ sec_nr, online ? "online" : "offline");
+ return 0;
+ }
+
+ count = get_num_memblock_hotplug_segments();
+ if (!count)
+ return -EINVAL;
+
+ if (!need_to_send_remote_request(mn, state))
+ goto out;
+
+ ret = send_msg(mn, online, count);
+ if (ret) {
+ /* online failures are critical failures */
+ if (online)
+ BUG_ON(IS_ENABLED(CONFIG_BUG_ON_HW_MEM_ONLINE_FAIL));
+ return -EINVAL;
+ }
+out:
+ mem_sec_state[idx] = state;
+ return 0;
+}
+
static int mem_event_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
@@ -173,9 +317,9 @@ static int mem_event_callback(struct notifier_block *self,
idx) / sections_per_block].fail_count;
cur = ktime_get();
- if (aop_send_msg(__pfn_to_phys(start), true))
- pr_err("PASR: AOP online request addr:0x%llx failed\n",
- __pfn_to_phys(start));
+ if (mem_change_refresh_state(mn, MEMORY_ONLINE))
+ return NOTIFY_BAD;
+
if (!debug_pagealloc_enabled()) {
/* Create kernel page-tables */
create_pgtable_mapping(start_addr, end_addr);
@@ -201,9 +345,11 @@ static int mem_event_callback(struct notifier_block *self,
/* Clear kernel page-tables */
clear_pgtable_mapping(start_addr, end_addr);
}
- if (aop_send_msg(__pfn_to_phys(start), false))
- pr_err("PASR: AOP offline request addr:0x%llx failed\n",
- __pfn_to_phys(start));
+ mem_change_refresh_state(mn, MEMORY_OFFLINE);
+ /*
+ * Notifying that something went bad at this stage won't
+ * help since this is the last stage of memory hotplug.
+ */
delay = ktime_ms_delta(ktime_get(), cur);
record_stat(sec_nr, delay, MEMORY_OFFLINE);
@@ -214,9 +360,7 @@ static int mem_event_callback(struct notifier_block *self,
case MEM_CANCEL_ONLINE:
pr_info("mem-offline: MEM_CANCEL_ONLINE: start = 0x%llx end = 0x%llx\n",
start_addr, end_addr);
- if (aop_send_msg(__pfn_to_phys(start), false))
- pr_err("PASR: AOP online request addr:0x%llx failed\n",
- __pfn_to_phys(start));
+ mem_change_refresh_state(mn, MEMORY_OFFLINE);
break;
default:
break;
@@ -348,9 +492,6 @@ static struct attribute_group mem_attr_group = {
static int mem_sysfs_init(void)
{
- unsigned int total_blks = (end_section_nr - start_section_nr + 1) /
- sections_per_block;
-
if (start_section_nr == end_section_nr)
return -EINVAL;
@@ -361,11 +502,6 @@ static int mem_sysfs_init(void)
if (sysfs_create_group(kobj, &mem_attr_group))
kobject_put(kobj);
- mem_info = kzalloc(sizeof(*mem_info) * total_blks * MAX_STATE,
- GFP_KERNEL);
- if (!mem_info)
- return -ENOMEM;
-
return 0;
}
@@ -384,8 +520,9 @@ static int mem_parse_dt(struct platform_device *pdev)
return -EINVAL;
}
offline_granule = be32_to_cpup(val);
- if (!offline_granule && !(offline_granule & (offline_granule - 1)) &&
- offline_granule * SZ_1M < MIN_MEMORY_BLOCK_SIZE) {
+ if (!offline_granule || (offline_granule & (offline_granule - 1)) ||
+ ((offline_granule * SZ_1M < MIN_MEMORY_BLOCK_SIZE) &&
+ (MIN_MEMORY_BLOCK_SIZE % (offline_granule * SZ_1M)))) {
pr_err("mem-offine: invalid granule property\n");
return -EINVAL;
}
@@ -413,7 +550,8 @@ static struct notifier_block hotplug_memory_callback_nb = {
static int mem_offline_driver_probe(struct platform_device *pdev)
{
- int ret;
+ unsigned int total_blks;
+ int ret, i;
ret = mem_parse_dt(pdev);
if (ret)
@@ -426,16 +564,46 @@ static int mem_offline_driver_probe(struct platform_device *pdev)
if (ret > 0)
pr_err("mem-offline: !!ERROR!! Auto onlining some memory blocks failed. System could run with less RAM\n");
- if (mem_sysfs_init())
- return -ENODEV;
+ total_blks = (end_section_nr - start_section_nr + 1) /
+ sections_per_block;
+ mem_info = kcalloc(total_blks * MAX_STATE, sizeof(*mem_info),
+ GFP_KERNEL);
+ if (!mem_info)
+ return -ENOMEM;
+
+ mem_sec_state = kcalloc(total_blks, sizeof(*mem_sec_state), GFP_KERNEL);
+ if (!mem_sec_state) {
+ ret = -ENOMEM;
+ goto err_free_mem_info;
+ }
+
+ /* we assume the hardware state of memory blocks is online after boot */
+ for (i = 0; i < total_blks; i++)
+ mem_sec_state[i] = MEMORY_ONLINE;
+
+ if (mem_sysfs_init()) {
+ ret = -ENODEV;
+ goto err_free_mem_sec_state;
+ }
if (register_hotmemory_notifier(&hotplug_memory_callback_nb)) {
pr_err("mem-offline: Registering memory hotplug notifier failed\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_sysfs_remove_group;
}
pr_info("mem-offline: Added memory blocks ranging from mem%lu - mem%lu\n",
start_section_nr, end_section_nr);
+
return 0;
+
+err_sysfs_remove_group:
+ sysfs_remove_group(kobj, &mem_attr_group);
+ kobject_put(kobj);
+err_free_mem_sec_state:
+ kfree(mem_sec_state);
+err_free_mem_info:
+ kfree(mem_info);
+ return ret;
}
static const struct of_device_id mem_offline_match_table[] = {
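/*
 * Editor's sketch (not part of the patch): the PASR bookkeeping above hinges
 * on how many offlineable segments fit in one kernel memory block, as
 * computed by get_num_memblock_hotplug_segments(). A hypothetical user-space
 * rendering of that arithmetic, assuming a 256MB memory block for the sake
 * of the printed examples:
 */
#include <stdio.h>

#define SZ_1M (1024UL * 1024UL)

static unsigned long segments_per_block(unsigned long granule_mb,
					unsigned long block_bytes)
{
	unsigned long seg = granule_mb * SZ_1M;

	if (seg < block_bytes) {
		if (block_bytes % seg)
			return 0;	/* PASR unusable: not a clean multiple */
		return block_bytes / seg;
	}
	return 1;	/* granule >= block: one request covers the block */
}

int main(void)
{
	unsigned long block = 256 * SZ_1M;

	printf("64MB granule  -> %lu segment msgs per block\n",
	       segments_per_block(64, block));	/* prints 4 */
	printf("512MB granule -> %lu segment msgs per block\n",
	       segments_per_block(512, block));	/* prints 1 */
	return 0;
}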
diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c
index 91c99f2..46732bf 100644
--- a/drivers/soc/qcom/qmi_rmnet.c
+++ b/drivers/soc/qcom/qmi_rmnet.c
@@ -23,11 +23,15 @@
#define FLAG_DFC_MASK 0x000F
#define FLAG_POWERSAVE_MASK 0x0010
+#define FLAG_QMAP_MASK 0x0020
+
#define FLAG_TO_MODE(f) ((f) & FLAG_DFC_MASK)
#define DFC_SUPPORTED_MODE(m) \
((m) == DFC_MODE_FLOW_ID || (m) == DFC_MODE_MQ_NUM)
+#define FLAG_TO_QMAP(f) ((f) & FLAG_QMAP_MASK)
int dfc_mode;
+int dfc_qmap;
#define IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)
unsigned int rmnet_wq_frequency __read_mostly = 1000;
@@ -82,7 +86,7 @@ void *qmi_rmnet_has_dfc_client(struct qmi_info *qmi)
{
int i;
- if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)))
+ if (!qmi)
return NULL;
for (i = 0; i < MAX_CLIENT_NUM; i++) {
@@ -379,18 +383,12 @@ static void qmi_rmnet_query_flows(struct qmi_info *qmi)
int i;
for (i = 0; i < MAX_CLIENT_NUM; i++) {
- if (qmi->dfc_clients[i])
+ if (qmi->dfc_clients[i] && !dfc_qmap)
dfc_qmi_query_flow(qmi->dfc_clients[i]);
}
}
#else
-static inline void
-qmi_rmnet_update_flow_link(struct qmi_info *qmi, struct net_device *dev,
- struct rmnet_flow_map *itm, int add_flow)
-{
-}
-
static inline void qmi_rmnet_clean_flow_list(struct qos_info *qos)
{
}
@@ -423,7 +421,7 @@ static inline void qmi_rmnet_query_flows(struct qmi_info *qmi)
static int
qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
{
- int idx, rc, err = 0;
+ int idx, err = 0;
struct svc_info svc;
ASSERT_RTNL();
@@ -447,18 +445,17 @@ qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
svc.ep_type = tcm->tcm_info;
svc.iface_id = tcm->tcm_parent;
- if (DFC_SUPPORTED_MODE(FLAG_TO_MODE(tcm->tcm_ifindex)) &&
+ if (DFC_SUPPORTED_MODE(dfc_mode) &&
!qmi->dfc_clients[idx] && !qmi->dfc_pending[idx]) {
- rc = dfc_qmi_client_init(port, idx, &svc, qmi);
- if (rc < 0)
- err = rc;
+ if (dfc_qmap)
+ err = dfc_qmap_client_init(port, idx, &svc, qmi);
+ else
+ err = dfc_qmi_client_init(port, idx, &svc, qmi);
}
if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) &&
(idx == 0) && !qmi->wda_client && !qmi->wda_pending) {
- rc = wda_qmi_client_init(port, &svc, qmi);
- if (rc < 0)
- err = rc;
+ err = wda_qmi_client_init(port, &svc, qmi);
}
return err;
@@ -477,7 +474,10 @@ __qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
data = qmi->dfc_pending[idx];
if (data) {
- dfc_qmi_client_exit(data);
+ if (dfc_qmap)
+ dfc_qmap_client_exit(data);
+ else
+ dfc_qmi_client_exit(data);
qmi->dfc_clients[idx] = NULL;
qmi->dfc_pending[idx] = NULL;
}
@@ -524,20 +524,22 @@ void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt)
switch (tcm->tcm_family) {
case NLMSG_FLOW_ACTIVATE:
- if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)) ||
+ if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode) ||
!qmi_rmnet_has_dfc_client(qmi))
return;
qmi_rmnet_add_flow(dev, tcm, qmi);
break;
case NLMSG_FLOW_DEACTIVATE:
- if (!qmi || !DFC_SUPPORTED_MODE(FLAG_TO_MODE(qmi->flag)))
+ if (!qmi || !DFC_SUPPORTED_MODE(dfc_mode))
return;
qmi_rmnet_del_flow(dev, tcm, qmi);
break;
case NLMSG_CLIENT_SETUP:
dfc_mode = FLAG_TO_MODE(tcm->tcm_ifindex);
+ dfc_qmap = FLAG_TO_QMAP(tcm->tcm_ifindex);
+
if (!DFC_SUPPORTED_MODE(dfc_mode) &&
!(tcm->tcm_ifindex & FLAG_POWERSAVE_MASK))
return;
@@ -628,7 +630,7 @@ void qmi_rmnet_enable_all_flows(struct net_device *dev)
continue;
do_wake = !bearer->grant_size;
bearer->grant_size = DEFAULT_GRANT;
- bearer->grant_thresh = DEFAULT_GRANT;
+ bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
bearer->seq = 0;
bearer->ack_req = 0;
bearer->tcp_bidir = false;
@@ -795,7 +797,7 @@ void qmi_rmnet_ps_on_notify(void *port)
{
struct qmi_rmnet_ps_ind *tmp;
- list_for_each_entry(tmp, &ps_list, list)
+ list_for_each_entry_rcu(tmp, &ps_list, list)
tmp->ps_on_handler(port);
}
EXPORT_SYMBOL(qmi_rmnet_ps_on_notify);
@@ -804,8 +806,9 @@ void qmi_rmnet_ps_off_notify(void *port)
{
struct qmi_rmnet_ps_ind *tmp;
- list_for_each_entry(tmp, &ps_list, list)
+ list_for_each_entry_rcu(tmp, &ps_list, list)
tmp->ps_off_handler(port);
}
EXPORT_SYMBOL(qmi_rmnet_ps_off_notify);
@@ -831,13 +834,12 @@ int qmi_rmnet_ps_ind_deregister(void *port,
if (!port || !ps_ind)
return -EINVAL;
- list_for_each_entry(tmp, &ps_list, list) {
+ list_for_each_entry_rcu(tmp, &ps_list, list) {
if (tmp == ps_ind) {
list_del_rcu(&ps_ind->list);
goto done;
}
}
-
done:
return 0;
}
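/*
 * Editor's sketch (not part of the patch): NLMSG_CLIENT_SETUP above packs
 * both the DFC mode and the new QMAP selector into tcm->tcm_ifindex. A
 * hypothetical decoder showing the bit layout implied by the masks:
 */
#include <linux/printk.h>
#include <linux/types.h>

#define FLAG_DFC_MASK		0x000F	/* DFC mode in the low nibble */
#define FLAG_POWERSAVE_MASK	0x0010
#define FLAG_QMAP_MASK		0x0020	/* new with this patch */

static void decode_setup_flags(int flags)
{
	int mode = flags & FLAG_DFC_MASK;
	bool powersave = !!(flags & FLAG_POWERSAVE_MASK);
	bool qmap = !!(flags & FLAG_QMAP_MASK);

	pr_info("dfc mode %d, powersave %d, qmap %d\n", mode, powersave, qmap);
}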
diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h
index 1466822..15dee7c 100644
--- a/drivers/soc/qcom/qmi_rmnet_i.h
+++ b/drivers/soc/qcom/qmi_rmnet_i.h
@@ -9,9 +9,6 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
-#define IP_VER_4 4
-#define IP_VER_6 6
-
#define MAX_MQ_NUM 10
#define MAX_CLIENT_NUM 2
#define MAX_FLOW_NUM 32
@@ -21,6 +18,7 @@
#define DFC_MODE_FLOW_ID 2
#define DFC_MODE_MQ_NUM 3
extern int dfc_mode;
+extern int dfc_qmap;
struct rmnet_bearer_map {
struct list_head list;
@@ -35,6 +33,7 @@ struct rmnet_bearer_map {
bool tcp_bidir;
bool rat_switch;
bool tx_off;
+ u32 ack_txid;
};
struct rmnet_flow_map {
@@ -125,6 +124,13 @@ void dfc_qmi_query_flow(void *dfc_data);
int dfc_bearer_flow_ctl(struct net_device *dev,
struct rmnet_bearer_map *bearer,
struct qos_info *qos);
+
+int dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
+ struct qmi_info *qmi);
+
+void dfc_qmap_client_exit(void *dfc_data);
+
+void dfc_qmap_send_ack(struct qos_info *qos, u8 bearer_id, u16 seq, u8 type);
#else
static inline struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
@@ -150,17 +156,6 @@ static inline void dfc_qmi_client_exit(void *dfc_data)
{
}
-static inline void
-dfc_qmi_burst_check(struct net_device *dev, struct qos_info *qos,
- int ip_type, u32 mark, unsigned int len)
-{
-}
-
-static inline void
-dfc_qmi_query_flow(void *dfc_data)
-{
-}
-
static inline int
dfc_bearer_flow_ctl(struct net_device *dev,
struct rmnet_bearer_map *bearer,
@@ -168,6 +163,17 @@ dfc_bearer_flow_ctl(struct net_device *dev,
{
return 0;
}
+
+static inline int
+dfc_qmap_client_init(void *port, int index, struct svc_info *psvc,
+ struct qmi_info *qmi)
+{
+ return -EINVAL;
+}
+
+static inline void dfc_qmap_client_exit(void *dfc_data)
+{
+}
#endif
#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
diff --git a/drivers/soc/qcom/rmnet_ctl/Kconfig b/drivers/soc/qcom/rmnet_ctl/Kconfig
new file mode 100644
index 0000000..bfb91fbd
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/Kconfig
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# RMNET CTL driver
+#
+
+menuconfig RMNET_CTL
+ tristate "RmNet Control driver"
+ depends on MHI_BUS
+ help
+ Enable the RMNET CTL module, which is used for communicating with
+ the device via the MAP command protocol. This module receives QMAP
+ control commands via MHI.
diff --git a/drivers/soc/qcom/rmnet_ctl/Makefile b/drivers/soc/qcom/rmnet_ctl/Makefile
new file mode 100644
index 0000000..bf798da
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the RMNET CTL module
+#
+
+rmnet_ctl-y += rmnet_ctl_client.o
+rmnet_ctl-y += rmnet_ctl_mhi.o
+obj-$(CONFIG_RMNET_CTL) += rmnet_ctl.o
diff --git a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c
new file mode 100644
index 0000000..299b301
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL client handlers
+ *
+ */
+
+#include <soc/qcom/rmnet_ctl.h>
+#include "rmnet_ctl_client.h"
+
+struct rmnet_ctl_client {
+ struct rmnet_ctl_client_hooks hooks;
+};
+
+struct rmnet_ctl_endpoint {
+ struct rmnet_ctl_dev __rcu *dev;
+ struct rmnet_ctl_client __rcu *client;
+};
+
+static DEFINE_SPINLOCK(client_lock);
+static struct rmnet_ctl_endpoint ctl_ep;
+
+void rmnet_ctl_endpoint_setdev(const struct rmnet_ctl_dev *dev)
+{
+ rcu_assign_pointer(ctl_ep.dev, dev);
+}
+
+void rmnet_ctl_endpoint_post(const void *data, size_t len)
+{
+ struct rmnet_ctl_client *client;
+ struct sk_buff *skb;
+
+ if (unlikely(!data || !len))
+ return;
+
+ rcu_read_lock();
+
+ client = rcu_dereference(ctl_ep.client);
+
+ if (client && client->hooks.ctl_dl_client_hook) {
+ skb = alloc_skb(len, GFP_ATOMIC);
+ if (skb) {
+ skb_put_data(skb, data, len);
+ skb->protocol = htons(ETH_P_MAP);
+ client->hooks.ctl_dl_client_hook(skb);
+ }
+ }
+
+ rcu_read_unlock();
+}
+
+void *rmnet_ctl_register_client(struct rmnet_ctl_client_hooks *hook)
+{
+ struct rmnet_ctl_client *client;
+
+ if (!hook)
+ return NULL;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return NULL;
+ client->hooks = *hook;
+
+ spin_lock(&client_lock);
+
+ /* Only support one client for now */
+ if (rcu_dereference(ctl_ep.client)) {
+ spin_unlock(&client_lock);
+ kfree(client);
+ return NULL;
+ }
+
+ rcu_assign_pointer(ctl_ep.client, client);
+
+ spin_unlock(&client_lock);
+
+ return client;
+}
+EXPORT_SYMBOL(rmnet_ctl_register_client);
+
+int rmnet_ctl_unregister_client(void *handle)
+{
+ struct rmnet_ctl_client *client = (struct rmnet_ctl_client *)handle;
+
+ spin_lock(&client_lock);
+
+ if (rcu_dereference(ctl_ep.client) != client) {
+ spin_unlock(&client_lock);
+ return -EINVAL;
+ }
+
+ RCU_INIT_POINTER(ctl_ep.client, NULL);
+
+ spin_unlock(&client_lock);
+
+ synchronize_rcu();
+ kfree(client);
+
+ return 0;
+}
+EXPORT_SYMBOL(rmnet_ctl_unregister_client);
+
+int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
+{
+ struct rmnet_ctl_client *client = (struct rmnet_ctl_client *)handle;
+ struct rmnet_ctl_dev *dev;
+ int rc = -EINVAL;
+
+ rcu_read_lock();
+
+ if (client != rcu_dereference(ctl_ep.client)) {
+ rcu_read_unlock();
+ return rc;
+ }
+
+ dev = rcu_dereference(ctl_ep.dev);
+ if (dev && dev->xmit)
+ rc = dev->xmit(dev, skb);
+
+ rcu_read_unlock();
+
+ return rc;
+}
+EXPORT_SYMBOL(rmnet_ctl_send_client);
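/*
 * Editor's sketch (not part of the patch): dfc_qmap.c earlier in this series
 * is the real consumer of the client API added above; this hypothetical
 * module shows the minimal register/receive/unregister flow. Only one client
 * is supported at a time, so registration fails if one already exists.
 */
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <soc/qcom/rmnet_ctl.h>

static void *demo_handle;

static void demo_dl_hook(struct sk_buff *skb)
{
	pr_info("rmnet_ctl demo: %u byte QMAP command\n", skb->len);
	kfree_skb(skb);	/* the receiver owns the skb */
}

static struct rmnet_ctl_client_hooks demo_hooks = {
	.ctl_dl_client_hook = demo_dl_hook,
};

static int demo_start(void)
{
	demo_handle = rmnet_ctl_register_client(&demo_hooks);
	return demo_handle ? 0 : -EBUSY;
}

static void demo_stop(void)
{
	rmnet_ctl_unregister_client(demo_handle);
}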
diff --git a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.h b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.h
new file mode 100644
index 0000000..6362581
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_client.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL client handlers
+ *
+ */
+
+#ifndef _RMNET_CTL_CLIENT_H_
+#define _RMNET_CTL_CLIENT_H_
+
+#include <linux/skbuff.h>
+
+struct rmnet_ctl_stats {
+ u64 rx_pkts;
+ u64 rx_err;
+ u64 tx_pkts;
+ u64 tx_err;
+ u64 tx_complete;
+};
+
+struct rmnet_ctl_dev {
+ int (*xmit)(struct rmnet_ctl_dev *dev, struct sk_buff *skb);
+ struct rmnet_ctl_stats stats;
+};
+
+void rmnet_ctl_endpoint_post(const void *data, size_t len);
+void rmnet_ctl_endpoint_setdev(const struct rmnet_ctl_dev *dev);
+
+#endif /* _RMNET_CTL_CLIENT_H_ */
diff --git a/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c
new file mode 100644
index 0000000..af84e13
--- /dev/null
+++ b/drivers/soc/qcom/rmnet_ctl/rmnet_ctl_mhi.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL mhi handler
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of.h>
+#include <linux/skbuff.h>
+#include <linux/mhi.h>
+#include "rmnet_ctl_client.h"
+
+#define RMNET_CTL_DEFAULT_MRU 1024
+
+struct rmnet_ctl_mhi_dev {
+ struct mhi_device *mhi_dev;
+ struct rmnet_ctl_dev dev;
+ u32 mru;
+ spinlock_t rx_lock; /* rx lock */
+ spinlock_t tx_lock; /* tx lock */
+ atomic_t in_reset;
+};
+
+static int rmnet_ctl_send_mhi(struct rmnet_ctl_dev *dev, struct sk_buff *skb)
+{
+ struct rmnet_ctl_mhi_dev *ctl_dev = container_of(
+ dev, struct rmnet_ctl_mhi_dev, dev);
+ int rc;
+
+ spin_lock_bh(&ctl_dev->tx_lock);
+
+ rc = mhi_queue_transfer(ctl_dev->mhi_dev,
+ DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
+ if (rc)
+ dev->stats.tx_err++;
+ else
+ dev->stats.tx_pkts++;
+
+ spin_unlock_bh(&ctl_dev->tx_lock);
+
+ return rc;
+}
+
+static void rmnet_ctl_alloc_buffers(struct rmnet_ctl_mhi_dev *ctl_dev,
+ gfp_t gfp, void *free_buf)
+{
+ struct mhi_device *mhi_dev = ctl_dev->mhi_dev;
+ void *buf;
+ int no_tre, i, rc;
+
+ no_tre = mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE);
+ for (i = 0; i < no_tre; i++) {
+ if (free_buf) {
+ buf = free_buf;
+ free_buf = NULL;
+ } else {
+ buf = kmalloc(ctl_dev->mru, gfp);
+ }
+
+ if (!buf)
+ return;
+
+ spin_lock_bh(&ctl_dev->rx_lock);
+ rc = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
+ buf, ctl_dev->mru, MHI_EOT);
+ spin_unlock_bh(&ctl_dev->rx_lock);
+
+ if (rc) {
+ kfree(buf);
+ return;
+ }
+ }
+}
+
+static void rmnet_ctl_dl_callback(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
+
+ if (mhi_res->transaction_status || !mhi_res->buf_addr) {
+ ctl_dev->dev.stats.rx_err++;
+ } else {
+ ctl_dev->dev.stats.rx_pkts++;
+ rmnet_ctl_endpoint_post(mhi_res->buf_addr,
+ mhi_res->bytes_xferd);
+ }
+
+ /* Re-supply receive buffers */
+ rmnet_ctl_alloc_buffers(ctl_dev, GFP_ATOMIC, mhi_res->buf_addr);
+}
+
+static void rmnet_ctl_ul_callback(struct mhi_device *mhi_dev,
+ struct mhi_result *mhi_res)
+{
+ struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
+ struct sk_buff *skb = (struct sk_buff *)mhi_res->buf_addr;
+
+ if (skb) {
+ ctl_dev->dev.stats.tx_complete++;
+ kfree_skb(skb);
+ }
+}
+
+static void rmnet_ctl_status_callback(struct mhi_device *mhi_dev,
+ enum MHI_CB mhi_cb)
+{
+ struct rmnet_ctl_mhi_dev *ctl_dev = dev_get_drvdata(&mhi_dev->dev);
+
+ if (mhi_cb != MHI_CB_FATAL_ERROR)
+ return;
+
+ atomic_inc(&ctl_dev->in_reset);
+}
+
+static int rmnet_ctl_probe(struct mhi_device *mhi_dev,
+ const struct mhi_device_id *id)
+{
+ struct rmnet_ctl_mhi_dev *ctl_dev;
+ struct device_node *of_node = mhi_dev->dev.of_node;
+ int rc;
+
+ ctl_dev = devm_kzalloc(&mhi_dev->dev, sizeof(*ctl_dev), GFP_KERNEL);
+ if (!ctl_dev)
+ return -ENOMEM;
+
+ ctl_dev->mhi_dev = mhi_dev;
+ ctl_dev->dev.xmit = rmnet_ctl_send_mhi;
+
+ spin_lock_init(&ctl_dev->rx_lock);
+ spin_lock_init(&ctl_dev->tx_lock);
+ atomic_set(&ctl_dev->in_reset, 0);
+ dev_set_drvdata(&mhi_dev->dev, ctl_dev);
+
+ rc = of_property_read_u32(of_node, "mhi,mru", &ctl_dev->mru);
+ if (rc || !ctl_dev->mru)
+ ctl_dev->mru = RMNET_CTL_DEFAULT_MRU;
+
+ rc = mhi_prepare_for_transfer(mhi_dev);
+ if (rc) {
+ pr_err("%s(): Failed to prep for transfer %d\n", __func__, rc);
+ return -EINVAL;
+ }
+
+ /* Post receive buffers */
+ rmnet_ctl_alloc_buffers(ctl_dev, GFP_KERNEL, NULL);
+
+ rmnet_ctl_endpoint_setdev(&ctl_dev->dev);
+
+ pr_info("rmnet_ctl driver probed\n");
+
+ return 0;
+}
+
+static void rmnet_ctl_remove(struct mhi_device *mhi_dev)
+{
+ rmnet_ctl_endpoint_setdev(NULL);
+ synchronize_rcu();
+ dev_set_drvdata(&mhi_dev->dev, NULL);
+
+ pr_info("rmnet_ctl driver removed\n");
+}
+
+static const struct mhi_device_id rmnet_ctl_mhi_match[] = {
+ { .chan = "RMNET_CTL" },
+ {}
+};
+
+static struct mhi_driver rmnet_ctl_driver = {
+ .probe = rmnet_ctl_probe,
+ .remove = rmnet_ctl_remove,
+ .dl_xfer_cb = rmnet_ctl_dl_callback,
+ .ul_xfer_cb = rmnet_ctl_ul_callback,
+ .status_cb = rmnet_ctl_status_callback,
+ .id_table = rmnet_ctl_mhi_match,
+ .driver = {
+ .name = "rmnet_ctl",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_driver(rmnet_ctl_driver,
+ mhi_driver_register, mhi_driver_unregister);
+
+MODULE_DESCRIPTION("RmNet Control Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index 1ab2680..84ab642 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -14,6 +14,9 @@
#include <soc/qcom/scm.h>
#include <soc/qcom/secure_buffer.h>
+#define CREATE_TRACE_POINTS
+#include "trace_secure_buffer.h"
+
DEFINE_MUTEX(secure_buffer_mutex);
struct cp2_mem_chunks {
@@ -28,24 +31,12 @@ struct cp2_lock_req {
u32 lock;
} __attribute__ ((__packed__));
-struct mem_prot_info {
- phys_addr_t addr;
- u64 size;
-};
-
#define MEM_PROT_ASSIGN_ID 0x16
#define MEM_PROTECT_LOCK_ID2 0x0A
#define MEM_PROTECT_LOCK_ID2_FLAT 0x11
#define V2_CHUNK_SIZE SZ_1M
#define FEATURE_ID_CP 12
-struct dest_vm_and_perm_info {
- u32 vm;
- u32 perm;
- u64 ctx;
- u32 ctx_size;
-};
-
#define BATCH_MAX_SIZE SZ_2M
#define BATCH_MAX_SECTIONS 32
@@ -228,9 +219,13 @@ static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
unsigned int entries_size;
unsigned int batch_start = 0;
unsigned int batches_processed;
+ unsigned int i = 0;
+ u64 total_delta;
struct scatterlist *curr_sgl = table->sgl;
struct scatterlist *next_sgl;
int ret = 0;
+ ktime_t batch_assign_start_ts;
+ ktime_t first_assign_ts;
struct mem_prot_info *sg_table_copy = kcalloc(BATCH_MAX_SECTIONS,
sizeof(*sg_table_copy),
GFP_KERNEL);
@@ -238,6 +233,7 @@ static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
if (!sg_table_copy)
return -ENOMEM;
+ first_assign_ts = ktime_get();
while (batch_start < table->nents) {
batches_processed = get_batches_from_sgl(sg_table_copy,
curr_sgl, &next_sgl);
@@ -248,8 +244,13 @@ static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
desc->args[0] = virt_to_phys(sg_table_copy);
desc->args[1] = entries_size;
+ trace_hyp_assign_batch_start(sg_table_copy, batches_processed);
+ batch_assign_start_ts = ktime_get();
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
MEM_PROT_ASSIGN_ID), desc);
+ trace_hyp_assign_batch_end(ret, ktime_us_delta(ktime_get(),
+ batch_assign_start_ts));
+ i++;
if (ret) {
pr_info("%s: Failed to assign memory protection, ret = %d\n",
__func__, ret);
@@ -263,7 +264,8 @@ static int batched_hyp_assign(struct sg_table *table, struct scm_desc *desc)
batch_start += batches_processed;
}
-
+ total_delta = ktime_us_delta(ktime_get(), first_assign_ts);
+ trace_hyp_assign_end(total_delta, total_delta / i);
kfree(sg_table_copy);
return ret;
}
@@ -288,7 +290,7 @@ static int __hyp_assign_table(struct sg_table *table,
size_t dest_vm_copy_size;
if (!table || !table->sgl || !source_vm_list || !source_nelems ||
- !dest_vmids || !dest_perms || !dest_nelems)
+ !dest_vmids || !dest_perms || !dest_nelems || !table->nents)
return -EINVAL;
/*
@@ -333,6 +335,8 @@ static int __hyp_assign_table(struct sg_table *table,
dmac_flush_range(dest_vm_copy,
(void *)dest_vm_copy + dest_vm_copy_size);
+ trace_hyp_assign_info(source_vm_list, source_nelems, dest_vmids,
+ dest_perms, dest_nelems);
ret = batched_hyp_assign(table, &desc);
mutex_unlock(&secure_buffer_mutex);
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
index d1be4a9..f735395 100644
--- a/drivers/soc/qcom/smcinvoke.c
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -103,6 +103,8 @@
#define MEM_RGN_SRVR_ID 1
#define MEM_MAP_SRVR_ID 2
#define CBOBJ_SERVER_ID_START 0x10
+/* local obj id is represented by 15 bits */
+#define MAX_LOCAL_OBJ_ID ((1<<15) - 1)
/* CBOBJs will be served by server id 0x10 onwards */
#define TZHANDLE_GET_SERVER(h) ((uint16_t)((h) & 0xFFFF))
#define TZHANDLE_GET_OBJID(h) (((h) >> 16) & 0x7FFF)
@@ -294,6 +296,9 @@ static struct smcinvoke_mem_obj *find_mem_obj_locked(uint16_t mem_obj_id,
static uint32_t next_mem_region_obj_id_locked(void)
{
+ if (g_last_mem_rgn_id == MAX_LOCAL_OBJ_ID)
+ g_last_mem_rgn_id = 0;
+
while (find_mem_obj_locked(++g_last_mem_rgn_id, SMCINVOKE_MEM_RGN_OBJ))
;
@@ -302,6 +307,9 @@ static uint32_t next_mem_region_obj_id_locked(void)
static uint32_t next_mem_map_obj_id_locked(void)
{
+ if (g_last_mem_map_obj_id == MAX_LOCAL_OBJ_ID)
+ g_last_mem_map_obj_id = 0;
+
while (find_mem_obj_locked(++g_last_mem_map_obj_id,
SMCINVOKE_MEM_MAP_OBJ))
;
@@ -842,8 +850,10 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
cb_req = kmemdup(buf, buf_len, GFP_KERNEL);
if (!cb_req) {
- ret = OBJECT_ERROR_KMEM;
- goto out;
+ /* we need to return an error to the caller, so fill in the result */
+ cb_req = buf;
+ cb_req->result = OBJECT_ERROR_KMEM;
+ return;
}
/* check whether it is to be served by kernel or userspace */
@@ -901,9 +911,11 @@ static void process_tzcb_req(void *buf, size_t buf_len, struct file **arr_filp)
release_tzhandle_locked(cb_req->hdr.tzhandle);
}
}
- hash_del(&cb_txn->hash);
- memcpy(buf, cb_req, buf_len);
- kref_put(&cb_txn->ref_cnt, delete_cb_txn);
+ if (cb_txn) {
+ hash_del(&cb_txn->hash);
+ memcpy(buf, cb_req, buf_len);
+ kref_put(&cb_txn->ref_cnt, delete_cb_txn);
+ }
mutex_unlock(&g_smcinvoke_lock);
}
@@ -1523,34 +1535,26 @@ static long process_invoke_req(struct file *filp, unsigned int cmd,
int32_t tzhandles_to_release[OBJECT_COUNTS_MAX_OO] = {0};
bool tz_acked = false;
- if (_IOC_SIZE(cmd) != sizeof(req)) {
- ret = -EINVAL;
- goto out;
- }
- if (tzobj->context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ) {
- ret = -EPERM;
- goto out;
- }
+ if (_IOC_SIZE(cmd) != sizeof(req))
+ return -EINVAL;
+
+ if (tzobj->context_type != SMCINVOKE_OBJ_TYPE_TZ_OBJ)
+ return -EPERM;
+
ret = copy_from_user(&req, (void __user *)arg, sizeof(req));
- if (ret) {
- ret = -EFAULT;
- goto out;
- }
+ if (ret)
+ return -EFAULT;
+
+ if (req.argsize != sizeof(union smcinvoke_arg))
+ return -EINVAL;
nr_args = OBJECT_COUNTS_NUM_buffers(req.counts) +
OBJECT_COUNTS_NUM_objects(req.counts);
- if (req.argsize != sizeof(union smcinvoke_arg)) {
- ret = -EINVAL;
- goto out;
- }
-
if (nr_args) {
args_buf = kcalloc(nr_args, req.argsize, GFP_KERNEL);
- if (!args_buf) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!args_buf)
+ return -ENOMEM;
ret = copy_from_user(args_buf, u64_to_user_ptr(req.args),
nr_args * req.argsize);
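/*
 * Editor's sketch (not part of the patch): the MAX_LOCAL_OBJ_ID change above
 * resets the last-issued ID once it hits the 15-bit ceiling, so the
 * pre-increment search can never overflow the object-ID field of a tzhandle.
 * Hypothetical reduced form, with id_in_use() standing in for
 * find_mem_obj_locked():
 */
#include <linux/types.h>

#define MAX_LOCAL_OBJ_ID ((1 << 15) - 1)

static u32 g_last_id;

static u32 next_obj_id_locked(bool (*id_in_use)(u32 id))
{
	if (g_last_id == MAX_LOCAL_OBJ_ID)
		g_last_id = 0;	/* wrap before the 15-bit field overflows */

	while (id_in_use(++g_last_id))
		;
	return g_last_id;
}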
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index c2b48dd..8d37358 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -1439,6 +1439,7 @@ static int spcom_device_open(struct inode *inode, struct file *filp)
*/
if (ch->pid == pid) {
pr_err("client is already registered with channel[%s]\n", name);
+ mutex_unlock(&ch->lock);
return -EINVAL;
}
diff --git a/drivers/soc/qcom/spss_utils.c b/drivers/soc/qcom/spss_utils.c
index 13a450c..910991b 100644
--- a/drivers/soc/qcom/spss_utils.c
+++ b/drivers/soc/qcom/spss_utils.c
@@ -28,6 +28,7 @@
#include <linux/bitops.h> /* BIT(x) */
#include <linux/platform_device.h> /* platform_driver_register() */
#include <linux/of.h> /* of_property_count_strings() */
+#include <linux/of_address.h> /* of_address_to_resource() */
#include <linux/io.h> /* ioremap_nocache() */
#include <linux/notifier.h>
#include <linux/sizes.h> /* SZ_4K */
@@ -62,15 +63,21 @@ static u32 spss_emul_type_reg_addr; /* TCSR_SOC_EMULATION_TYPE */
static void *iar_notif_handle;
static struct notifier_block *iar_nb;
-#define CMAC_SIZE_IN_BYTES (128/8) /* 128 bit */
+#define CMAC_SIZE_IN_BYTES (128/8) /* 128 bit = 16 bytes */
+#define CMAC_SIZE_IN_DWORDS (CMAC_SIZE_IN_BYTES/sizeof(u32)) /* 4 dwords */
+
+/* Asym, Crypt, Keym */
+#define NUM_UEFI_APPS 3
static u32 pil_addr;
static u32 pil_size;
-static u32 cmac_buf[CMAC_SIZE_IN_BYTES/sizeof(u32)]; /* saved cmac */
-static u32 pbl_cmac_buf[CMAC_SIZE_IN_BYTES/sizeof(u32)]; /* pbl cmac */
+static u32 cmac_buf[CMAC_SIZE_IN_DWORDS]; /* saved cmac */
+static u32 pbl_cmac_buf[CMAC_SIZE_IN_DWORDS]; /* pbl cmac */
+
+static u32 apps_cmac_buf[NUM_UEFI_APPS][CMAC_SIZE_IN_DWORDS];
+
static u32 iar_state;
static bool is_iar_enabled;
-static bool is_pbl_ce; /* Did SPU PBL performed Cryptographic Erase (CE) */
static void __iomem *cmac_mem;
static size_t cmac_mem_size = SZ_4K; /* XPU align to 4KB */
@@ -96,7 +103,7 @@ static struct spss_utils_device *spss_utils_dev;
/* static functions declaration */
static int spss_set_fw_cmac(u32 *cmac, size_t cmac_size);
-static int spss_get_pbl_calc_cmac(u32 *cmac, size_t cmac_size);
+static int spss_get_pbl_and_apps_calc_cmac(void);
/*==========================================================================*/
/* Device Sysfs */
@@ -199,7 +206,7 @@ static ssize_t cmac_buf_show(struct device *dev,
return -EINVAL;
}
- ret = snprintf(buf, PAGE_SIZE, "0x%x,0x%x,0x%x,0x%x\n",
+ ret = snprintf(buf, PAGE_SIZE, "0x%08x,0x%08x,0x%08x,0x%08x\n",
cmac_buf[0], cmac_buf[1], cmac_buf[2], cmac_buf[3]);
return ret;
@@ -244,24 +251,6 @@ static ssize_t iar_enabled_show(struct device *dev,
static DEVICE_ATTR_RO(iar_enabled);
-static ssize_t pbl_ce_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- int ret = 0;
-
- if (!dev || !attr || !buf) {
- pr_err("invalid param.\n");
- return -EINVAL;
- }
-
- ret = snprintf(buf, PAGE_SIZE, "0x%x\n", is_pbl_ce);
-
- return ret;
-}
-
-static DEVICE_ATTR_RO(pbl_ce);
-
static ssize_t pbl_cmac_show(struct device *dev,
struct device_attribute *attr,
char *buf)
@@ -273,7 +262,7 @@ static ssize_t pbl_cmac_show(struct device *dev,
return -EINVAL;
}
- ret = snprintf(buf, PAGE_SIZE, "0x%x,0x%x,0x%x,0x%x\n",
+ ret = snprintf(buf, PAGE_SIZE, "0x%08x,0x%08x,0x%08x,0x%08x\n",
pbl_cmac_buf[0], pbl_cmac_buf[1], pbl_cmac_buf[2], pbl_cmac_buf[3]);
return ret;
@@ -281,6 +270,21 @@ static ssize_t pbl_cmac_show(struct device *dev,
static DEVICE_ATTR_RO(pbl_cmac);
+static ssize_t apps_cmac_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (!dev || !attr || !buf) {
+ pr_err("invalid param.\n");
+ return -EINVAL;
+ }
+
+ memcpy(buf, apps_cmac_buf, sizeof(apps_cmac_buf));
+
+ return sizeof(apps_cmac_buf);
+}
+
+static DEVICE_ATTR_RO(apps_cmac);
+
/*--------------------------------------------------------------------------*/
static int spss_create_sysfs(struct device *dev)
{
@@ -322,22 +326,23 @@ static int spss_create_sysfs(struct device *dev)
goto remove_iar_state;
}
- ret = device_create_file(dev, &dev_attr_pbl_ce);
- if (ret < 0) {
- pr_err("failed to create sysfs file for pbl_ce.\n");
- goto remove_iar_enabled;
- }
-
ret = device_create_file(dev, &dev_attr_pbl_cmac);
if (ret < 0) {
pr_err("failed to create sysfs file for pbl_cmac.\n");
- goto remove_pbl_ce;
+ goto remove_iar_enabled;
}
+ ret = device_create_file(dev, &dev_attr_apps_cmac);
+ if (ret < 0) {
+ pr_err("failed to create sysfs file for apps_cmac.\n");
+ goto remove_pbl_cmac;
+ }
+
return 0;
-remove_pbl_ce:
- device_remove_file(dev, &dev_attr_pbl_ce);
+remove_pbl_cmac:
+ device_remove_file(dev, &dev_attr_pbl_cmac);
remove_iar_enabled:
device_remove_file(dev, &dev_attr_iar_enabled);
remove_iar_state:
@@ -392,22 +397,18 @@ static long spss_utils_ioctl(struct file *file,
}
memcpy(cmac_buf, data, sizeof(cmac_buf));
- pr_info("cmac_buf: 0x%x,0x%x,0x%x,0x%x\n",
+ pr_info("saved fw cmac: 0x%08x,0x%08x,0x%08x,0x%08x\n",
cmac_buf[0], cmac_buf[1], cmac_buf[2], cmac_buf[3]);
/*
* SPSS is loaded now by UEFI,
* so IAR callback is not being called on powerup by PIL.
- * therefore read the spu pbl fw cmac from ioctl.
+ * therefore read the spu pbl fw cmac and apps cmac from ioctl.
* The callback shall be called on spss SSR.
*/
- pr_info("read pbl cmac from shared memory\n");
+ pr_debug("read pbl cmac from shared memory\n");
spss_set_fw_cmac(cmac_buf, sizeof(cmac_buf));
- spss_get_pbl_calc_cmac(pbl_cmac_buf, sizeof(pbl_cmac_buf));
- if (memcmp(cmac_buf, pbl_cmac_buf, sizeof(cmac_buf)) != 0)
- is_pbl_ce = true; /* cmacs not the same */
- else
- is_pbl_ce = false;
+ spss_get_pbl_and_apps_calc_cmac();
break;
default:
@@ -513,7 +514,8 @@ static int spss_parse_dt(struct device_node *node)
u32 spss_fuse4_bit = 0;
u32 spss_fuse4_mask = 0;
void __iomem *spss_fuse4_reg = NULL;
-
+ struct device_node *np;
+ struct resource r;
u32 val1 = 0;
u32 val2 = 0;
void __iomem *spss_emul_type_reg = NULL;
@@ -649,24 +651,36 @@ static int spss_parse_dt(struct device_node *node)
iounmap(spss_emul_type_reg);
/* PIL-SPSS area */
- ret = of_property_read_u32(node, "qcom,pil-addr",
- &pil_addr);
- if (ret < 0) {
- pr_err("can't get pil_addr\n");
- return -EFAULT;
+ np = of_parse_phandle(node, "pil-mem", 0);
+ if (!np) {
+ pr_err("no pil-mem entry, check pil-addr\n");
+ ret = of_property_read_u32(node, "qcom,pil-addr",
+ &pil_addr);
+ if (ret < 0) {
+ pr_err("can't get pil_addr\n");
+ return -EFAULT;
+ }
+ } else {
+ ret = of_address_to_resource(np, 0, &r);
+ of_node_put(np);
+ if (ret)
+ return ret;
+ pil_addr = (u32)r.start;
}
+
ret = of_property_read_u32(node, "qcom,pil-size",
- &pil_size);
+ &pil_size);
if (ret < 0) {
pr_err("can't get pil_size\n");
return -EFAULT;
}
- pr_info("pil_addr [0x%x].\n", pil_addr);
- pr_info("pil_size [0x%x].\n", pil_size);
+ pr_debug("pil_addr [0x%08x].\n", pil_addr);
+ pr_debug("pil_size [0x%08x].\n", pil_size);
/* cmac buffer after spss firmware end */
cmac_mem_addr = pil_addr + pil_size;
+ pr_info("iar_buf_addr [0x%08x].\n", cmac_mem_addr);
ret = of_property_read_u32(node, "qcom,spss-fuse3-addr",
&spss_fuse3_addr);
@@ -692,7 +706,7 @@ static int spss_parse_dt(struct device_node *node)
/* read IAR_FEATURE_ENABLED from soc fuse */
val1 = readl_relaxed(spss_fuse3_reg);
spss_fuse3_mask = (1<<spss_fuse3_bit);
- pr_info("iar_enabled fuse, addr [0x%x] val [0x%x] mask [0x%x].\n",
+ pr_debug("iar_enabled fuse, addr [0x%x] val [0x%x] mask [0x%x].\n",
spss_fuse3_addr, val1, spss_fuse3_mask);
if (val1 & spss_fuse3_mask)
is_iar_enabled = true;
@@ -724,7 +738,7 @@ static int spss_parse_dt(struct device_node *node)
val1 = readl_relaxed(spss_fuse4_reg);
spss_fuse4_mask = (0x07 << spss_fuse4_bit); /* 3 bits */
- pr_info("IAR_STATE fuse, addr [0x%x] val [0x%x] mask [0x%x].\n",
+ pr_debug("IAR_STATE fuse, addr [0x%x] val [0x%x] mask [0x%x].\n",
spss_fuse4_addr, val1, spss_fuse4_mask);
val1 = ((val1 & spss_fuse4_mask) >> spss_fuse4_bit) & 0x07;
@@ -763,23 +777,43 @@ static int spss_set_fw_cmac(u32 *cmac, size_t cmac_size)
return 0;
}
-static int spss_get_pbl_calc_cmac(u32 *cmac, size_t cmac_size)
+static int spss_get_pbl_and_apps_calc_cmac(void)
{
u8 __iomem *reg = NULL;
- int i;
+ int i, j;
u32 val;
if (cmac_mem == NULL)
return -EFAULT;
- /* PBL calculated cmac after HLOS expected cmac */
- reg = cmac_mem + cmac_size;
+ reg = cmac_mem; /* IAR buffer base */
+ reg += CMAC_SIZE_IN_BYTES; /* skip the saved cmac */
pr_debug("reg [%pK]\n", reg);
- for (i = 0; i < cmac_size/4; i++) {
- val = readl_relaxed(reg + i*sizeof(u32));
- cmac[i] = val;
- pr_debug("cmac[%d] [0x%x]\n", (int) i, (int) val);
+ /* get pbl fw cmac from ddr */
+ for (i = 0; i < CMAC_SIZE_IN_DWORDS; i++) {
+ val = readl_relaxed(reg);
+ pbl_cmac_buf[i] = val;
+ reg += sizeof(u32);
+ }
+ reg += CMAC_SIZE_IN_BYTES; /* skip the saved cmac */
+
+ pr_info("pbl_cmac_buf : 0x%08x,0x%08x,0x%08x,0x%08x\n",
+ pbl_cmac_buf[0], pbl_cmac_buf[1],
+ pbl_cmac_buf[2], pbl_cmac_buf[3]);
+
+ /* get apps cmac from ddr */
+ for (j = 0; j < NUM_UEFI_APPS; j++) {
+ for (i = 0; i < CMAC_SIZE_IN_DWORDS; i++) {
+ val = readl_relaxed(reg);
+ apps_cmac_buf[j][i] = val;
+ reg += sizeof(u32);
+ }
+ reg += CMAC_SIZE_IN_BYTES; /* skip the saved cmac */
+
+ pr_info("app [%d] cmac : 0x%08x,0x%08x,0x%08x,0x%08x\n", j,
+ apps_cmac_buf[j][0], apps_cmac_buf[j][1],
+ apps_cmac_buf[j][2], apps_cmac_buf[j][3]);
}
return 0;
@@ -802,11 +836,7 @@ static int spss_utils_iar_callback(struct notifier_block *nb,
break;
case SUBSYS_AFTER_POWERUP:
pr_debug("[SUBSYS_AFTER_POWERUP] event.\n");
- spss_get_pbl_calc_cmac(pbl_cmac_buf, sizeof(pbl_cmac_buf));
- if (memcmp(cmac_buf, pbl_cmac_buf, sizeof(cmac_buf)) != 0)
- is_pbl_ce = true; /* cmacs not the same */
- else
- is_pbl_ce = false;
+ spss_get_pbl_and_apps_calc_cmac();
break;
case SUBSYS_BEFORE_AUTH_AND_RESET:
pr_debug("[SUBSYS_BEFORE_AUTH_AND_RESET] event.\n");
diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c
index b1ab461..5a4c1ab 100644
--- a/drivers/soc/qcom/sysmon-qmi.c
+++ b/drivers/soc/qcom/sysmon-qmi.c
@@ -37,6 +37,7 @@
#define QMI_SSCTL_SUBSYS_EVENT_REQ_LENGTH 40
#define QMI_SSCTL_RESP_MSG_LENGTH 7
#define QMI_SSCTL_EMPTY_MSG_LENGTH 0
+#define QMI_SSCTL_MAX_MSG_LENGTH 90
#define SSCTL_SERVICE_ID 0x2B
#define SSCTL_VER_2 2
@@ -532,11 +533,10 @@ static struct qmi_elem_info qmi_ssctl_get_failure_reason_resp_msg_ei[] = {
*/
int sysmon_get_reason(struct subsys_desc *dest_desc, char *buf, size_t len)
{
- struct qmi_ssctl_get_failure_reason_resp_msg resp;
+ struct qmi_ssctl_get_failure_reason_resp_msg resp = { { 0 } };
struct sysmon_qmi_data *data = NULL, *temp;
struct qmi_txn txn;
const char *dest_ss = dest_desc->name;
- const char expect[] = "ssr:return:";
char req = 0;
int ret;
@@ -601,12 +601,8 @@ int sysmon_get_reason(struct subsys_desc *dest_desc, char *buf, size_t len)
goto out;
}
- if (!strcmp(resp.error_message, expect)) {
- pr_err("Unexpected response %s\n", resp.error_message);
- ret = -EPROTO;
- goto out;
- }
strlcpy(buf, resp.error_message, resp.error_message_len);
+ return 0;
out:
return ret;
}
@@ -643,7 +639,7 @@ int sysmon_notifier_register(struct subsys_desc *desc)
}
rc = qmi_handle_init(&data->clnt_handle,
- QMI_SSCTL_RESP_MSG_LENGTH, &ssctl_ops,
+ QMI_SSCTL_MAX_MSG_LENGTH, &ssctl_ops,
qmi_indication_handler);
if (rc < 0) {
pr_err("Sysmon QMI handle init failed rc:%d\n", rc);
diff --git a/drivers/soc/qcom/trace_secure_buffer.h b/drivers/soc/qcom/trace_secure_buffer.h
new file mode 100644
index 0000000..f0655c8
--- /dev/null
+++ b/drivers/soc/qcom/trace_secure_buffer.h
@@ -0,0 +1,140 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM secure_buffer
+
+#if !defined(_TRACE_SECURE_BUFFER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SECURE_BUFFER_H
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <soc/qcom/secure_buffer.h>
+
+TRACE_EVENT(hyp_assign_info,
+
+ TP_PROTO(u32 *source_vm_list,
+ int source_nelems, int *dest_vmids, int *dest_perms,
+ int dest_nelems),
+
+ TP_ARGS(source_vm_list, source_nelems, dest_vmids,
+ dest_perms, dest_nelems),
+
+ TP_STRUCT__entry(
+ __field(int, source_nelems)
+ __field(int, dest_nelems)
+ __dynamic_array(u32, source_vm_list, source_nelems)
+ __dynamic_array(int, dest_vmids, dest_nelems)
+ __dynamic_array(int, dest_perms, dest_nelems)
+ ),
+
+ TP_fast_assign(
+ __entry->source_nelems = source_nelems;
+ __entry->dest_nelems = dest_nelems;
+ memcpy(__get_dynamic_array(source_vm_list), source_vm_list,
+ source_nelems * sizeof(*source_vm_list));
+ memcpy(__get_dynamic_array(dest_vmids), dest_vmids,
+ dest_nelems * sizeof(*dest_vmids));
+ memcpy(__get_dynamic_array(dest_perms), dest_perms,
+ dest_nelems * sizeof(*dest_perms));
+ ),
+
+ TP_printk("srcVMIDs: %s dstVMIDs: %s dstPerms: %s",
+ __print_array(__get_dynamic_array(source_vm_list),
+ __entry->source_nelems, sizeof(u32)),
+ __print_array(__get_dynamic_array(dest_vmids),
+ __entry->dest_nelems, sizeof(int)),
+ __print_array(__get_dynamic_array(dest_perms),
+ __entry->dest_nelems, sizeof(int))
+ )
+);
+
+TRACE_EVENT(hyp_assign_batch_start,
+
+ TP_PROTO(struct mem_prot_info *info, int info_nelems),
+
+ TP_ARGS(info, info_nelems),
+
+ TP_STRUCT__entry(
+ __field(int, info_nelems)
+ __field(u64, batch_size)
+ __dynamic_array(phys_addr_t, addrs, info_nelems)
+ __dynamic_array(u64, sizes, info_nelems)
+ ),
+
+ TP_fast_assign(
+ unsigned int i;
+ phys_addr_t *addr_arr_ptr = __get_dynamic_array(addrs);
+ u64 *sizes_arr_ptr = __get_dynamic_array(sizes);
+
+ __entry->info_nelems = info_nelems;
+ __entry->batch_size = 0;
+
+ for (i = 0; i < info_nelems; i++) {
+ addr_arr_ptr[i] = info[i].addr;
+ sizes_arr_ptr[i] = info[i].size;
+ __entry->batch_size += info[i].size;
+ }
+ ),
+
+ TP_printk("num entries: %d batch size: %llu phys addrs: %s sizes: %s",
+ __entry->info_nelems, __entry->batch_size,
+ __print_array(__get_dynamic_array(addrs),
+ __entry->info_nelems, sizeof(phys_addr_t)),
+ __print_array(__get_dynamic_array(sizes),
+ __entry->info_nelems, sizeof(u64))
+ )
+);
+
+TRACE_EVENT(hyp_assign_batch_end,
+
+ TP_PROTO(int ret, u64 delta),
+
+ TP_ARGS(ret, delta),
+
+ TP_STRUCT__entry(
+ __field(int, ret)
+ __field(u64, delta)
+ ),
+
+ TP_fast_assign(
+ __entry->ret = ret;
+ __entry->delta = delta;
+ ),
+
+ TP_printk("ret: %d time delta: %lld us",
+ __entry->ret, __entry->delta
+ )
+);
+
+TRACE_EVENT(hyp_assign_end,
+
+ TP_PROTO(u64 tot_delta, u64 avg_delta),
+
+ TP_ARGS(tot_delta, avg_delta),
+
+ TP_STRUCT__entry(
+ __field(u64, tot_delta)
+ __field(u64, avg_delta)
+ ),
+
+ TP_fast_assign(
+ __entry->tot_delta = tot_delta;
+ __entry->avg_delta = avg_delta;
+ ),
+
+ TP_printk("total time delta: %lld us avg batch delta: %lld us",
+ __entry->tot_delta, __entry->avg_delta
+ )
+);
+#endif /* _TRACE_SECURE_BUFFER_H */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../drivers/soc/qcom/
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace_secure_buffer
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 25abf2d..eab27d4 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -554,7 +554,8 @@ static int bcm2835_spi_transfer_one(struct spi_master *master,
bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
/* handle all the 3-wire mode */
- if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
+ if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
+ tfr->rx_buf != master->dummy_rx)
cs |= BCM2835_SPI_CS_REN;
else
cs &= ~BCM2835_SPI_CS_REN;
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 1c85745..c7d45dc 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -1185,6 +1185,28 @@ static const struct file_operations ion_fops = {
#endif
};
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+ struct ion_heap *heap = s->private;
+
+ if (heap->debug_show)
+ heap->debug_show(heap, s, unused);
+
+ return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+ .open = ion_debug_heap_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int debug_shrink_set(void *data, u64 val)
{
struct ion_heap *heap = data;
@@ -1222,6 +1244,7 @@ DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
+ char debug_name[64], buf[256];
int ret;
if (!heap->ops->allocate || !heap->ops->free)
@@ -1249,12 +1272,22 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
plist_node_init(&heap->node, -heap->id);
plist_add(&heap->node, &dev->heaps);
- if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
- char debug_name[64];
+ if (heap->debug_show) {
+ snprintf(debug_name, 64, "%s_stats", heap->name);
+ if (!debugfs_create_file(debug_name, 0664, dev->debug_root,
+ heap, &debug_heap_fops))
+ pr_err("Failed to create heap debugfs at %s/%s\n",
+ dentry_path(dev->debug_root, buf, 256),
+ debug_name);
+ }
+ if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
snprintf(debug_name, 64, "%s_shrink", heap->name);
- debugfs_create_file(debug_name, 0644, dev->debug_root,
- heap, &debug_shrink_fops);
+ if (!debugfs_create_file(debug_name, 0644, dev->debug_root,
+ heap, &debug_shrink_fops))
+ pr_err("Failed to create heap debugfs at %s/%s\n",
+ dentry_path(dev->debug_root, buf, 256),
+ debug_name);
}
dev->heap_cnt++;
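
The new "<heap>_stats" debugfs file is just a seq_file shim around the heap's own debug_show callback, whose signature is visible in ion_debug_heap_show() above. A minimal sketch of a provider (struct my_heap and its pooled_pages field are hypothetical):

    static int my_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
                                  void *unused)
    {
            struct my_heap *mh = container_of(heap, struct my_heap, heap);

            seq_printf(s, "pages pooled: %lu\n", mh->pooled_pages);
            return 0;
    }

    /* at heap creation time (hypothetical): */
    mh->heap.debug_show = my_heap_debug_show;
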
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index ed0898f..63e9218 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -49,6 +49,8 @@ static bool pool_refill_ok(struct ion_page_pool *pool)
static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
+ if (fatal_signal_pending(current))
+ return NULL;
return alloc_pages(pool->gfp_mask, pool->order);
}
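
The fatal_signal_pending() check lets a SIGKILLed task fail the allocation immediately instead of entering a possibly slow, reclaim-triggering alloc_pages(). Effect on a refill-style caller (loop shape and helper names are hypothetical):

    while (pool_needs_pages(pool)) {          /* hypothetical helper */
            page = ion_page_pool_alloc_pages(pool);
            if (!page)
                    return -ENOMEM;           /* includes the SIGKILL case */
            ion_page_pool_add(pool, page);    /* hypothetical helper */
    }
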
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 2edf3ee..caf4d4d 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -342,9 +342,9 @@ static irqreturn_t dt3k_interrupt(int irq, void *d)
static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
unsigned int flags)
{
- int divider, base, prescale;
+ unsigned int divider, base, prescale;
- /* This function needs improvment */
+ /* This function needs improvement */
/* Don't know if divider==0 works. */
for (prescale = 0; prescale < 16; prescale++) {
@@ -358,7 +358,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
divider = (*nanosec) / base;
break;
case CMDF_ROUND_UP:
- divider = (*nanosec) / base;
+ divider = DIV_ROUND_UP(*nanosec, base);
break;
}
if (divider < 65536) {
@@ -368,7 +368,7 @@ static int dt3k_ns_to_timer(unsigned int timer_base, unsigned int *nanosec,
}
prescale = 15;
- base = timer_base * (1 << prescale);
+ base = timer_base * (prescale + 1);
divider = 65535;
*nanosec = divider * base;
return (prescale << 16) | (divider);
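
The CMDF_ROUND_UP change is easiest to see with numbers (illustrative values):

    /*
     * base = 50 ns, requested *nanosec = 125 ns:
     *
     *   old: 125 / 50              = 2 -> 2 * 50 = 100 ns  (rounded DOWN,
     *                                                       despite ROUND_UP)
     *   new: DIV_ROUND_UP(125, 50) = 3 -> 3 * 50 = 150 ns  (first >= 125)
     *
     * The tail change matches the fallback to the search loop, which (in
     * the upstream driver) computes base as timer_base * (prescale + 1);
     * timer_base * (1 << 15) was both inconsistent with that and
     * overflow-prone, which the int -> unsigned int change also guards
     * against.
     */
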
diff --git a/drivers/staging/gasket/apex_driver.c b/drivers/staging/gasket/apex_driver.c
index c747e9c..0cef1d6 100644
--- a/drivers/staging/gasket/apex_driver.c
+++ b/drivers/staging/gasket/apex_driver.c
@@ -538,7 +538,7 @@ static ssize_t sysfs_show(struct device *device, struct device_attribute *attr,
break;
case ATTR_KERNEL_HIB_SIMPLE_PAGE_TABLE_SIZE:
ret = scnprintf(buf, PAGE_SIZE, "%u\n",
- gasket_page_table_num_entries(
+ gasket_page_table_num_simple_entries(
gasket_dev->page_table[0]));
break;
case ATTR_KERNEL_HIB_NUM_ACTIVE_PAGES:
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index ccafcc2..70433f7 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -402,16 +402,19 @@ static void vnt_free_int_bufs(struct vnt_private *priv)
kfree(priv->int_buf.data_buf);
}
-static bool vnt_alloc_bufs(struct vnt_private *priv)
+static int vnt_alloc_bufs(struct vnt_private *priv)
{
+ int ret = 0;
struct vnt_usb_send_context *tx_context;
struct vnt_rcb *rcb;
int ii;
for (ii = 0; ii < priv->num_tx_context; ii++) {
tx_context = kmalloc(sizeof(*tx_context), GFP_KERNEL);
- if (!tx_context)
+ if (!tx_context) {
+ ret = -ENOMEM;
goto free_tx;
+ }
priv->tx_context[ii] = tx_context;
tx_context->priv = priv;
@@ -419,16 +422,20 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
/* allocate URBs */
tx_context->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!tx_context->urb)
+ if (!tx_context->urb) {
+ ret = -ENOMEM;
goto free_tx;
+ }
tx_context->in_use = false;
}
for (ii = 0; ii < priv->num_rcb; ii++) {
priv->rcb[ii] = kzalloc(sizeof(*priv->rcb[ii]), GFP_KERNEL);
- if (!priv->rcb[ii])
+ if (!priv->rcb[ii]) {
+ ret = -ENOMEM;
goto free_rx_tx;
+ }
rcb = priv->rcb[ii];
@@ -436,39 +443,46 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
/* allocate URBs */
rcb->urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!rcb->urb)
+ if (!rcb->urb) {
+ ret = -ENOMEM;
goto free_rx_tx;
+ }
rcb->skb = dev_alloc_skb(priv->rx_buf_sz);
- if (!rcb->skb)
+ if (!rcb->skb) {
+ ret = -ENOMEM;
goto free_rx_tx;
+ }
rcb->in_use = false;
/* submit rx urb */
- if (vnt_submit_rx_urb(priv, rcb))
+ ret = vnt_submit_rx_urb(priv, rcb);
+ if (ret)
goto free_rx_tx;
}
priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!priv->interrupt_urb)
- goto free_rx_tx;
-
- priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
- if (!priv->int_buf.data_buf) {
- usb_free_urb(priv->interrupt_urb);
+ if (!priv->interrupt_urb) {
+ ret = -ENOMEM;
goto free_rx_tx;
}
- return true;
+ priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
+ if (!priv->int_buf.data_buf) {
+ ret = -ENOMEM;
+ goto free_rx_tx_urb;
+ }
+ return 0;
+
+free_rx_tx_urb:
+ usb_free_urb(priv->interrupt_urb);
free_rx_tx:
vnt_free_rx_bufs(priv);
-
free_tx:
vnt_free_tx_bufs(priv);
-
- return false;
+ return ret;
}
static void vnt_tx_80211(struct ieee80211_hw *hw,
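
The bool-to-errno conversion above follows the standard kernel goto-unwind idiom: record an errno, jump to a label that releases everything acquired so far in reverse order, and keep a single success return. Its skeleton (all names hypothetical):

    int alloc_bufs(struct ctx *c)
    {
            int ret;

            c->a = kmalloc(sizeof(*c->a), GFP_KERNEL);
            if (!c->a)
                    return -ENOMEM;

            c->b = usb_alloc_urb(0, GFP_KERNEL);
            if (!c->b) {
                    ret = -ENOMEM;
                    goto free_a;
            }
            return 0;

    free_a:
            kfree(c->a);
            return ret;
    }
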
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index 5449287..f0415de 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -25,6 +25,13 @@ static int tsens_get_temp(void *data, int *temp)
return tmdev->ops->get_temp(s, temp);
}
+static int tsens_get_min_temp(void *data, int *temp)
+{
+ struct tsens_sensor *s = data;
+
+ return tsens_2xxx_get_min_temp(s, temp);
+}
+
static int tsens_set_trip_temp(void *data, int low_temp, int high_temp)
{
struct tsens_sensor *s = data;
@@ -82,6 +89,9 @@ static const struct of_device_id tsens_table[] = {
{ .compatible = "qcom,tsens24xx",
.data = &data_tsens24xx,
},
+ { .compatible = "qcom,tsens26xx",
+ .data = &data_tsens26xx,
+ },
{ .compatible = "qcom,msm8937-tsens",
.data = &data_tsens14xx,
},
@@ -97,6 +107,10 @@ static struct thermal_zone_of_device_ops tsens_tm_thermal_zone_ops = {
.set_trips = tsens_set_trip_temp,
};
+static struct thermal_zone_of_device_ops tsens_tm_min_thermal_zone_ops = {
+ .get_temp = tsens_get_min_temp,
+};
+
static int get_device_tree_data(struct platform_device *pdev,
struct tsens_device *tmdev)
{
@@ -105,6 +119,7 @@ static int get_device_tree_data(struct platform_device *pdev,
const struct tsens_data *data;
int rc = 0;
struct resource *res_tsens_mem;
+ u32 min_temp_id;
if (!of_match_node(tsens_table, of_node)) {
pr_err("Need to read SoC specific fuse map\n");
@@ -179,6 +194,11 @@ static int get_device_tree_data(struct platform_device *pdev,
}
}
+ if (!of_property_read_u32(of_node, "0C-sensor-num", &min_temp_id))
+ tmdev->min_temp_sensor_id = (int)min_temp_id;
+ else
+ tmdev->min_temp_sensor_id = MIN_TEMP_DEF_OFFSET;
+
return rc;
}
@@ -209,6 +229,17 @@ static int tsens_thermal_zone_register(struct tsens_device *tmdev)
return -ENODEV;
}
+ if (tmdev->min_temp_sensor_id != MIN_TEMP_DEF_OFFSET) {
+ tmdev->min_temp.tmdev = tmdev;
+ tmdev->min_temp.hw_id = tmdev->min_temp_sensor_id;
+ tmdev->min_temp.tzd =
+ devm_thermal_zone_of_sensor_register(
+ &tmdev->pdev->dev, tmdev->min_temp_sensor_id,
+ &tmdev->min_temp, &tsens_tm_min_thermal_zone_ops);
+ if (IS_ERR(tmdev->min_temp.tzd))
+ pr_err("Error registering min temp sensor\n");
+ }
+
/* Register virtual thermal sensors. */
qti_virtual_sensor_register(&tmdev->pdev->dev);
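
The "0C-sensor-num" handling is the usual optional-DT-property idiom: of_property_read_u32() returns 0 only if the property exists and parses, so the MIN_TEMP_DEF_OFFSET sentinel doubles as a "feature absent" marker. The probe-time flow this enables, in outline:

    /*
     *   get_device_tree_data():  "0C-sensor-num" present?
     *        yes -> min_temp_sensor_id = <id>
     *        no  -> min_temp_sensor_id = MIN_TEMP_DEF_OFFSET (0xFF)
     *
     *   tsens_thermal_zone_register():   id != 0xFF -> register the extra
     *                                    min-temp thermal zone
     *   tsens2xxx_register_interrupts(): id != 0xFF -> also request the
     *                                    third ("tsens-0C") interrupt
     */
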
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index d7037a7..cb0e13f 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -898,6 +898,9 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
{
struct cooling_dev_stats *stats = cdev->stats;
+ if (!stats)
+ return;
+
spin_lock(&stats->lock);
if (stats->state == new_state)
@@ -919,6 +922,9 @@ static ssize_t total_trans_show(struct device *dev,
struct cooling_dev_stats *stats = cdev->stats;
int ret;
+ if (!stats)
+ return -ENODEV;
+
spin_lock(&stats->lock);
ret = sprintf(buf, "%u\n", stats->total_trans);
spin_unlock(&stats->lock);
@@ -935,6 +941,9 @@ time_in_state_ms_show(struct device *dev, struct device_attribute *attr,
ssize_t len = 0;
int i;
+ if (!stats)
+ return -ENODEV;
+
spin_lock(&stats->lock);
update_time_in_state(stats);
@@ -953,8 +962,12 @@ reset_store(struct device *dev, struct device_attribute *attr, const char *buf,
{
struct thermal_cooling_device *cdev = to_cooling_device(dev);
struct cooling_dev_stats *stats = cdev->stats;
- int i, states = stats->max_states;
+ int i, states;
+ if (!stats)
+ return -ENODEV;
+
+ states = stats->max_states;
spin_lock(&stats->lock);
stats->total_trans = 0;
@@ -978,6 +991,9 @@ static ssize_t trans_table_show(struct device *dev,
ssize_t len = 0;
int i, j;
+ if (!stats)
+ return -ENODEV;
+
len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
len += snprintf(buf + len, PAGE_SIZE - len, " : ");
for (i = 0; i < stats->max_states; i++) {
diff --git a/drivers/thermal/tsens.h b/drivers/thermal/tsens.h
index 8ee67c6..bf8768a 100644
--- a/drivers/thermal/tsens.h
+++ b/drivers/thermal/tsens.h
@@ -31,6 +31,7 @@
#define SLOPE_DEFAULT 3200
#define IPC_LOGPAGES 10
+#define MIN_TEMP_DEF_OFFSET 0xFF
enum tsens_dbg_type {
TSENS_DBG_POLL,
@@ -208,14 +209,20 @@ struct tsens_device {
const struct tsens_data *ctrl_data;
struct tsens_mtc_sysfs mtcsys;
int trdy_fail_ctr;
+ struct tsens_sensor min_temp;
+ u8 min_temp_sensor_id;
struct tsens_sensor sensor[0];
};
-extern const struct tsens_data data_tsens2xxx, data_tsens23xx, data_tsens24xx;
+extern const struct tsens_data data_tsens2xxx, data_tsens23xx, data_tsens24xx,
+ data_tsens26xx;
extern const struct tsens_data data_tsens14xx, data_tsens14xx_405;
extern struct list_head tsens_device_list;
extern int calibrate_8937(struct tsens_device *tmdev);
extern int calibrate_405(struct tsens_device *tmdev);
+extern int tsens_2xxx_get_min_temp(
+ struct tsens_sensor *sensor, int *temp);
+
#endif /* __QCOM_TSENS_H__ */
diff --git a/drivers/thermal/tsens2xxx.c b/drivers/thermal/tsens2xxx.c
index 062e53e..7d040ae 100644
--- a/drivers/thermal/tsens2xxx.c
+++ b/drivers/thermal/tsens2xxx.c
@@ -42,12 +42,15 @@
#define TSENS_TM_UPPER_LOWER_INT_MASK(n) ((n) + 0x10)
#define TSENS_TM_UPPER_INT_SET(n) (1 << (n + 16))
#define TSENS_TM_SN_CRITICAL_THRESHOLD_MASK 0xfff
+#define TSENS_TM_MIN_TEMP_VALID_BIT BIT(16)
#define TSENS_TM_SN_STATUS_VALID_BIT BIT(21)
#define TSENS_TM_SN_STATUS_CRITICAL_STATUS BIT(19)
#define TSENS_TM_SN_STATUS_UPPER_STATUS BIT(18)
#define TSENS_TM_SN_STATUS_LOWER_STATUS BIT(17)
#define TSENS_TM_SN_LAST_TEMP_MASK 0xfff
#define TSENS_TM_CODE_BIT_MASK 0xfff
+#define TSENS_TM_0C_THR_MASK 0xfff
+#define TSENS_TM_0C_THR_OFFSET 12
#define TSENS_TM_CODE_SIGN_BIT 0x800
#define TSENS_TM_SCALE_DECI_MILLIDEG 100
#define TSENS_DEBUG_WDOG_TRIGGER_COUNT 5
@@ -58,6 +61,10 @@
#define TSENS_TM_TRDY(n) ((n) + 0xe4)
#define TSENS_TM_TRDY_FIRST_ROUND_COMPLETE BIT(3)
#define TSENS_TM_TRDY_FIRST_ROUND_COMPLETE_SHIFT 3
+#define TSENS_TM_0C_INT_STATUS(n) ((n) + 0xe0)
+#define TSENS_TM_MIN_TEMP(n) ((n) + 0xec)
+#define TSENS_TM_0C_THRESHOLDS(n) ((n) + 0x1c)
+#define TSENS_MAX_READ_FAIL 50
static void msm_tsens_convert_temp(int last_temp, int *temp)
{
@@ -92,7 +99,7 @@ static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
code, tmdev->trdy_fail_ctr);
tmdev->trdy_fail_ctr++;
- if (tmdev->trdy_fail_ctr >= 50) {
+ if (tmdev->trdy_fail_ctr >= TSENS_MAX_READ_FAIL) {
if (tmdev->ops->dbg)
tmdev->ops->dbg(tmdev, 0,
TSENS_DBG_LOG_BUS_ID_DATA, NULL);
@@ -147,6 +154,75 @@ static int tsens2xxx_get_temp(struct tsens_sensor *sensor, int *temp)
return 0;
}
+int tsens_2xxx_get_min_temp(struct tsens_sensor *sensor, int *temp)
+{
+ struct tsens_device *tmdev = NULL;
+ unsigned int code;
+ void __iomem *sensor_addr, *trdy;
+ int last_temp = 0, last_temp2 = 0, last_temp3 = 0, valid_bit;
+
+ if (!sensor)
+ return -EINVAL;
+
+ tmdev = sensor->tmdev;
+ trdy = TSENS_TM_TRDY(tmdev->tsens_tm_addr);
+
+ valid_bit = TSENS_TM_MIN_TEMP_VALID_BIT;
+ sensor_addr = TSENS_TM_MIN_TEMP(tmdev->tsens_tm_addr);
+
+ code = readl_relaxed_no_log(trdy);
+ if (!((code & TSENS_TM_TRDY_FIRST_ROUND_COMPLETE) >>
+ TSENS_TM_TRDY_FIRST_ROUND_COMPLETE_SHIFT)) {
+		pr_err("tsens device first round not complete 0x%x, ctr is %d\n",
+ code, tmdev->trdy_fail_ctr);
+ tmdev->trdy_fail_ctr++;
+ if (tmdev->trdy_fail_ctr >= TSENS_MAX_READ_FAIL) {
+ if (tmdev->ops->dbg)
+ tmdev->ops->dbg(tmdev, 0,
+ TSENS_DBG_LOG_BUS_ID_DATA, NULL);
+ BUG();
+ }
+ return -ENODATA;
+ }
+
+ tmdev->trdy_fail_ctr = 0;
+
+ code = readl_relaxed_no_log(sensor_addr);
+ last_temp = code & TSENS_TM_SN_LAST_TEMP_MASK;
+ if (code & valid_bit) {
+ msm_tsens_convert_temp(last_temp, temp);
+ goto dbg;
+ }
+
+ code = readl_relaxed_no_log(sensor_addr);
+ last_temp2 = code & TSENS_TM_SN_LAST_TEMP_MASK;
+ if (code & valid_bit) {
+ last_temp = last_temp2;
+ msm_tsens_convert_temp(last_temp, temp);
+ goto dbg;
+ }
+
+ code = readl_relaxed_no_log(sensor_addr);
+ last_temp3 = code & TSENS_TM_SN_LAST_TEMP_MASK;
+ if (code & valid_bit) {
+ last_temp = last_temp3;
+ msm_tsens_convert_temp(last_temp, temp);
+ goto dbg;
+ }
+
+ if (last_temp == last_temp2)
+ last_temp = last_temp2;
+ else if (last_temp2 == last_temp3)
+ last_temp = last_temp3;
+
+ msm_tsens_convert_temp(last_temp, temp);
+
+dbg:
+ TSENS_DBG(tmdev, "Min temp: %d\n", *temp);
+
+ return 0;
+}
+
static int tsens_tm_activate_trip_type(struct tsens_sensor *tm_sensor,
int trip, enum thermal_device_mode mode)
{
@@ -518,6 +594,31 @@ static irqreturn_t tsens_tm_irq_thread(int irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t tsens_tm_0C_irq_thread(int irq, void *data)
+{
+ struct tsens_device *tm = data;
+ int status, thrs, set_thr, reset_thr;
+ void __iomem *srot_addr, *addr;
+
+ addr = TSENS_TM_0C_INT_STATUS(tm->tsens_tm_addr);
+ status = readl_relaxed(addr);
+
+ srot_addr = TSENS_CTRL_ADDR(tm->tsens_srot_addr);
+ thrs = readl_relaxed(TSENS_TM_0C_THRESHOLDS(srot_addr));
+
+ msm_tsens_convert_temp(thrs & TSENS_TM_0C_THR_MASK, &reset_thr);
+ msm_tsens_convert_temp(
+ ((thrs >> TSENS_TM_0C_THR_OFFSET) &
+ TSENS_TM_0C_THR_MASK), &set_thr);
+
+ if (status)
+ of_thermal_handle_trip_temp(tm->min_temp.tzd, set_thr);
+ else
+ of_thermal_handle_trip_temp(tm->min_temp.tzd, reset_thr);
+
+ return IRQ_HANDLED;
+}
+
static int tsens2xxx_hw_sensor_en(struct tsens_device *tmdev,
u32 sensor_id)
{
@@ -602,19 +703,26 @@ static int tsens2xxx_hw_init(struct tsens_device *tmdev)
static const struct tsens_irqs tsens2xxx_irqs[] = {
{ "tsens-upper-lower", tsens_tm_irq_thread},
{ "tsens-critical", tsens_tm_critical_irq_thread},
+ { "tsens-0C", tsens_tm_0C_irq_thread},
};
static int tsens2xxx_register_interrupts(struct tsens_device *tmdev)
{
struct platform_device *pdev;
- int i, rc;
+ int i, rc, irq_no;
+ unsigned long irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
if (!tmdev)
return -EINVAL;
+ if (tmdev->min_temp_sensor_id != MIN_TEMP_DEF_OFFSET)
+ irq_no = ARRAY_SIZE(tsens2xxx_irqs);
+ else
+ irq_no = ARRAY_SIZE(tsens2xxx_irqs) - 1;
+
pdev = tmdev->pdev;
- for (i = 0; i < ARRAY_SIZE(tsens2xxx_irqs); i++) {
+ for (i = 0; i < irq_no; i++) {
int irq;
irq = platform_get_irq_byname(pdev, tsens2xxx_irqs[i].name);
@@ -624,10 +732,12 @@ static int tsens2xxx_register_interrupts(struct tsens_device *tmdev)
return irq;
}
+ if (i == 2)
+ irqflags = IRQF_TRIGGER_RISING | IRQF_ONESHOT;
+
rc = devm_request_threaded_irq(&pdev->dev, irq, NULL,
tsens2xxx_irqs[i].handler,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- tsens2xxx_irqs[i].name, tmdev);
+ irqflags, tsens2xxx_irqs[i].name, tmdev);
if (rc) {
dev_err(&pdev->dev, "failed to get irq %s\n",
tsens2xxx_irqs[i].name);
@@ -677,3 +787,14 @@ const struct tsens_data data_tsens24xx = {
.ops = &ops_tsens2xxx,
.mtc = false,
};
+
+const struct tsens_data data_tsens26xx = {
+ .cycle_monitor = true,
+ .cycle_compltn_monitor_mask = 1,
+ .wd_bark = true,
+ .wd_bark_mask = 0,
+ .ops = &ops_tsens2xxx,
+ .mtc = false,
+ .ver_major = 2,
+ .ver_minor = 6,
+};
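
tsens_2xxx_get_min_temp() above reuses the sampling discipline of the existing tsens2xxx_get_temp(): the temperature code and its valid bit update asynchronously, so the register is read up to three times. In outline:

    /*
     * up to three samples of TSENS_TM_MIN_TEMP:
     *
     *   any read with the VALID bit set  -> use that sample immediately
     *   none valid after three reads     -> two equal consecutive samples
     *                                       are treated as stable
     *                                       (last_temp == last_temp2, else
     *                                        last_temp2 == last_temp3)
     */
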
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index e26d87b..aa4de69 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1874,7 +1874,8 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
status = serial8250_rx_chars(up, status);
}
serial8250_modem_status(up);
- if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE))
+ if ((!up->dma || up->dma->tx_err) && (status & UART_LSR_THRE) &&
+ (up->ier & UART_IER_THRI))
serial8250_tx_chars(up);
spin_unlock_irqrestore(&port->lock, flags);
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index e538959..ad40c75 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -407,7 +407,16 @@ static int cpm_uart_startup(struct uart_port *port)
clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX);
}
cpm_uart_initbd(pinfo);
- cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+ if (IS_SMC(pinfo)) {
+ out_be32(&pinfo->smcup->smc_rstate, 0);
+ out_be32(&pinfo->smcup->smc_tstate, 0);
+ out_be16(&pinfo->smcup->smc_rbptr,
+ in_be16(&pinfo->smcup->smc_rbase));
+ out_be16(&pinfo->smcup->smc_tbptr,
+ in_be16(&pinfo->smcup->smc_tbase));
+ } else {
+ cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
+ }
}
/* Install interrupt handler. */
retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port);
@@ -861,16 +870,14 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
(u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE);
/*
- * In case SMC1 is being relocated...
+ * In case SMC is being relocated...
*/
-#if defined (CONFIG_I2C_SPI_SMC1_UCODE_PATCH)
out_be16(&up->smc_rbptr, in_be16(&pinfo->smcup->smc_rbase));
out_be16(&up->smc_tbptr, in_be16(&pinfo->smcup->smc_tbase));
out_be32(&up->smc_rstate, 0);
out_be32(&up->smc_tstate, 0);
out_be16(&up->smc_brkcr, 1); /* number of break chars */
out_be16(&up->smc_brkec, 0);
-#endif
/* Set up the uart parameters in the
* parameter ram.
@@ -884,8 +891,6 @@ static void cpm_uart_init_smc(struct uart_cpm_port *pinfo)
out_be16(&up->smc_brkec, 0);
out_be16(&up->smc_brkcr, 1);
- cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX);
-
/* Set UART mode, 8 bit, no parity, one stop.
* Enable receive and transmit.
*/
diff --git a/drivers/tty/serial/digicolor-usart.c b/drivers/tty/serial/digicolor-usart.c
index f460cca..13ac36e 100644
--- a/drivers/tty/serial/digicolor-usart.c
+++ b/drivers/tty/serial/digicolor-usart.c
@@ -541,7 +541,11 @@ static int __init digicolor_uart_init(void)
if (ret)
return ret;
- return platform_driver_register(&digicolor_uart_platform);
+ ret = platform_driver_register(&digicolor_uart_platform);
+ if (ret)
+ uart_unregister_driver(&digicolor_uart);
+
+ return ret;
}
module_init(digicolor_uart_init);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 0f67197..105de92 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -382,6 +382,7 @@ static void imx_uart_ucrs_restore(struct imx_port *sport,
}
#endif
+/* called with port.lock taken and irqs caller dependent */
static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2)
{
*ucr2 &= ~(UCR2_CTSC | UCR2_CTS);
@@ -390,6 +391,7 @@ static void imx_uart_rts_active(struct imx_port *sport, u32 *ucr2)
mctrl_gpio_set(sport->gpios, sport->port.mctrl);
}
+/* called with port.lock taken and irqs caller dependent */
static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
{
*ucr2 &= ~UCR2_CTSC;
@@ -399,6 +401,7 @@ static void imx_uart_rts_inactive(struct imx_port *sport, u32 *ucr2)
mctrl_gpio_set(sport->gpios, sport->port.mctrl);
}
+/* called with port.lock taken and irqs caller dependent */
static void imx_uart_rts_auto(struct imx_port *sport, u32 *ucr2)
{
*ucr2 |= UCR2_CTSC;
@@ -1554,6 +1557,16 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
old_csize = CS8;
}
+ del_timer_sync(&sport->timer);
+
+ /*
+ * Ask the core to calculate the divisor for us.
+ */
+ baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+ quot = uart_get_divisor(port, baud);
+
+ spin_lock_irqsave(&sport->port.lock, flags);
+
if ((termios->c_cflag & CSIZE) == CS8)
ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
else
@@ -1597,16 +1610,6 @@ imx_uart_set_termios(struct uart_port *port, struct ktermios *termios,
ucr2 |= UCR2_PROE;
}
- del_timer_sync(&sport->timer);
-
- /*
- * Ask the core to calculate the divisor for us.
- */
- baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
- quot = uart_get_divisor(port, baud);
-
- spin_lock_irqsave(&sport->port.lock, flags);
-
sport->port.read_status_mask = 0;
if (termios->c_iflag & INPCK)
sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR);
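
The imx_uart_set_termios() reordering is a locking fix, not a functional change: del_timer_sync() must not run under a lock that the timer callback itself takes, and the baud-rate helpers are not spinlock-safe either. The hazard being removed, in miniature:

    spin_lock_irqsave(&sport->port.lock, flags);
    del_timer_sync(&sport->timer);   /* BAD: if the timer callback (which
                                      * takes port.lock) is running and
                                      * waiting for the lock, this never
                                      * returns - deadlock */

Hence both del_timer_sync() and the uart_get_baud_rate()/uart_get_divisor() calls now happen before the lock is taken.
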
diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
index 38c48a0..bd3e6cf 100644
--- a/drivers/tty/serial/max310x.c
+++ b/drivers/tty/serial/max310x.c
@@ -491,37 +491,48 @@ static bool max310x_reg_precious(struct device *dev, unsigned int reg)
static int max310x_set_baud(struct uart_port *port, int baud)
{
- unsigned int mode = 0, clk = port->uartclk, div = clk / baud;
+ unsigned int mode = 0, div = 0, frac = 0, c = 0, F = 0;
- /* Check for minimal value for divider */
- if (div < 16)
- div = 16;
-
- if (clk % baud && (div / 16) < 0x8000) {
+ /*
+ * Calculate the integer divisor first. Select a proper mode
+	 * in case the requested baud is too high for the pre-defined
+	 * clock frequency.
+ */
+ div = port->uartclk / baud;
+ if (div < 8) {
+ /* Mode x4 */
+ c = 4;
+ mode = MAX310X_BRGCFG_4XMODE_BIT;
+ } else if (div < 16) {
/* Mode x2 */
+ c = 8;
mode = MAX310X_BRGCFG_2XMODE_BIT;
- clk = port->uartclk * 2;
- div = clk / baud;
-
- if (clk % baud && (div / 16) < 0x8000) {
- /* Mode x4 */
- mode = MAX310X_BRGCFG_4XMODE_BIT;
- clk = port->uartclk * 4;
- div = clk / baud;
- }
+ } else {
+ c = 16;
}
- max310x_port_write(port, MAX310X_BRGDIVMSB_REG, (div / 16) >> 8);
- max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div / 16);
- max310x_port_write(port, MAX310X_BRGCFG_REG, (div % 16) | mode);
+ /* Calculate the divisor in accordance with the fraction coefficient */
+ div /= c;
+ F = c*baud;
- return DIV_ROUND_CLOSEST(clk, div);
+ /* Calculate the baud rate fraction */
+ if (div > 0)
+ frac = (16*(port->uartclk % F)) / F;
+ else
+ div = 1;
+
+ max310x_port_write(port, MAX310X_BRGDIVMSB_REG, div >> 8);
+ max310x_port_write(port, MAX310X_BRGDIVLSB_REG, div);
+ max310x_port_write(port, MAX310X_BRGCFG_REG, frac | mode);
+
+ /* Return the actual baud rate we just programmed */
+ return (16*port->uartclk) / (c*(16*div + frac));
}
static int max310x_update_best_err(unsigned long f, long *besterr)
{
-	/* Use baudrate 115200 for calculate error */
-	long err = f % (115200 * 16);
+	/* Use baudrate 460800 to calculate the error */
+	long err = f % (460800 * 16);
if ((*besterr < 0) || (*besterr > err)) {
*besterr = err;
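
Concrete numbers make the new divisor math easier to follow (illustrative values, not from the patch):

    /*
     * uartclk = 3686400 Hz, requested baud = 250000:
     *
     *   div  = 3686400 / 250000 = 14   -> div < 16, so x2 mode, c = 8
     *   div /= 8                =  1
     *   F    = 8 * 250000       = 2000000
     *   frac = 16 * (3686400 % 2000000) / 2000000 = 13
     *
     * programmed rate = 16 * 3686400 / (8 * (16 * 1 + 13)) = 254234 baud,
     * about 1.7% high - the best fit of the 1/16-step fractional divider.
     */
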
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index 0f41b93..310bbae 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -383,10 +383,14 @@ static void msm_request_rx_dma(struct msm_port *msm_port, resource_size_t base)
static inline void msm_wait_for_xmitr(struct uart_port *port)
{
+ unsigned int timeout = 500000;
+
while (!(msm_read(port, UART_SR) & UART_SR_TX_EMPTY)) {
if (msm_read(port, UART_ISR) & UART_ISR_TX_READY)
break;
udelay(1);
+ if (!timeout--)
+ break;
}
msm_write(port, UART_CR_CMD_RESET_TX_READY, UART_CR);
}
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 8dbeb14..fe9261f 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1738,6 +1738,7 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
{
struct uart_state *state = container_of(port, struct uart_state, port);
struct uart_port *uport;
+ int ret;
uport = uart_port_check(state);
if (!uport || uport->flags & UPF_DEAD)
@@ -1748,7 +1749,11 @@ static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
/*
* Start up the serial port.
*/
- return uart_startup(tty, state, 0);
+ ret = uart_startup(tty, state, 0);
+ if (ret > 0)
+ tty_port_set_active(port, 1);
+
+ return ret;
}
static const char *uart_type(struct uart_port *port)
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 1c06325..07f3186 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -12,6 +12,7 @@
#include <linux/termios.h>
#include <linux/serial_core.h>
#include <linux/module.h>
+#include <linux/property.h>
#include "serial_mctrl_gpio.h"
@@ -115,6 +116,19 @@ struct mctrl_gpios *mctrl_gpio_init_noauto(struct device *dev, unsigned int idx)
for (i = 0; i < UART_GPIO_MAX; i++) {
enum gpiod_flags flags;
+ char *gpio_str;
+ bool present;
+
+ /* Check if GPIO property exists and continue if not */
+ gpio_str = kasprintf(GFP_KERNEL, "%s-gpios",
+ mctrl_gpios_desc[i].name);
+ if (!gpio_str)
+ continue;
+
+ present = device_property_present(dev, gpio_str);
+ kfree(gpio_str);
+ if (!present)
+ continue;
if (mctrl_gpios_desc[i].dir_out)
flags = GPIOD_OUT_LOW;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 040832635..5550289 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1376,6 +1376,7 @@ static void work_fn_tx(struct work_struct *work)
struct circ_buf *xmit = &port->state->xmit;
unsigned long flags;
dma_addr_t buf;
+ int head, tail;
/*
* DMA is idle now.
@@ -1385,16 +1386,23 @@ static void work_fn_tx(struct work_struct *work)
* consistent xmit buffer state.
*/
spin_lock_irq(&port->lock);
- buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1));
+ head = xmit->head;
+ tail = xmit->tail;
+ buf = s->tx_dma_addr + (tail & (UART_XMIT_SIZE - 1));
s->tx_dma_len = min_t(unsigned int,
- CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
- CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
- spin_unlock_irq(&port->lock);
+ CIRC_CNT(head, tail, UART_XMIT_SIZE),
+ CIRC_CNT_TO_END(head, tail, UART_XMIT_SIZE));
+ if (!s->tx_dma_len) {
+ /* Transmit buffer has been flushed */
+ spin_unlock_irq(&port->lock);
+ return;
+ }
desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
+ spin_unlock_irq(&port->lock);
dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
goto switch_to_pio;
}
@@ -1402,18 +1410,18 @@ static void work_fn_tx(struct work_struct *work)
dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
DMA_TO_DEVICE);
- spin_lock_irq(&port->lock);
desc->callback = sci_dma_tx_complete;
desc->callback_param = s;
- spin_unlock_irq(&port->lock);
s->cookie_tx = dmaengine_submit(desc);
if (dma_submit_error(s->cookie_tx)) {
+ spin_unlock_irq(&port->lock);
dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n");
goto switch_to_pio;
}
+ spin_unlock_irq(&port->lock);
dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n",
- __func__, xmit->buf, xmit->tail, xmit->head, s->cookie_tx);
+ __func__, xmit->buf, tail, head, s->cookie_tx);
dma_async_issue_pending(chan);
return;
@@ -1633,11 +1641,18 @@ static void sci_free_dma(struct uart_port *port)
static void sci_flush_buffer(struct uart_port *port)
{
+ struct sci_port *s = to_sci_port(port);
+
/*
* In uart_flush_buffer(), the xmit circular buffer has just been
- * cleared, so we have to reset tx_dma_len accordingly.
+ * cleared, so we have to reset tx_dma_len accordingly, and stop any
+ * pending transfers
*/
- to_sci_port(port)->tx_dma_len = 0;
+ s->tx_dma_len = 0;
+ if (s->chan_tx) {
+ dmaengine_terminate_async(s->chan_tx);
+ s->cookie_tx = -EINVAL;
+ }
}
#else /* !CONFIG_SERIAL_SH_SCI_DMA */
static inline void sci_request_dma(struct uart_port *port)
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 4d147f89..7c7217a 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -840,8 +840,13 @@ int tty_ldisc_init(struct tty_struct *tty)
*/
void tty_ldisc_deinit(struct tty_struct *tty)
{
- if (tty->ldisc)
+ if (tty->ldisc) {
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+ if (tty->echo_delayed_work.work.func)
+ cancel_delayed_work_sync(&tty->echo_delayed_work);
+#endif
tty_ldisc_put(tty->ldisc);
+ }
tty->ldisc = NULL;
}
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index b989ca2..2f03729 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -116,8 +116,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
tsk = waiter->task;
- smp_mb();
- waiter->task = NULL;
+ smp_store_release(&waiter->task, NULL);
wake_up_process(tsk);
put_task_struct(tsk);
}
@@ -217,7 +216,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
for (;;) {
set_current_state(TASK_UNINTERRUPTIBLE);
- if (!waiter.task)
+ if (!smp_load_acquire(&waiter.task))
break;
if (!timeout)
break;
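
The two ldsem changes form a release/acquire pair: smp_store_release() orders every write the waker made before clearing waiter->task, and smp_load_acquire() guarantees the sleeper sees those writes once it observes NULL, replacing the heavier standalone smp_mb(). The pattern in isolation (the result field and use() are hypothetical):

    /* waker */
    waiter->result = res;                       /* publish before release */
    smp_store_release(&waiter->task, NULL);

    /* sleeper */
    if (!smp_load_acquire(&waiter->task))       /* pairs with the release */
            use(waiter->result);                /* guaranteed to see 'res' */
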
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 5b442bc..59675cc 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1333,10 +1333,6 @@ static int acm_probe(struct usb_interface *intf,
tty_port_init(&acm->port);
acm->port.ops = &acm_port_ops;
- minor = acm_alloc_minor(acm);
- if (minor < 0)
- goto alloc_fail1;
-
ctrlsize = usb_endpoint_maxp(epctrl);
readsize = usb_endpoint_maxp(epread) *
(quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1344,6 +1340,13 @@ static int acm_probe(struct usb_interface *intf,
acm->writesize = usb_endpoint_maxp(epwrite) * 20;
acm->control = control_interface;
acm->data = data_interface;
+
+ usb_get_intf(acm->control); /* undone in destruct() */
+
+ minor = acm_alloc_minor(acm);
+ if (minor < 0)
+ goto alloc_fail1;
+
acm->minor = minor;
acm->dev = usb_dev;
if (h.usb_cdc_acm_descriptor)
@@ -1490,7 +1493,6 @@ static int acm_probe(struct usb_interface *intf,
usb_driver_claim_interface(&acm_driver, data_interface, acm);
usb_set_intfdata(data_interface, acm);
- usb_get_intf(control_interface);
tty_dev = tty_port_register_device(&acm->port, acm_tty_driver, minor,
&control_interface->dev);
if (IS_ERR(tty_dev)) {
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index ffccd40..29c6414 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1792,8 +1792,6 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb
return 0;
error:
- if (as && as->usbm)
- dec_usb_memory_use_count(as->usbm, &as->usbm->urb_use_count);
kfree(isopkt);
kfree(dr);
if (as)
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 65de6f7..558890a 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -193,9 +193,10 @@ int usb_register_dev(struct usb_interface *intf,
intf->minor = minor;
break;
}
- up_write(&minor_rwsem);
- if (intf->minor < 0)
+ if (intf->minor < 0) {
+ up_write(&minor_rwsem);
return -EXFULL;
+ }
/* create a usb class device for this usb interface */
snprintf(name, sizeof(name), class_driver->name, minor - minor_base);
@@ -203,12 +204,11 @@ int usb_register_dev(struct usb_interface *intf,
MKDEV(USB_MAJOR, minor), class_driver,
"%s", kbasename(name));
if (IS_ERR(intf->usb_dev)) {
- down_write(&minor_rwsem);
usb_minors[minor] = NULL;
intf->minor = -1;
- up_write(&minor_rwsem);
retval = PTR_ERR(intf->usb_dev);
}
+ up_write(&minor_rwsem);
return retval;
}
EXPORT_SYMBOL_GPL(usb_register_dev);
@@ -234,12 +234,12 @@ void usb_deregister_dev(struct usb_interface *intf,
return;
dev_dbg(&intf->dev, "removing %d minor\n", intf->minor);
+ device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
down_write(&minor_rwsem);
usb_minors[intf->minor] = NULL;
up_write(&minor_rwsem);
- device_destroy(usb_class->class, MKDEV(USB_MAJOR, intf->minor));
intf->usb_dev = NULL;
intf->minor = -1;
destroy_usb_class();
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index f9c6e27..4884591b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3975,6 +3975,9 @@ static int usb_set_lpm_timeout(struct usb_device *udev,
* control transfers to set the hub timeout or enable device-initiated U1/U2
* will be successful.
*
+ * If the control transfer to enable device-initiated U1/U2 entry fails, then
+ * hub-initiated U1/U2 will be disabled.
+ *
* If we cannot set the parent hub U1/U2 timeout, we attempt to let the xHCI
* driver know about it. If that call fails, it should be harmless, and just
* take up more slightly more bus bandwidth for unnecessary U1/U2 exit latency.
@@ -4029,23 +4032,24 @@ static void usb_enable_link_state(struct usb_hcd *hcd, struct usb_device *udev,
* host know that this link state won't be enabled.
*/
hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
- } else {
- /* Only a configured device will accept the Set Feature
- * U1/U2_ENABLE
- */
- if (udev->actconfig)
- usb_set_device_initiated_lpm(udev, state, true);
+ return;
+ }
- /* As soon as usb_set_lpm_timeout(timeout) returns 0, the
- * hub-initiated LPM is enabled. Thus, LPM is enabled no
- * matter the result of usb_set_device_initiated_lpm().
- * The only difference is whether device is able to initiate
- * LPM.
- */
+ /* Only a configured device will accept the Set Feature
+ * U1/U2_ENABLE
+ */
+ if (udev->actconfig &&
+ usb_set_device_initiated_lpm(udev, state, true) == 0) {
if (state == USB3_LPM_U1)
udev->usb3_lpm_u1_enabled = 1;
else if (state == USB3_LPM_U2)
udev->usb3_lpm_u2_enabled = 1;
+ } else {
+ /* Don't request U1/U2 entry if the device
+ * cannot transition to U1/U2.
+ */
+ usb_set_lpm_timeout(udev, state, 0);
+ hcd->driver->disable_usb3_lpm_timeout(hcd, udev, state);
}
}
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index 4020ce8d..0d3fd20 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -2211,14 +2211,14 @@ int cdc_parse_cdc_header(struct usb_cdc_parsed_header *hdr,
(struct usb_cdc_dmm_desc *)buffer;
break;
case USB_CDC_MDLM_TYPE:
- if (elength < sizeof(struct usb_cdc_mdlm_desc *))
+ if (elength < sizeof(struct usb_cdc_mdlm_desc))
goto next_desc;
if (desc)
return -EINVAL;
desc = (struct usb_cdc_mdlm_desc *)buffer;
break;
case USB_CDC_MDLM_DETAIL_TYPE:
- if (elength < sizeof(struct usb_cdc_mdlm_detail_desc *))
+ if (elength < sizeof(struct usb_cdc_mdlm_detail_desc))
goto next_desc;
if (detail)
return -EINVAL;
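
Both hunks fix the same classic mistake: sizeof applied to a pointer type instead of the structure it points to. The old minimum-length check therefore accepted any descriptor of pointer size or more, so a short descriptor could be parsed past its end. In isolation:

    /*
     * sizes on a typical 64-bit build (usb_cdc_mdlm_desc is packed):
     *
     *   sizeof(struct usb_cdc_mdlm_desc *)  == 8   (size of a pointer)
     *   sizeof(struct usb_cdc_mdlm_desc)    == 21  (the descriptor itself)
     */
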
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 03614ef..3f68edd 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3125,6 +3125,7 @@ void dwc2_hsotg_disconnect(struct dwc2_hsotg *hsotg)
hsotg->connected = 0;
hsotg->test_mode = 0;
+ /* all endpoints should be shutdown */
for (ep = 0; ep < hsotg->num_of_eps; ep++) {
if (hsotg->eps_in[ep])
kill_all_requests(hsotg, hsotg->eps_in[ep],
@@ -3175,6 +3176,7 @@ static void dwc2_hsotg_irq_fifoempty(struct dwc2_hsotg *hsotg, bool periodic)
GINTSTS_PTXFEMP | \
GINTSTS_RXFLVL)
+static int dwc2_hsotg_ep_disable(struct usb_ep *ep);
/**
* dwc2_hsotg_core_init - issue softreset to the core
* @hsotg: The device state
@@ -3189,13 +3191,23 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
u32 val;
u32 usbcfg;
u32 dcfg = 0;
+ int ep;
/* Kill any ep0 requests as controller will be reinitialized */
kill_all_requests(hsotg, hsotg->eps_out[0], -ECONNRESET);
- if (!is_usb_reset)
+ if (!is_usb_reset) {
if (dwc2_core_reset(hsotg, true))
return;
+ } else {
+ /* all endpoints should be shutdown */
+ for (ep = 1; ep < hsotg->num_of_eps; ep++) {
+ if (hsotg->eps_in[ep])
+ dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
+ if (hsotg->eps_out[ep])
+ dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
+ }
+ }
/*
* we must now enable ep0 ready for host detection and then
@@ -3993,7 +4005,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
struct dwc2_hsotg *hsotg = hs_ep->parent;
int dir_in = hs_ep->dir_in;
int index = hs_ep->index;
- unsigned long flags;
u32 epctrl_reg;
u32 ctrl;
@@ -4011,8 +4022,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
- spin_lock_irqsave(&hsotg->lock, flags);
-
ctrl = dwc2_readl(hsotg, epctrl_reg);
if (ctrl & DXEPCTL_EPENA)
@@ -4035,10 +4044,22 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
hs_ep->fifo_index = 0;
hs_ep->fifo_size = 0;
- spin_unlock_irqrestore(&hsotg->lock, flags);
return 0;
}
+static int dwc2_hsotg_ep_disable_lock(struct usb_ep *ep)
+{
+ struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
+ struct dwc2_hsotg *hsotg = hs_ep->parent;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&hsotg->lock, flags);
+ ret = dwc2_hsotg_ep_disable(ep);
+ spin_unlock_irqrestore(&hsotg->lock, flags);
+ return ret;
+}
+
/**
* on_list - check request is on the given endpoint
* @ep: The endpoint to check.
@@ -4186,7 +4207,7 @@ static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
static const struct usb_ep_ops dwc2_hsotg_ep_ops = {
.enable = dwc2_hsotg_ep_enable,
- .disable = dwc2_hsotg_ep_disable,
+ .disable = dwc2_hsotg_ep_disable_lock,
.alloc_request = dwc2_hsotg_ep_alloc_request,
.free_request = dwc2_hsotg_ep_free_request,
.queue = dwc2_hsotg_ep_queue_lock,
@@ -4326,9 +4347,9 @@ static int dwc2_hsotg_udc_stop(struct usb_gadget *gadget)
/* all endpoints should be shutdown */
for (ep = 1; ep < hsotg->num_of_eps; ep++) {
if (hsotg->eps_in[ep])
- dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
+ dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
if (hsotg->eps_out[ep])
- dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
+ dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
}
spin_lock_irqsave(&hsotg->lock, flags);
@@ -4776,9 +4797,9 @@ int dwc2_hsotg_suspend(struct dwc2_hsotg *hsotg)
for (ep = 0; ep < hsotg->num_of_eps; ep++) {
if (hsotg->eps_in[ep])
- dwc2_hsotg_ep_disable(&hsotg->eps_in[ep]->ep);
+ dwc2_hsotg_ep_disable_lock(&hsotg->eps_in[ep]->ep);
if (hsotg->eps_out[ep])
- dwc2_hsotg_ep_disable(&hsotg->eps_out[ep]->ep);
+ dwc2_hsotg_ep_disable_lock(&hsotg->eps_out[ep]->ep);
}
}
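
The dwc2 split exists because dwc2_hsotg_ep_disable() used to take hsotg->lock itself, which made it unusable from the new reset-path loop in dwc2_hsotg_core_init_disconnected(), a context that already holds the lock. In summary:

    /* before: dwc2_hsotg_ep_disable() acquired hsotg->lock internally,
     *         so calling it with the lock held would self-deadlock.
     * after:  the bare helper assumes the caller holds the lock (reset
     *         path); dwc2_hsotg_ep_disable_lock() wraps it for external
     *         callers (usb_ep ops, udc_stop, suspend).
     */
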
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 82c761d..1b0f981 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -145,6 +145,10 @@ void dwc3_en_sleep_mode(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg |= DWC3_GUSB2PHYCFG_ENBLSLPM;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg |= DWC3_GUCTL1_L1_SUSP_THRLD_EN_FOR_HOST;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
}
void dwc3_dis_sleep_mode(struct dwc3 *dwc)
@@ -154,6 +158,10 @@ void dwc3_dis_sleep_mode(struct dwc3 *dwc)
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
reg &= ~DWC3_GUSB2PHYCFG_ENBLSLPM;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL1);
+ reg &= ~DWC3_GUCTL1_L1_SUSP_THRLD_EN_FOR_HOST;
+ dwc3_writel(dwc->regs, DWC3_GUCTL1, reg);
}
void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index c80f5d2..d2d3e16 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -258,6 +258,7 @@
#define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28)
#define DWC3_GUCTL1_DEV_L1_EXIT_BY_HW BIT(24)
#define DWC3_GUCTL1_IP_GAP_ADD_ON(n) (n << 21)
+#define DWC3_GUCTL1_L1_SUSP_THRLD_EN_FOR_HOST BIT(8)
/* Global Status Register */
#define DWC3_GSTS_OTG_IP BIT(10)
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index dbd869d..b678848 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -282,6 +282,32 @@ static const struct debugfs_reg32 dwc3_regs[] = {
dump_register(OSTS),
};
+static int dwc3_regdump_show(struct seq_file *s, void *unused)
+{
+ struct dwc3 *dwc = s->private;
+
+ if (atomic_read(&dwc->in_lpm)) {
+ seq_puts(s, "USB device is powered off\n");
+ return 0;
+ }
+
+ debugfs_print_regs32(s, dwc->regset->regs, dwc->regset->nregs,
+ dwc->regset->base, "");
+ return 0;
+}
+
+static int dwc3_regdump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_regdump_show, inode->i_private);
+}
+
+static const struct file_operations dwc3_regdump_fops = {
+ .open = dwc3_regdump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int dwc3_mode_show(struct seq_file *s, void *unused)
{
struct dwc3 *dwc = s->private;
@@ -997,7 +1023,7 @@ void dwc3_debugfs_init(struct dwc3 *dwc)
dwc->root = root;
- debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset);
+ debugfs_create_file("regdump", 0444, root, dwc, &dwc3_regdump_fops);
if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) {
debugfs_create_file("mode", S_IRUGO | S_IWUSR, root, dwc,
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index f294228..f916f87 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1202,11 +1202,12 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
ffs_log("enter");
if (!is_sync_kiocb(kiocb)) {
- p = kmalloc(sizeof(io_data), GFP_KERNEL);
+ p = kzalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
return -ENOMEM;
p->aio = true;
} else {
+ memset(p, 0, sizeof(*p));
p->aio = false;
}
@@ -1245,11 +1246,12 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
ffs_log("enter");
if (!is_sync_kiocb(kiocb)) {
- p = kmalloc(sizeof(io_data), GFP_KERNEL);
+ p = kzalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
return -ENOMEM;
p->aio = true;
} else {
+ memset(p, 0, sizeof(*p));
p->aio = false;
}
@@ -1363,6 +1365,7 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
struct usb_endpoint_descriptor desc1, *desc;
switch (epfile->ffs->gadget->speed) {
+ case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
desc_idx = 2;
break;
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index cf3dc1d..c0d0449 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1339,7 +1339,6 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
unsigned long flags;
struct gsi_ctrl_pkt *cpkt;
struct gsi_ctrl_port *c_port;
- struct usb_request *req;
enum ipa_usb_teth_prot prot_id =
*(enum ipa_usb_teth_prot *)(fp->private_data);
struct gsi_inst_status *inst_cur = &inst_status[prot_id];
@@ -1358,13 +1357,6 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
gsi = inst_cur->opts->gsi;
c_port = &gsi->c_port;
- req = c_port->notify_req;
-
- if (!c_port || !req || !req->buf) {
- log_event_err("%s: c_port %pK req %p req->buf %p",
- __func__, c_port, req, req ? req->buf : req);
- return -ENODEV;
- }
if (!count || count > GSI_MAX_CTRL_PKT_SIZE) {
log_event_err("error: ctrl pkt length %zu", count);
@@ -1439,9 +1431,9 @@ static long gsi_ctrl_dev_ioctl(struct file *fp, unsigned int cmd,
gsi = inst_cur->opts->gsi;
c_port = &gsi->c_port;
- if (!c_port) {
- log_event_err("%s: gsi ctrl port %pK", __func__, c_port);
- return -ENODEV;
+ if (!atomic_read(&gsi->connected)) {
+ log_event_err("USB cable not connected\n");
+ return -ECONNRESET;
}
switch (cmd) {
@@ -1807,7 +1799,7 @@ static int gsi_ctrl_send_notification(struct f_gsi *gsi)
__le32 *data;
struct usb_cdc_notification *event;
struct usb_request *req = gsi->c_port.notify_req;
- struct usb_composite_dev *cdev = gsi->function.config->cdev;
+ struct usb_composite_dev *cdev;
struct gsi_ctrl_pkt *cpkt;
unsigned long flags;
bool del_free_cpkt = false;
@@ -1838,6 +1830,7 @@ static int gsi_ctrl_send_notification(struct f_gsi *gsi)
log_event_dbg("%s: cpkt->type:%d\n", __func__, cpkt->type);
event = req->buf;
+ cdev = gsi->function.config->cdev;
switch (cpkt->type) {
case GSI_CTRL_NOTIFY_CONNECT:
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 0b64fbe..1212e7e 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -915,6 +915,9 @@ int usb_qdss_write(struct usb_qdss_ch *ch, struct qdss_request *d_req)
req->buf = d_req->buf;
req->length = d_req->length;
req->context = d_req;
+ req->sg = d_req->sg;
+ req->num_sgs = d_req->num_sgs;
+ req->num_mapped_sgs = d_req->num_mapped_sgs;
if (usb_ep_queue(qdss->port.data, req, GFP_ATOMIC)) {
spin_lock_irqsave(&qdss->lock, flags);
list_add_tail(&req->list, &qdss->data_write_pool);
diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
index fea02c7..a5254e8 100644
--- a/drivers/usb/gadget/udc/renesas_usb3.c
+++ b/drivers/usb/gadget/udc/renesas_usb3.c
@@ -19,6 +19,7 @@
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/sys_soc.h>
#include <linux/uaccess.h>
#include <linux/usb/ch9.h>
@@ -2378,9 +2379,9 @@ static ssize_t role_store(struct device *dev, struct device_attribute *attr,
if (usb3->forced_b_device)
return -EBUSY;
- if (!strncmp(buf, "host", strlen("host")))
+ if (sysfs_streq(buf, "host"))
new_mode_is_host = true;
- else if (!strncmp(buf, "peripheral", strlen("peripheral")))
+ else if (sysfs_streq(buf, "peripheral"))
new_mode_is_host = false;
else
return -EINVAL;
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 09a8ebd..6968b9f 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -159,7 +159,7 @@ static int hwahc_op_start(struct usb_hcd *usb_hcd)
return result;
error_set_cluster_id:
- wusb_cluster_id_put(wusbhc->cluster_id);
+ wusb_cluster_id_put(addr);
error_cluster_id_get:
goto out;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 3625a5c..070c66f 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -205,7 +205,7 @@ int usb_amd_find_chipset_info(void)
{
unsigned long flags;
struct amd_chipset_info info;
- int ret;
+ int need_pll_quirk = 0;
spin_lock_irqsave(&amd_lock, flags);
@@ -219,21 +219,28 @@ int usb_amd_find_chipset_info(void)
spin_unlock_irqrestore(&amd_lock, flags);
if (!amd_chipset_sb_type_init(&info)) {
- ret = 0;
goto commit;
}
- /* Below chipset generations needn't enable AMD PLL quirk */
- if (info.sb_type.gen == AMD_CHIPSET_UNKNOWN ||
- info.sb_type.gen == AMD_CHIPSET_SB600 ||
- info.sb_type.gen == AMD_CHIPSET_YANGTZE ||
- (info.sb_type.gen == AMD_CHIPSET_SB700 &&
- info.sb_type.rev > 0x3b)) {
+ switch (info.sb_type.gen) {
+ case AMD_CHIPSET_SB700:
+ need_pll_quirk = info.sb_type.rev <= 0x3B;
+ break;
+ case AMD_CHIPSET_SB800:
+ case AMD_CHIPSET_HUDSON2:
+ case AMD_CHIPSET_BOLTON:
+ need_pll_quirk = 1;
+ break;
+ default:
+ need_pll_quirk = 0;
+ break;
+ }
+
+ if (!need_pll_quirk) {
if (info.smbus_dev) {
pci_dev_put(info.smbus_dev);
info.smbus_dev = NULL;
}
- ret = 0;
goto commit;
}
@@ -252,7 +259,7 @@ int usb_amd_find_chipset_info(void)
}
}
- ret = info.probe_result = 1;
+ need_pll_quirk = info.probe_result = 1;
printk(KERN_DEBUG "QUIRK: Enable AMD PLL fix\n");
commit:
@@ -263,7 +270,7 @@ int usb_amd_find_chipset_info(void)
/* Mark that we where here */
amd_chipset.probe_count++;
- ret = amd_chipset.probe_result;
+ need_pll_quirk = amd_chipset.probe_result;
spin_unlock_irqrestore(&amd_lock, flags);
@@ -277,7 +284,7 @@ int usb_amd_find_chipset_info(void)
spin_unlock_irqrestore(&amd_lock, flags);
}
- return ret;
+ return need_pll_quirk;
}
EXPORT_SYMBOL_GPL(usb_amd_find_chipset_info);
diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
index 671bce1..8616c52 100644
--- a/drivers/usb/host/xhci-rcar.c
+++ b/drivers/usb/host/xhci-rcar.c
@@ -238,10 +238,15 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
* pointers. So, this driver clears the AC64 bit of xhci->hcc_params
* to call dma_set_coherent_mask(dev, DMA_BIT_MASK(32)) in
* xhci_gen_setup().
+ *
+	 * Also, since the firmware/internal CPU controls USBSTS.STS_HALT
+	 * and its processing slows down when the roothub port enters U3, a
+	 * long delay for the STS_HALT handshake is needed in xhci_suspend().
*/
if (xhci_rcar_is_gen2(hcd->self.controller) ||
- xhci_rcar_is_gen3(hcd->self.controller))
- xhci->quirks |= XHCI_NO_64BIT_SUPPORT;
+ xhci_rcar_is_gen3(hcd->self.controller)) {
+ xhci->quirks |= XHCI_NO_64BIT_SUPPORT | XHCI_SLOW_SUSPEND;
+ }
if (!xhci_rcar_wait_for_pll_active(hcd))
return -ETIMEDOUT;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 438d8ce..1d8c40f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -450,7 +450,7 @@ struct xhci_op_regs {
* e.g. not program values > '4' if BLC = '1' and a HIRD device is attached,
* or not program values < '4' if BLC = '0' and a BESL device is attached.
*/
-#define XHCI_DEFAULT_BESL 4
+#define XHCI_DEFAULT_BESL 0
/*
* USB3 specification define a 360ms tPollingLFPSTiemout for USB3 ports
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index c2991b8..55db0fc 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -866,19 +866,20 @@ static void iowarrior_disconnect(struct usb_interface *interface)
dev = usb_get_intfdata(interface);
mutex_lock(&iowarrior_open_disc_lock);
usb_set_intfdata(interface, NULL);
+ /* prevent device read, write and ioctl */
+ dev->present = 0;
minor = dev->minor;
+ mutex_unlock(&iowarrior_open_disc_lock);
+	/* give back our minor - this will call close(); locks need to be dropped at this point */
- /* give back our minor */
usb_deregister_dev(interface, &iowarrior_class);
mutex_lock(&dev->mutex);
/* prevent device read, write and ioctl */
- dev->present = 0;
mutex_unlock(&dev->mutex);
- mutex_unlock(&iowarrior_open_disc_lock);
if (dev->opened) {
/* There is a process that holds a filedescriptor to the device ,
diff --git a/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c b/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
index 3c97e40..39745c1 100644
--- a/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
+++ b/drivers/usb/misc/ssusb-redriver-nb7vpq904m.c
@@ -445,8 +445,7 @@ static int ssusb_redriver_vbus_notifier(struct notifier_block *nb,
redriver->vbus_active = event;
- if (redriver->vbus_active)
- queue_work(redriver->redriver_wq, &redriver->config_work);
+ queue_work(redriver->redriver_wq, &redriver->config_work);
return NOTIFY_DONE;
}
@@ -466,8 +465,7 @@ static int ssusb_redriver_id_notifier(struct notifier_block *nb,
redriver->host_active = host_active;
- if (redriver->host_active)
- queue_work(redriver->redriver_wq, &redriver->config_work);
+ queue_work(redriver->redriver_wq, &redriver->config_work);
return NOTIFY_DONE;
}
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 7b306aa..6715a12 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -92,7 +92,6 @@ static void yurex_delete(struct kref *kref)
dev_dbg(&dev->interface->dev, "%s\n", __func__);
- usb_put_dev(dev->udev);
if (dev->cntl_urb) {
usb_kill_urb(dev->cntl_urb);
kfree(dev->cntl_req);
@@ -108,6 +107,7 @@ static void yurex_delete(struct kref *kref)
dev->int_buffer, dev->urb->transfer_dma);
usb_free_urb(dev->urb);
}
+ usb_put_dev(dev->udev);
kfree(dev);
}
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index f3ff59d..4d52f1a 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -442,7 +442,6 @@ struct usbpd {
struct regulator *vconn;
bool vbus_enabled;
bool vconn_enabled;
- bool vconn_is_external;
u8 tx_msgid[SOPII_MSG + 1];
u8 rx_msgid[SOPII_MSG + 1];
@@ -516,6 +515,21 @@ enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd)
}
EXPORT_SYMBOL(usbpd_get_plug_orientation);
+static int get_connector_type(struct usbpd *pd)
+{
+ int ret;
+ union power_supply_propval val;
+
+ ret = power_supply_get_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_CONNECTOR_TYPE, &val);
+ if (ret) {
+ dev_err(&pd->dev, "Unable to read CONNECTOR TYPE: %d\n", ret);
+ return ret;
+ }
+
+ return val.intval;
+}
+
static inline void stop_usb_host(struct usbpd *pd)
{
extcon_set_state_sync(pd->extcon, EXTCON_USB_HOST, 0);
@@ -831,11 +845,6 @@ static int pd_select_pdo(struct usbpd *pd, int pdo_pos, int uv, int ua)
return -ENOTSUPP;
}
- /* Can't sink more than 5V if VCONN is sourced from the VBUS input */
- if (pd->vconn_enabled && !pd->vconn_is_external &&
- pd->requested_voltage > 5000000)
- return -ENOTSUPP;
-
pd->requested_current = curr;
pd->requested_pdo = pdo_pos;
@@ -899,6 +908,7 @@ static void pd_send_hard_reset(struct usbpd *pd)
pd->hard_reset_count++;
pd_phy_signal(HARD_RESET_SIG);
pd->in_pr_swap = false;
+ pd->pd_connected = false;
power_supply_set_property(pd->usb_psy, POWER_SUPPLY_PROP_PR_SWAP, &val);
}
@@ -2213,6 +2223,10 @@ static void handle_state_src_send_capabilities(struct usbpd *pd,
ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES, default_src_caps,
ARRAY_SIZE(default_src_caps), SOP_MSG);
if (ret) {
+ if (pd->pd_connected) {
+ usbpd_set_state(pd, PE_SEND_SOFT_RESET);
+ return;
+ }
/*
* Technically this is PE_SRC_Discovery, but we can
* handle it by setting a timer to come back to the
@@ -2850,21 +2864,6 @@ static bool handle_ctrl_snk_ready(struct usbpd *pd, struct rx_msg *rx_msg)
usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
break;
case MSG_VCONN_SWAP:
- /*
- * if VCONN is connected to VBUS, make sure we are
- * not in high voltage contract, otherwise reject.
- */
- if (!pd->vconn_is_external &&
- (pd->requested_voltage > 5000000)) {
- ret = pd_send_msg(pd, MSG_REJECT, NULL, 0,
- SOP_MSG);
- if (ret)
- usbpd_set_state(pd,
- PE_SEND_SOFT_RESET);
-
- break;
- }
-
ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
if (ret) {
usbpd_set_state(pd, PE_SEND_SOFT_RESET);
@@ -4673,9 +4672,6 @@ struct usbpd *usbpd_create(struct device *parent)
extcon_set_property_capability(pd->extcon, EXTCON_USB_HOST,
EXTCON_PROP_USB_SS);
- pd->vconn_is_external = device_property_present(parent,
- "qcom,vconn-uses-external-source");
-
pd->num_sink_caps = device_property_read_u32_array(parent,
"qcom,default-sink-caps", NULL, 0);
if (pd->num_sink_caps > 0) {
@@ -4727,10 +4723,17 @@ struct usbpd *usbpd_create(struct device *parent)
pd->typec_caps.port_type_set = usbpd_typec_port_type_set;
pd->partner_desc.identity = &pd->partner_identity;
- pd->typec_port = typec_register_port(parent, &pd->typec_caps);
- if (IS_ERR(pd->typec_port)) {
- usbpd_err(&pd->dev, "could not register typec port\n");
+ ret = get_connector_type(pd);
+ if (ret < 0)
goto put_psy;
+
+ /* For non-TypeC connector, it will be handled elsewhere */
+ if (ret != POWER_SUPPLY_CONNECTOR_MICRO_USB) {
+ pd->typec_port = typec_register_port(parent, &pd->typec_caps);
+ if (IS_ERR(pd->typec_port)) {
+ usbpd_err(&pd->dev, "could not register typec port\n");
+ goto put_psy;
+ }
}
pd->current_pr = PR_NONE;
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index 55ba3db..f24b20f 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -25,5 +25,6 @@
obj-$(CONFIG_USB_ULPI) += phy-ulpi.o
obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o
obj-$(CONFIG_KEYSTONE_USB_PHY) += phy-keystone.o
+obj-$(CONFIG_MSM_QUSB_PHY) += phy-msm-qusb.o
obj-$(CONFIG_USB_MSM_SSPHY_QMP) += phy-msm-ssusb-qmp.o
obj-$(CONFIG_MSM_HSUSB_PHY) += phy-msm-snps-hs.o
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
new file mode 100644
index 0000000..93fc8fd
--- /dev/null
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -0,0 +1,1173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/usb/phy.h>
+#include <linux/reset.h>
+
+#define QUSB2PHY_PLL_STATUS 0x38
+#define QUSB2PHY_PLL_LOCK BIT(5)
+
+#define QUSB2PHY_PORT_QC1 0x70
+#define VDM_SRC_EN BIT(4)
+#define VDP_SRC_EN BIT(2)
+
+#define QUSB2PHY_PORT_QC2 0x74
+#define RDM_UP_EN BIT(1)
+#define RDP_UP_EN BIT(3)
+#define RPUM_LOW_EN BIT(4)
+#define RPUP_LOW_EN BIT(5)
+
+#define QUSB2PHY_PORT_POWERDOWN 0xB4
+#define CLAMP_N_EN BIT(5)
+#define FREEZIO_N BIT(1)
+#define POWER_DOWN BIT(0)
+
+#define QUSB2PHY_PORT_TEST_CTRL 0xB8
+
+#define QUSB2PHY_PWR_CTRL1 0x210
+#define PWR_CTRL1_CLAMP_N_EN BIT(1)
+#define PWR_CTRL1_POWR_DOWN BIT(0)
+
+#define QUSB2PHY_PLL_COMMON_STATUS_ONE 0x1A0
+#define CORE_READY_STATUS BIT(0)
+
+#define QUSB2PHY_PORT_UTMI_CTRL1 0xC0
+#define TERM_SELECT BIT(4)
+#define XCVR_SELECT_FS BIT(2)
+#define OP_MODE_NON_DRIVE BIT(0)
+
+#define QUSB2PHY_PORT_UTMI_CTRL2 0xC4
+#define UTMI_ULPI_SEL BIT(7)
+#define UTMI_TEST_MUX_SEL BIT(6)
+
+#define QUSB2PHY_PLL_TEST 0x04
+#define CLK_REF_SEL BIT(7)
+
+#define QUSB2PHY_PORT_TUNE1 0x80
+#define QUSB2PHY_PORT_TUNE2 0x84
+#define QUSB2PHY_PORT_TUNE3 0x88
+#define QUSB2PHY_PORT_TUNE4 0x8C
+#define QUSB2PHY_PORT_TUNE5 0x90
+
+/* Get TUNE2's high nibble value read from efuse */
+#define TUNE2_HIGH_NIBBLE_VAL(val, pos, mask)	(((val) >> (pos)) & (mask))
+
+#define QUSB2PHY_PORT_INTR_CTRL 0xBC
+#define CHG_DET_INTR_EN BIT(4)
+#define DMSE_INTR_HIGH_SEL BIT(3)
+#define DMSE_INTR_EN BIT(2)
+#define DPSE_INTR_HIGH_SEL BIT(1)
+#define DPSE_INTR_EN BIT(0)
+
+#define QUSB2PHY_PORT_UTMI_STATUS 0xF4
+#define LINESTATE_DP BIT(0)
+#define LINESTATE_DM BIT(1)
+
+#define QUSB2PHY_1P8_VOL_MIN 1800000 /* uV */
+#define QUSB2PHY_1P8_VOL_MAX 1800000 /* uV */
+#define QUSB2PHY_1P8_HPM_LOAD 30000 /* uA */
+
+#define QUSB2PHY_3P3_VOL_MIN 3075000 /* uV */
+#define QUSB2PHY_3P3_VOL_MAX 3200000 /* uV */
+#define QUSB2PHY_3P3_HPM_LOAD 30000 /* uA */
+
+#define QUSB2PHY_REFCLK_ENABLE BIT(0)
+
+#define HSTX_TRIMSIZE 4
+
+struct qusb_phy {
+ struct usb_phy phy;
+ void __iomem *base;
+ void __iomem *tune2_efuse_reg;
+ void __iomem *ref_clk_base;
+ void __iomem *tcsr_clamp_dig_n;
+ void __iomem *tcsr_conn_box_spare;
+
+ struct clk *ref_clk_src;
+ struct clk *ref_clk;
+ struct clk *cfg_ahb_clk;
+ struct reset_control *phy_reset;
+ struct clk *iface_clk;
+ struct clk *core_clk;
+
+ struct regulator *gdsc;
+ struct regulator *vdd;
+ struct regulator *vdda33;
+ struct regulator *vdda18;
+ int vdd_levels[3]; /* none, low, high */
+ int init_seq_len;
+ int *qusb_phy_init_seq;
+ u32 major_rev;
+ u32 usb_hs_ac_bitmask;
+ u32 usb_hs_ac_value;
+
+ u32 tune2_val;
+ int tune2_efuse_bit_pos;
+ int tune2_efuse_num_of_bits;
+ int tune2_efuse_correction;
+
+ bool cable_connected;
+ bool suspended;
+ bool ulpi_mode;
+ bool dpdm_enable;
+ bool is_se_clk;
+
+ struct regulator_desc dpdm_rdesc;
+ struct regulator_dev *dpdm_rdev;
+
+ bool put_into_high_z_state;
+ struct mutex phy_lock;
+
+ /* debugfs entries */
+ struct dentry *root;
+ u8 tune1;
+ u8 tune2;
+ u8 tune3;
+ u8 tune4;
+ u8 tune5;
+};
+
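+/*
+ * Enable or disable all PHY clocks. Clocks are enabled from source to leaf
+ * (ref_clk_src first) and disabled in the reverse order.
+ */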
+static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
+{
+ dev_dbg(qphy->phy.dev, "%s(): on:%d\n", __func__, on);
+
+ if (on) {
+ clk_prepare_enable(qphy->ref_clk_src);
+ clk_prepare_enable(qphy->ref_clk);
+ clk_prepare_enable(qphy->iface_clk);
+ clk_prepare_enable(qphy->core_clk);
+ clk_prepare_enable(qphy->cfg_ahb_clk);
+ } else {
+ clk_disable_unprepare(qphy->cfg_ahb_clk);
+		/*
+		 * There is an FSM dependency between iface_clk and core_clk,
+		 * so turn core_clk off before iface_clk.
+		 */
+ clk_disable_unprepare(qphy->core_clk);
+ clk_disable_unprepare(qphy->iface_clk);
+ clk_disable_unprepare(qphy->ref_clk);
+ clk_disable_unprepare(qphy->ref_clk_src);
+ }
+}
+
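+/* Toggle the USB GDSC power domain; returns -EPERM if no GDSC was provided. */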
+static int qusb_phy_gdsc(struct qusb_phy *qphy, bool on)
+{
+ int ret;
+
+ if (IS_ERR_OR_NULL(qphy->gdsc))
+ return -EPERM;
+
+ if (on) {
+ dev_dbg(qphy->phy.dev, "TURNING ON GDSC\n");
+ ret = regulator_enable(qphy->gdsc);
+ if (ret) {
+ dev_err(qphy->phy.dev, "unable to enable gdsc\n");
+ return ret;
+ }
+ } else {
+ dev_dbg(qphy->phy.dev, "TURNING OFF GDSC\n");
+ ret = regulator_disable(qphy->gdsc);
+ if (ret) {
+ dev_err(qphy->phy.dev, "unable to disable gdsc\n");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
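+/*
+ * Set the vdd corner: vdd_levels[] holds the none/low/high corners, so the
+ * minimum is the low corner (index 1) while the PHY is active and the none
+ * corner (index 0) otherwise; the maximum is always the high corner.
+ */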
+static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high)
+{
+ int min, ret;
+
+ min = high ? 1 : 0; /* low or none? */
+ ret = regulator_set_voltage(qphy->vdd, qphy->vdd_levels[min],
+ qphy->vdd_levels[2]);
+ if (ret) {
+ dev_err(qphy->phy.dev, "unable to set voltage for qusb vdd\n");
+ return ret;
+ }
+
+ dev_dbg(qphy->phy.dev, "min_vol:%d max_vol:%d\n",
+ qphy->vdd_levels[min], qphy->vdd_levels[2]);
+ return ret;
+}
+
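+/*
+ * Turn the vdd, vdda18 and vdda33 supplies on in order, or unwind them in
+ * reverse. The disable path enters at the first label below and falls
+ * through, so an error at any step releases only what was already acquired.
+ */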
+static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on)
+{
+ int ret = 0;
+
+ dev_dbg(qphy->phy.dev, "%s turn %s regulators\n",
+ __func__, on ? "on" : "off");
+
+ if (!on)
+ goto disable_vdda33;
+
+ ret = qusb_phy_config_vdd(qphy, true);
+ if (ret) {
+ dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
+ ret);
+ goto err_vdd;
+ }
+
+ ret = regulator_enable(qphy->vdd);
+ if (ret) {
+ dev_err(qphy->phy.dev, "Unable to enable VDD\n");
+ goto unconfig_vdd;
+ }
+
+ ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD);
+ if (ret < 0) {
+ dev_err(qphy->phy.dev, "Unable to set HPM of vdda18:%d\n", ret);
+ goto disable_vdd;
+ }
+
+ ret = regulator_set_voltage(qphy->vdda18, QUSB2PHY_1P8_VOL_MIN,
+ QUSB2PHY_1P8_VOL_MAX);
+ if (ret) {
+ dev_err(qphy->phy.dev,
+ "Unable to set voltage for vdda18:%d\n", ret);
+ goto put_vdda18_lpm;
+ }
+
+ ret = regulator_enable(qphy->vdda18);
+ if (ret) {
+ dev_err(qphy->phy.dev, "Unable to enable vdda18:%d\n", ret);
+ goto unset_vdda18;
+ }
+
+ ret = regulator_set_load(qphy->vdda33, QUSB2PHY_3P3_HPM_LOAD);
+ if (ret < 0) {
+ dev_err(qphy->phy.dev, "Unable to set HPM of vdda33:%d\n", ret);
+ goto disable_vdda18;
+ }
+
+ ret = regulator_set_voltage(qphy->vdda33, QUSB2PHY_3P3_VOL_MIN,
+ QUSB2PHY_3P3_VOL_MAX);
+ if (ret) {
+ dev_err(qphy->phy.dev,
+ "Unable to set voltage for vdda33:%d\n", ret);
+ goto put_vdda33_lpm;
+ }
+
+ ret = regulator_enable(qphy->vdda33);
+ if (ret) {
+ dev_err(qphy->phy.dev, "Unable to enable vdda33:%d\n", ret);
+ goto unset_vdd33;
+ }
+
+ pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
+ return ret;
+
+disable_vdda33:
+ ret = regulator_disable(qphy->vdda33);
+ if (ret)
+ dev_err(qphy->phy.dev, "Unable to disable vdda33:%d\n", ret);
+
+unset_vdd33:
+ ret = regulator_set_voltage(qphy->vdda33, 0, QUSB2PHY_3P3_VOL_MAX);
+ if (ret)
+ dev_err(qphy->phy.dev,
+ "Unable to set (0) voltage for vdda33:%d\n", ret);
+
+put_vdda33_lpm:
+ ret = regulator_set_load(qphy->vdda33, 0);
+ if (ret < 0)
+ dev_err(qphy->phy.dev, "Unable to set (0) HPM of vdda33\n");
+
+disable_vdda18:
+ ret = regulator_disable(qphy->vdda18);
+ if (ret)
+ dev_err(qphy->phy.dev, "Unable to disable vdda18:%d\n", ret);
+
+unset_vdda18:
+ ret = regulator_set_voltage(qphy->vdda18, 0, QUSB2PHY_1P8_VOL_MAX);
+ if (ret)
+ dev_err(qphy->phy.dev,
+ "Unable to set (0) voltage for vdda18:%d\n", ret);
+
+put_vdda18_lpm:
+ ret = regulator_set_load(qphy->vdda18, 0);
+ if (ret < 0)
+ dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n");
+
+disable_vdd:
+ ret = regulator_disable(qphy->vdd);
+ if (ret)
+ dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
+ ret);
+
+unconfig_vdd:
+ ret = qusb_phy_config_vdd(qphy, false);
+ if (ret)
+		dev_err(qphy->phy.dev, "Unable to unconfig VDD:%d\n",
+ ret);
+err_vdd:
+ dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n");
+
+ return ret;
+}
+
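+/*
+ * Read the HSTX trim (TUNE2 high nibble) from the efuse. If the fuse field
+ * spans two adjacent 32-bit efuse words, the remaining high-order bits are
+ * read from the following word and stitched in above the first part.
+ */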
+static void qusb_phy_get_tune2_param(struct qusb_phy *qphy)
+{
+ u32 bit_mask = 1;
+ u8 reg_val;
+
+ pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
+ qphy->tune2_efuse_num_of_bits,
+ qphy->tune2_efuse_bit_pos);
+
+ /* get bit mask based on number of bits to use with efuse reg */
+ bit_mask = (bit_mask << qphy->tune2_efuse_num_of_bits) - 1;
+
+	/*
+	 * Read the efuse register that holds the TUNE2 parameter's high
+	 * nibble. If the efuse field reads 0x0, keep the previous value
+	 * as-is; otherwise use the efuse-based value.
+	 */
+ if (qphy->tune2_efuse_num_of_bits < HSTX_TRIMSIZE) {
+ qphy->tune2_val =
+ TUNE2_HIGH_NIBBLE_VAL(readl_relaxed(qphy->tune2_efuse_reg),
+ qphy->tune2_efuse_bit_pos, bit_mask);
+ bit_mask =
+ (1 << (HSTX_TRIMSIZE - qphy->tune2_efuse_num_of_bits)) - 1;
+ qphy->tune2_val |=
+ TUNE2_HIGH_NIBBLE_VAL(readl_relaxed(qphy->tune2_efuse_reg + 4),
+ 0, bit_mask) << qphy->tune2_efuse_num_of_bits;
+ } else {
+ qphy->tune2_val = readl_relaxed(qphy->tune2_efuse_reg);
+ qphy->tune2_val = TUNE2_HIGH_NIBBLE_VAL(qphy->tune2_val,
+ qphy->tune2_efuse_bit_pos, bit_mask);
+ }
+
+ pr_debug("%s(): efuse based tune2 value:%d\n",
+ __func__, qphy->tune2_val);
+
+ /* Update higher nibble of TUNE2 value for better rise/fall times */
+ if (qphy->tune2_efuse_correction && qphy->tune2_val) {
+ if (qphy->tune2_efuse_correction > 5 ||
+ qphy->tune2_efuse_correction < -10)
+			pr_warn("Correction value is out of range: %d\n",
+ qphy->tune2_efuse_correction);
+ else
+ qphy->tune2_val = qphy->tune2_val +
+ qphy->tune2_efuse_correction;
+ }
+
+ reg_val = readb_relaxed(qphy->base + QUSB2PHY_PORT_TUNE2);
+ if (qphy->tune2_val) {
+ reg_val &= 0x0f;
+ reg_val |= (qphy->tune2_val << 4);
+ }
+
+ qphy->tune2_val = reg_val;
+}
+
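+/*
+ * Write a sequence of {value, register-offset} pairs to the PHY, with an
+ * optional delay after each write.
+ */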
+static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
+ unsigned long delay)
+{
+ int i;
+
+ pr_debug("Seq count:%d\n", cnt);
+ for (i = 0; i < cnt; i = i+2) {
+ pr_debug("write 0x%02x to 0x%02x\n", seq[i], seq[i+1]);
+ writel_relaxed(seq[i], base + seq[i+1]);
+ if (delay)
+ usleep_range(delay, (delay + 2000));
+ }
+}
+
+static int qusb_phy_init(struct usb_phy *phy)
+{
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+ int ret, reset_val = 0;
+ u8 reg;
+ bool pll_lock_fail = false;
+
+ dev_dbg(phy->dev, "%s\n", __func__);
+
+	/*
+	 * The ref clock is enabled by default after power-on reset. The Linux
+	 * clock driver disables it as part of late init if no peripheral
+	 * driver explicitly votes for it, but it also does not disable the
+	 * clock until late init even if a peripheral driver explicitly
+	 * requests that and cannot defer its probe until late init. Hence,
+	 * explicitly disable the clock with a register write so the QUSB PHY
+	 * PLL can lock properly.
+	 */
+ if (qphy->ref_clk_base) {
+ writel_relaxed((readl_relaxed(qphy->ref_clk_base) &
+ ~QUSB2PHY_REFCLK_ENABLE),
+ qphy->ref_clk_base);
+		/* Make sure that the above write completes to get ref clk OFF */
+ wmb();
+ }
+
+ /* Perform phy reset */
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
+ usleep_range(100, 150);
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret)
+ dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
+
+ /* Disable the PHY */
+ if (qphy->major_rev < 2)
+ writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
+ qphy->base + QUSB2PHY_PORT_POWERDOWN);
+ else
+ writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+ PWR_CTRL1_POWR_DOWN,
+ qphy->base + QUSB2PHY_PWR_CTRL1);
+
+ /* configure for ULPI mode if requested */
+ if (qphy->ulpi_mode)
+ writel_relaxed(0x0, qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+ /* save reset value to override based on clk scheme */
+ if (qphy->ref_clk_base)
+ reset_val = readl_relaxed(qphy->base + QUSB2PHY_PLL_TEST);
+
+ if (qphy->qusb_phy_init_seq)
+ qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
+ qphy->init_seq_len, 0);
+
+	/*
+	 * Check the EFUSE value only if tune2_efuse_reg is available, and
+	 * read it only once, i.e. not on every USB cable connect.
+	 */
+ if (qphy->tune2_efuse_reg && !qphy->tune2) {
+ if (!qphy->tune2_val)
+ qusb_phy_get_tune2_param(qphy);
+
+ pr_debug("%s(): Programming TUNE2 parameter as:%x\n", __func__,
+ qphy->tune2_val);
+ writel_relaxed(qphy->tune2_val,
+ qphy->base + QUSB2PHY_PORT_TUNE2);
+ }
+
+	/* If a tune value was set via debugfs, override the register */
+ if (qphy->tune1) {
+ writel_relaxed(qphy->tune1,
+ qphy->base + QUSB2PHY_PORT_TUNE1);
+ }
+
+ if (qphy->tune2) {
+ writel_relaxed(qphy->tune2,
+ qphy->base + QUSB2PHY_PORT_TUNE2);
+ }
+
+ if (qphy->tune3) {
+ writel_relaxed(qphy->tune3,
+ qphy->base + QUSB2PHY_PORT_TUNE3);
+ }
+
+ if (qphy->tune4) {
+ writel_relaxed(qphy->tune4,
+ qphy->base + QUSB2PHY_PORT_TUNE4);
+ }
+
+ if (qphy->tune5) {
+ writel_relaxed(qphy->tune5,
+ qphy->base + QUSB2PHY_PORT_TUNE5);
+ }
+
+ /* ensure above writes are completed before re-enabling PHY */
+ wmb();
+
+ /* Enable the PHY */
+ if (qphy->major_rev < 2)
+ writel_relaxed(CLAMP_N_EN | FREEZIO_N,
+ qphy->base + QUSB2PHY_PORT_POWERDOWN);
+ else
+ writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) &
+ ~PWR_CTRL1_POWR_DOWN,
+ qphy->base + QUSB2PHY_PWR_CTRL1);
+
+ /* Ensure above write is completed before turning ON ref clk */
+ wmb();
+
+	/* Required to get PHY PLL lock successfully */
+ usleep_range(150, 160);
+
+ /* Turn on phy ref_clk if DIFF_CLK else select SE_CLK */
+ if (qphy->ref_clk_base) {
+ if (!qphy->is_se_clk) {
+ reset_val &= ~CLK_REF_SEL;
+ writel_relaxed((readl_relaxed(qphy->ref_clk_base) |
+ QUSB2PHY_REFCLK_ENABLE),
+ qphy->ref_clk_base);
+ } else {
+ reset_val |= CLK_REF_SEL;
+ writel_relaxed(reset_val,
+ qphy->base + QUSB2PHY_PLL_TEST);
+ }
+
+ /* Make sure above write is completed to get PLL source clock */
+ wmb();
+
+ /* Required to get PHY PLL lock successfully */
+ usleep_range(100, 110);
+ }
+
+ if (qphy->major_rev < 2) {
+ reg = readb_relaxed(qphy->base + QUSB2PHY_PLL_STATUS);
+ dev_dbg(phy->dev, "QUSB2PHY_PLL_STATUS:%x\n", reg);
+ if (!(reg & QUSB2PHY_PLL_LOCK))
+ pll_lock_fail = true;
+ } else {
+ reg = readb_relaxed(qphy->base +
+ QUSB2PHY_PLL_COMMON_STATUS_ONE);
+ dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
+ if (!(reg & CORE_READY_STATUS))
+ pll_lock_fail = true;
+ }
+
+ if (pll_lock_fail)
+ dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
+
+ return 0;
+}
+
+static void qusb_phy_shutdown(struct usb_phy *phy)
+{
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+ dev_dbg(phy->dev, "%s\n", __func__);
+
+ qusb_phy_enable_clocks(qphy, true);
+
+ /* Disable the PHY */
+ if (qphy->major_rev < 2)
+ writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
+ qphy->base + QUSB2PHY_PORT_POWERDOWN);
+ else
+ writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+ PWR_CTRL1_POWR_DOWN,
+ qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* Make sure the above write completes before turning off clocks */
+ wmb();
+
+ qusb_phy_enable_clocks(qphy, false);
+}
+
+/**
+ * qusb_phy_set_suspend() - Perform QUSB2 PHY suspend/resume.
+ * @phy: usb phy pointer.
+ * @suspend: 1 to enter suspend, 0 to resume.
+ */
+static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
+{
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+ u32 linestate = 0, intr_mask = 0;
+
+ if (qphy->suspended == suspend) {
+ dev_dbg(phy->dev, "%s: USB PHY is already suspended\n",
+ __func__);
+ return 0;
+ }
+
+ if (suspend) {
+ /* Bus suspend case */
+ if (qphy->cable_connected) {
+ /* Clear all interrupts */
+ writel_relaxed(0x00,
+ qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+
+ linestate = readl_relaxed(qphy->base +
+ QUSB2PHY_PORT_UTMI_STATUS);
+
+ /*
+ * D+/D- interrupts are level-triggered, but we are
+ * only interested if the line state changes, so enable
+ * the high/low trigger based on current state. In
+ * other words, enable the triggers _opposite_ of what
+ * the current D+/D- levels are.
+ * e.g. if currently D+ high, D- low (HS 'J'/Suspend),
+ * configure the mask to trigger on D+ low OR D- high
+ */
+ intr_mask = DPSE_INTR_EN | DMSE_INTR_EN;
+ if (!(linestate & LINESTATE_DP)) /* D+ low */
+ intr_mask |= DPSE_INTR_HIGH_SEL;
+ if (!(linestate & LINESTATE_DM)) /* D- low */
+ intr_mask |= DMSE_INTR_HIGH_SEL;
+
+ writel_relaxed(intr_mask,
+ qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+
+ if (linestate & (LINESTATE_DP | LINESTATE_DM)) {
+ /* enable phy auto-resume */
+ writel_relaxed(0x0C,
+ qphy->base + QUSB2PHY_PORT_TEST_CTRL);
+ /* flush the previous write before next write */
+ wmb();
+ writel_relaxed(0x04,
+ qphy->base + QUSB2PHY_PORT_TEST_CTRL);
+ }
+
+ dev_dbg(phy->dev, "%s: intr_mask = %x\n",
+ __func__, intr_mask);
+
+			/* Make sure that the above write goes through */
+ wmb();
+
+ qusb_phy_enable_clocks(qphy, false);
+ } else { /* Disconnect case */
+ mutex_lock(&qphy->phy_lock);
+ /* Disable all interrupts */
+ writel_relaxed(0x00,
+ qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+
+ /* Disable PHY */
+ writel_relaxed(POWER_DOWN | readl_relaxed(qphy->base +
+ QUSB2PHY_PORT_POWERDOWN),
+ qphy->base + QUSB2PHY_PORT_POWERDOWN);
+ /* Make sure that above write is completed */
+ wmb();
+
+ if (qphy->tcsr_clamp_dig_n)
+ writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+
+ qusb_phy_enable_clocks(qphy, false);
+ qusb_phy_enable_power(qphy, false);
+ mutex_unlock(&qphy->phy_lock);
+
+			/*
+			 * Set put_into_high_z_state to true so that on the
+			 * next USB cable connect the DPF_DMF request performs
+			 * a PHY reset and puts the PHY into the high-Z state.
+			 * At bootup, with or without a USB cable, the QUSB
+			 * PHY does not need to be put into the high-Z state.
+			 */
+ qphy->put_into_high_z_state = true;
+ }
+ qphy->suspended = true;
+ } else {
+		/* Bus resume case */
+ if (qphy->cable_connected) {
+ qusb_phy_enable_clocks(qphy, true);
+ /* Clear all interrupts on resume */
+ writel_relaxed(0x00,
+ qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+ } else {
+ qusb_phy_enable_power(qphy, true);
+ if (qphy->tcsr_clamp_dig_n)
+ writel_relaxed(0x1,
+ qphy->tcsr_clamp_dig_n);
+ qusb_phy_enable_clocks(qphy, true);
+ }
+ qphy->suspended = false;
+ }
+
+ return 0;
+}
+
+static int qusb_phy_notify_connect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+{
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+ qphy->cable_connected = true;
+
+ dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
+ qphy->cable_connected);
+ return 0;
+}
+
+static int qusb_phy_notify_disconnect(struct usb_phy *phy,
+ enum usb_device_speed speed)
+{
+ struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+ qphy->cable_connected = false;
+
+	dev_dbg(phy->dev, "QUSB PHY: disconnect notification cable_connected=%d\n",
+ qphy->cable_connected);
+ return 0;
+}
+
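+/*
+ * The DP/DM regulator lets a consumer (e.g. a charger detection driver)
+ * power the PHY and park D+/D- in a known high-Z state without the USB
+ * controller being active.
+ */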
+static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
+{
+ int ret = 0;
+ struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+ dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+ __func__, qphy->dpdm_enable);
+
+ mutex_lock(&qphy->phy_lock);
+ if (!qphy->dpdm_enable) {
+ ret = qusb_phy_enable_power(qphy, true);
+ if (ret < 0) {
+ dev_dbg(qphy->phy.dev,
+ "dpdm regulator enable failed:%d\n", ret);
+ mutex_unlock(&qphy->phy_lock);
+ return ret;
+ }
+ qphy->dpdm_enable = true;
+ if (qphy->put_into_high_z_state) {
+ if (qphy->tcsr_clamp_dig_n)
+ writel_relaxed(0x1,
+ qphy->tcsr_clamp_dig_n);
+
+ qusb_phy_gdsc(qphy, true);
+ qusb_phy_enable_clocks(qphy, true);
+
+ dev_dbg(qphy->phy.dev, "RESET QUSB PHY\n");
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(qphy->phy.dev, "phyassert failed\n");
+ usleep_range(100, 150);
+ ret = reset_control_deassert(qphy->phy_reset);
+ if (ret)
+ dev_err(qphy->phy.dev, "deassert failed\n");
+
+			/*
+			 * The PHY in non-driving mode leaves the DP and DM
+			 * lines in the high-Z state. Controller power
+			 * collapse does not switch the PHY to non-driving
+			 * mode, which causes charger detection to fail.
+			 * Bring the PHY to non-driving mode by overriding
+			 * the controller output via the UTMI interface.
+			 */
+ writel_relaxed(TERM_SELECT | XCVR_SELECT_FS |
+ OP_MODE_NON_DRIVE,
+ qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
+ writel_relaxed(UTMI_ULPI_SEL |
+ UTMI_TEST_MUX_SEL,
+ qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+ /* Disable PHY */
+ writel_relaxed(CLAMP_N_EN | FREEZIO_N |
+ POWER_DOWN,
+ qphy->base + QUSB2PHY_PORT_POWERDOWN);
+ /* Make sure that above write is completed */
+ wmb();
+
+ qusb_phy_enable_clocks(qphy, false);
+ qusb_phy_gdsc(qphy, false);
+ }
+ }
+ mutex_unlock(&qphy->phy_lock);
+
+ return ret;
+}
+
+static int qusb_phy_dpdm_regulator_disable(struct regulator_dev *rdev)
+{
+ int ret = 0;
+ struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+ dev_dbg(qphy->phy.dev, "%s dpdm_enable:%d\n",
+ __func__, qphy->dpdm_enable);
+
+ mutex_lock(&qphy->phy_lock);
+ if (qphy->dpdm_enable) {
+ /* If usb core is active, rely on set_suspend to clamp phy */
+ if (!qphy->cable_connected) {
+ if (qphy->tcsr_clamp_dig_n)
+ writel_relaxed(0x0,
+ qphy->tcsr_clamp_dig_n);
+ }
+ ret = qusb_phy_enable_power(qphy, false);
+ if (ret < 0) {
+ dev_dbg(qphy->phy.dev,
+ "dpdm regulator disable failed:%d\n", ret);
+ mutex_unlock(&qphy->phy_lock);
+ return ret;
+ }
+ qphy->dpdm_enable = false;
+ }
+ mutex_unlock(&qphy->phy_lock);
+
+ return ret;
+}
+
+static int qusb_phy_dpdm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+ dev_dbg(qphy->phy.dev, "%s qphy->dpdm_enable = %d\n", __func__,
+ qphy->dpdm_enable);
+ return qphy->dpdm_enable;
+}
+
+static struct regulator_ops qusb_phy_dpdm_regulator_ops = {
+ .enable = qusb_phy_dpdm_regulator_enable,
+ .disable = qusb_phy_dpdm_regulator_disable,
+ .is_enabled = qusb_phy_dpdm_regulator_is_enabled,
+};
+
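+/*
+ * Register the DP/DM control as a voltage regulator named after the PHY's
+ * device tree node so consumers can request it as a standard supply.
+ */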
+static int qusb_phy_regulator_init(struct qusb_phy *qphy)
+{
+ struct device *dev = qphy->phy.dev;
+ struct regulator_config cfg = {};
+ struct regulator_init_data *init_data;
+
+ init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+ if (!init_data)
+ return -ENOMEM;
+
+ init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+ qphy->dpdm_rdesc.owner = THIS_MODULE;
+ qphy->dpdm_rdesc.type = REGULATOR_VOLTAGE;
+ qphy->dpdm_rdesc.ops = &qusb_phy_dpdm_regulator_ops;
+ qphy->dpdm_rdesc.name = kbasename(dev->of_node->full_name);
+
+ cfg.dev = dev;
+ cfg.init_data = init_data;
+ cfg.driver_data = qphy;
+ cfg.of_node = dev->of_node;
+
+ qphy->dpdm_rdev = devm_regulator_register(dev, &qphy->dpdm_rdesc, &cfg);
+ return PTR_ERR_OR_ZERO(qphy->dpdm_rdev);
+}
+
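+/*
+ * Expose the five TUNEx overrides in debugfs; non-zero values written there
+ * are applied on the next qusb_phy_init().
+ */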
+static void qusb_phy_create_debugfs(struct qusb_phy *qphy)
+{
+ qphy->root = debugfs_create_dir(dev_name(qphy->phy.dev), NULL);
+ debugfs_create_x8("tune1", 0644, qphy->root, &qphy->tune1);
+ debugfs_create_x8("tune2", 0644, qphy->root, &qphy->tune2);
+ debugfs_create_x8("tune3", 0644, qphy->root, &qphy->tune3);
+ debugfs_create_x8("tune4", 0644, qphy->root, &qphy->tune4);
+ debugfs_create_x8("tune5", 0644, qphy->root, &qphy->tune5);
+}
+
+static int qusb_phy_probe(struct platform_device *pdev)
+{
+ struct qusb_phy *qphy;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret = 0, size = 0;
+ const char *phy_type;
+ bool hold_phy_reset;
+ u32 temp;
+
+ qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+ if (!qphy)
+ return -ENOMEM;
+
+ qphy->phy.dev = dev;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "qusb_phy_base");
+ qphy->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(qphy->base))
+ return PTR_ERR(qphy->base);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "tune2_efuse_addr");
+ if (res) {
+ qphy->tune2_efuse_reg = devm_ioremap_nocache(dev, res->start,
+ resource_size(res));
+ if (!IS_ERR_OR_NULL(qphy->tune2_efuse_reg)) {
+ ret = of_property_read_u32(dev->of_node,
+ "qcom,tune2-efuse-bit-pos",
+ &qphy->tune2_efuse_bit_pos);
+ if (!ret) {
+ ret = of_property_read_u32(dev->of_node,
+ "qcom,tune2-efuse-num-bits",
+ &qphy->tune2_efuse_num_of_bits);
+ }
+ of_property_read_u32(dev->of_node,
+ "qcom,tune2-efuse-correction",
+ &qphy->tune2_efuse_correction);
+
+ if (ret) {
+ dev_err(dev, "DT Value for tune2 efuse is invalid.\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "ref_clk_addr");
+ if (res) {
+ qphy->ref_clk_base = devm_ioremap_nocache(dev,
+ res->start, resource_size(res));
+		if (!qphy->ref_clk_base) {
+			dev_dbg(dev, "ref_clk_address is not available.\n");
+			return -ENOMEM;
+		}
+
+ ret = of_property_read_string(dev->of_node,
+ "qcom,phy-clk-scheme", &phy_type);
+ if (ret) {
+			dev_err(dev, "error: missing qcom,phy-clk-scheme\n");
+ return ret;
+ }
+
+ if (!strcasecmp(phy_type, "cml")) {
+ qphy->is_se_clk = false;
+ } else if (!strcasecmp(phy_type, "cmos")) {
+ qphy->is_se_clk = true;
+ } else {
+			dev_err(dev, "error: invalid qcom,phy-clk-scheme\n");
+ return -EINVAL;
+ }
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "tcsr_clamp_dig_n_1p8");
+ if (res) {
+ qphy->tcsr_clamp_dig_n = devm_ioremap_nocache(dev,
+ res->start, resource_size(res));
+		if (!qphy->tcsr_clamp_dig_n)
+			dev_err(dev, "err reading tcsr_clamp_dig_n\n");
+ }
+
+ ret = of_property_read_u32(dev->of_node, "qcom,usb-hs-ac-bitmask",
+ &qphy->usb_hs_ac_bitmask);
+ if (!ret) {
+ ret = of_property_read_u32(dev->of_node, "qcom,usb-hs-ac-value",
+ &qphy->usb_hs_ac_value);
+ if (ret) {
+			dev_err(dev, "%s: usb_hs_ac_value not passed\n", __func__);
+ return ret;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "tcsr_conn_box_spare_0");
+ if (!res) {
+			dev_err(dev, "%s: tcsr_conn_box_spare_0 not passed\n",
+				__func__);
+ return -ENOENT;
+ }
+
+ qphy->tcsr_conn_box_spare = devm_ioremap_nocache(dev,
+ res->start, resource_size(res));
+		if (!qphy->tcsr_conn_box_spare) {
+			dev_err(dev, "err reading tcsr_conn_box_spare\n");
+			return -ENOMEM;
+		}
+ }
+
+ qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
+ if (IS_ERR(qphy->ref_clk_src))
+ dev_dbg(dev, "clk get failed for ref_clk_src\n");
+
+ /* ref_clk is needed only for DIFF_CLK case, hence make it optional. */
+ if (of_property_match_string(pdev->dev.of_node,
+ "clock-names", "ref_clk") >= 0) {
+ qphy->ref_clk = devm_clk_get(dev, "ref_clk");
+ if (IS_ERR(qphy->ref_clk)) {
+ ret = PTR_ERR(qphy->ref_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_dbg(dev,
+ "clk get failed for ref_clk\n");
+ return ret;
+ }
+
+ clk_set_rate(qphy->ref_clk, 19200000);
+ }
+
+ qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
+ if (IS_ERR(qphy->cfg_ahb_clk))
+ return PTR_ERR(qphy->cfg_ahb_clk);
+
+ qphy->phy_reset = devm_reset_control_get(dev, "phy_reset");
+ if (IS_ERR(qphy->phy_reset))
+ return PTR_ERR(qphy->phy_reset);
+
+ if (of_property_match_string(dev->of_node,
+ "clock-names", "iface_clk") >= 0) {
+ qphy->iface_clk = devm_clk_get(dev, "iface_clk");
+ if (IS_ERR(qphy->iface_clk)) {
+ ret = PTR_ERR(qphy->iface_clk);
+ qphy->iface_clk = NULL;
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_err(dev, "couldn't get iface_clk(%d)\n", ret);
+ }
+ }
+
+ if (of_property_match_string(dev->of_node,
+ "clock-names", "core_clk") >= 0) {
+ qphy->core_clk = devm_clk_get(dev, "core_clk");
+ if (IS_ERR(qphy->core_clk)) {
+ ret = PTR_ERR(qphy->core_clk);
+ qphy->core_clk = NULL;
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ dev_err(dev, "couldn't get core_clk(%d)\n", ret);
+ }
+ }
+
+ qphy->gdsc = devm_regulator_get(dev, "USB3_GDSC");
+ if (IS_ERR(qphy->gdsc))
+ qphy->gdsc = NULL;
+
+ size = 0;
+ of_get_property(dev->of_node, "qcom,qusb-phy-init-seq", &size);
+ if (size) {
+ qphy->qusb_phy_init_seq = devm_kzalloc(dev,
+ size, GFP_KERNEL);
+ if (qphy->qusb_phy_init_seq) {
+ qphy->init_seq_len =
+ (size / sizeof(*qphy->qusb_phy_init_seq));
+ if (qphy->init_seq_len % 2) {
+ dev_err(dev, "invalid init_seq_len\n");
+ return -EINVAL;
+ }
+
+ of_property_read_u32_array(dev->of_node,
+ "qcom,qusb-phy-init-seq",
+ qphy->qusb_phy_init_seq,
+ qphy->init_seq_len);
+ } else {
+ dev_err(dev, "error allocating memory for phy_init_seq\n");
+ }
+ }
+
+ qphy->ulpi_mode = false;
+ ret = of_property_read_string(dev->of_node, "phy_type", &phy_type);
+
+ if (!ret) {
+ if (!strcasecmp(phy_type, "ulpi"))
+ qphy->ulpi_mode = true;
+ } else {
+ dev_err(dev, "error reading phy_type property\n");
+ return ret;
+ }
+
+ hold_phy_reset = of_property_read_bool(dev->of_node, "qcom,hold-reset");
+
+	/* default the major revision to 2 */
+ qphy->major_rev = 2;
+ ret = of_property_read_u32(dev->of_node, "qcom,major-rev",
+ &qphy->major_rev);
+
+ ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level",
+ (u32 *) qphy->vdd_levels,
+ ARRAY_SIZE(qphy->vdd_levels));
+ if (ret) {
+ dev_err(dev, "error reading qcom,vdd-voltage-level property\n");
+ return ret;
+ }
+
+ qphy->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(qphy->vdd)) {
+ dev_err(dev, "unable to get vdd supply\n");
+ return PTR_ERR(qphy->vdd);
+ }
+
+ qphy->vdda33 = devm_regulator_get(dev, "vdda33");
+ if (IS_ERR(qphy->vdda33)) {
+ dev_err(dev, "unable to get vdda33 supply\n");
+ return PTR_ERR(qphy->vdda33);
+ }
+
+ qphy->vdda18 = devm_regulator_get(dev, "vdda18");
+ if (IS_ERR(qphy->vdda18)) {
+ dev_err(dev, "unable to get vdda18 supply\n");
+ return PTR_ERR(qphy->vdda18);
+ }
+
+ mutex_init(&qphy->phy_lock);
+ platform_set_drvdata(pdev, qphy);
+
+ qphy->phy.label = "msm-qusb-phy";
+ qphy->phy.init = qusb_phy_init;
+ qphy->phy.set_suspend = qusb_phy_set_suspend;
+ qphy->phy.shutdown = qusb_phy_shutdown;
+ qphy->phy.type = USB_PHY_TYPE_USB2;
+ qphy->phy.notify_connect = qusb_phy_notify_connect;
+ qphy->phy.notify_disconnect = qusb_phy_notify_disconnect;
+
+	/*
+	 * On some platforms multiple QUSB PHYs are available. If a QUSB PHY is
+	 * not used, leakage current is seen on its related voltage rail, so
+	 * explicitly keep the unused PHY in the reset state here.
+	 */
+ if (hold_phy_reset) {
+ ret = reset_control_assert(qphy->phy_reset);
+ if (ret)
+ dev_err(dev, "%s:phy_reset assert failed\n", __func__);
+ }
+
+ ret = usb_add_phy_dev(&qphy->phy);
+ if (ret)
+ return ret;
+
+ ret = qusb_phy_regulator_init(qphy);
+ if (ret)
+ usb_remove_phy(&qphy->phy);
+
+	/* de-assert clamp_dig_n to reduce leakage on the 1p8 rail at boot */
+ if (qphy->tcsr_clamp_dig_n)
+ writel_relaxed(0x0, qphy->tcsr_clamp_dig_n);
+
+ /*
+ * Write the usb_hs_ac_value to usb_hs_ac_bitmask of tcsr_conn_box_spare
+ * reg to enable AC/DC coupling
+ */
+ if (qphy->tcsr_conn_box_spare) {
+ temp = readl_relaxed(qphy->tcsr_conn_box_spare) &
+ ~qphy->usb_hs_ac_bitmask;
+ writel_relaxed(temp | qphy->usb_hs_ac_value,
+ qphy->tcsr_conn_box_spare);
+ }
+
+ qphy->suspended = true;
+
+ qusb_phy_create_debugfs(qphy);
+
+ return ret;
+}
+
+static int qusb_phy_remove(struct platform_device *pdev)
+{
+ struct qusb_phy *qphy = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(qphy->root);
+ usb_remove_phy(&qphy->phy);
+ qphy->cable_connected = false;
+ qusb_phy_set_suspend(&qphy->phy, true);
+
+ return 0;
+}
+
+static const struct of_device_id qusb_phy_id_table[] = {
+ { .compatible = "qcom,qusb2phy", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, qusb_phy_id_table);
+
+static struct platform_driver qusb_phy_driver = {
+ .probe = qusb_phy_probe,
+ .remove = qusb_phy_remove,
+ .driver = {
+ .name = "msm-qusb-phy",
+ .of_match_table = of_match_ptr(qusb_phy_id_table),
+ },
+};
+
+module_platform_driver(qusb_phy_driver);
+
+MODULE_DESCRIPTION("MSM QUSB2 PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/phy-msm-snps-hs.c b/drivers/usb/phy/phy-msm-snps-hs.c
index 9107cc4..594c8ef 100644
--- a/drivers/usb/phy/phy-msm-snps-hs.c
+++ b/drivers/usb/phy/phy-msm-snps-hs.c
@@ -19,6 +19,7 @@
#include <linux/regulator/machine.h>
#include <linux/usb/phy.h>
#include <linux/reset.h>
+#include <linux/debugfs.h>
#define USB2_PHY_USB_PHY_UTMI_CTRL0 (0x3c)
#define OPMODE_MASK (0x3 << 3)
@@ -59,6 +60,15 @@
#define USB2PHY_USB_PHY_RTUNE_SEL (0xb4)
#define RTUNE_SEL BIT(0)
+#define TXPREEMPAMPTUNE0(x)			((x) << 6)
+#define TXPREEMPAMPTUNE0_MASK (BIT(7) | BIT(6))
+#define USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X0 0x6c
+#define USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X1 0x70
+#define USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X2 0x74
+#define USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X3 0x78
+#define TXVREFTUNE0_MASK 0xF
+#define PARAM_OVRD_MASK 0xFF
+
#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
#define USB_HSPHY_3P3_VOL_MAX 3300000 /* uV */
#define USB_HSPHY_3P3_HPM_LOAD 16000 /* uA */
@@ -87,7 +97,6 @@ struct msm_hsphy {
bool suspended;
bool cable_connected;
bool dpdm_enable;
- bool no_rext_present;
int *param_override_seq;
int param_override_seq_cnt;
@@ -98,6 +107,15 @@ struct msm_hsphy {
struct mutex phy_lock;
struct regulator_desc dpdm_rdesc;
struct regulator_dev *dpdm_rdev;
+
+ /* debugfs entries */
+ struct dentry *root;
+ u8 txvref_tune0;
+ u8 pre_emphasis;
+ u8 param_ovrd0;
+ u8 param_ovrd1;
+ u8 param_ovrd2;
+ u8 param_ovrd3;
};
static void msm_hsphy_enable_clocks(struct msm_hsphy *phy, bool on)
@@ -360,6 +378,53 @@ static int msm_hsphy_init(struct usb_phy *uphy)
hsusb_phy_write_seq(phy->base, phy->param_override_seq,
phy->param_override_seq_cnt, 0);
+ if (phy->pre_emphasis) {
+ u8 val = TXPREEMPAMPTUNE0(phy->pre_emphasis) &
+				TXPREEMPAMPTUNE0_MASK;
+
+		if (val)
+ msm_usb_write_readback(phy->base,
+ USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X1,
+ TXPREEMPAMPTUNE0_MASK, val);
+ }
+
+ if (phy->txvref_tune0) {
+ u8 val = phy->txvref_tune0 & TXVREFTUNE0_MASK;
+
+ msm_usb_write_readback(phy->base,
+ USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X1,
+ TXVREFTUNE0_MASK, val);
+ }
+
+ if (phy->param_ovrd0) {
+ msm_usb_write_readback(phy->base,
+ USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X0,
+ PARAM_OVRD_MASK, phy->param_ovrd0);
+ }
+
+ if (phy->param_ovrd1) {
+ msm_usb_write_readback(phy->base,
+ USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X1,
+ PARAM_OVRD_MASK, phy->param_ovrd1);
+ }
+
+ if (phy->param_ovrd2) {
+ msm_usb_write_readback(phy->base,
+ USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X2,
+ PARAM_OVRD_MASK, phy->param_ovrd2);
+ }
+
+ if (phy->param_ovrd3) {
+ msm_usb_write_readback(phy->base,
+ USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X3,
+ PARAM_OVRD_MASK, phy->param_ovrd3);
+ }
+
+ dev_dbg(uphy->dev, "x0:%08x x1:%08x x2:%08x x3:%08x\n",
+ readl_relaxed(phy->base + USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X0),
+ readl_relaxed(phy->base + USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X1),
+ readl_relaxed(phy->base + USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X2),
+ readl_relaxed(phy->base + USB2PHY_USB_PHY_PARAMETER_OVERRIDE_X3));
+
if (phy->phy_rcal_reg) {
rcal_code = readl_relaxed(phy->phy_rcal_reg) & phy->rcal_mask;
@@ -367,15 +432,6 @@ static int msm_hsphy_init(struct usb_phy *uphy)
phy->rcal_mask, phy->phy_rcal_reg, rcal_code);
}
- /*
- * Use external resistor value only if:
- * a. It is present and
- * b. efuse is not programmed.
- */
- if (!phy->no_rext_present && !rcal_code)
- msm_usb_write_readback(phy->base, USB2PHY_USB_PHY_RTUNE_SEL,
- RTUNE_SEL, RTUNE_SEL);
-
msm_usb_write_readback(phy->base, USB2_PHY_USB_PHY_HS_PHY_CTRL_COMMON2,
VREGBYPASS, VREGBYPASS);
@@ -574,6 +630,17 @@ static int msm_hsphy_regulator_init(struct msm_hsphy *phy)
return 0;
}
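+/*
+ * Runtime tuning knobs: non-zero values written to these debugfs files are
+ * applied to the corresponding PARAMETER_OVERRIDE registers on the next
+ * PHY init.
+ */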
+static void msm_hsphy_create_debugfs(struct msm_hsphy *phy)
+{
+ phy->root = debugfs_create_dir(dev_name(phy->phy.dev), NULL);
+ debugfs_create_x8("pre_emphasis", 0644, phy->root, &phy->pre_emphasis);
+ debugfs_create_x8("txvref_tune0", 0644, phy->root, &phy->txvref_tune0);
+ debugfs_create_x8("param_ovrd0", 0644, phy->root, &phy->param_ovrd0);
+ debugfs_create_x8("param_ovrd1", 0644, phy->root, &phy->param_ovrd1);
+ debugfs_create_x8("param_ovrd2", 0644, phy->root, &phy->param_ovrd2);
+ debugfs_create_x8("param_ovrd3", 0644, phy->root, &phy->param_ovrd3);
+}
+
static int msm_hsphy_probe(struct platform_device *pdev)
{
struct msm_hsphy *phy;
@@ -655,9 +722,6 @@ static int msm_hsphy_probe(struct platform_device *pdev)
if (IS_ERR(phy->phy_reset))
return PTR_ERR(phy->phy_reset);
- phy->no_rext_present = of_property_read_bool(dev->of_node,
- "qcom,no-rext-present");
-
phy->param_override_seq_cnt = of_property_count_elems_of_size(
dev->of_node,
"qcom,param-override-seq",
@@ -735,6 +799,8 @@ static int msm_hsphy_probe(struct platform_device *pdev)
return ret;
}
+ msm_hsphy_create_debugfs(phy);
+
return 0;
err_ret:
@@ -748,6 +814,8 @@ static int msm_hsphy_remove(struct platform_device *pdev)
if (!phy)
return 0;
+ debugfs_remove_recursive(phy->root);
+
usb_remove_phy(&phy->phy);
clk_disable_unprepare(phy->ref_clk_src);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index e0a4749..56f572c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -968,6 +968,11 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7B) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x06, 0x7C) },
+ /* Motorola devices */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2a70, 0xff, 0xff, 0xff) }, /* mdm6600 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x2e0a, 0xff, 0xff, 0xff) }, /* mdm9600 */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x4281, 0x0a, 0x00, 0xfc) }, /* mdm ram dl */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x22b8, 0x900e, 0xff, 0xff, 0xff) }, /* mdm qc dl */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
@@ -1549,6 +1554,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
.driver_info = RSVD(2) },
{ USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1952,11 +1958,15 @@ static const struct usb_device_id option_ids[] = {
.driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e3d, 0xff), /* D-Link DWM-222 A2 */
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
.driver_info = RSVD(4) },
+ { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
{ USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
{ USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 3457c1f..5f29ce8 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -378,7 +378,8 @@ static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
return SNK_UNATTACHED;
else if (port->try_role == TYPEC_SOURCE)
return SRC_UNATTACHED;
- else if (port->tcpc->config->default_role == TYPEC_SINK)
+ else if (port->tcpc->config &&
+ port->tcpc->config->default_role == TYPEC_SINK)
return SNK_UNATTACHED;
/* Fall through to return SRC_UNATTACHED */
} else if (port->port_type == TYPEC_PORT_SNK) {
@@ -585,7 +586,20 @@ static void tcpm_debugfs_init(struct tcpm_port *port)
static void tcpm_debugfs_exit(struct tcpm_port *port)
{
+ int i;
+
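+	/*
+	 * Free the log entries under logbuffer_lock and clear the slots so a
+	 * concurrent tcpm_log() cannot reuse a freed buffer.
+	 */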
+ mutex_lock(&port->logbuffer_lock);
+ for (i = 0; i < LOG_BUFFER_ENTRIES; i++) {
+ kfree(port->logbuffer[i]);
+ port->logbuffer[i] = NULL;
+ }
+ mutex_unlock(&port->logbuffer_lock);
+
debugfs_remove(port->dentry);
+ if (list_empty(&rootdir->d_subdirs)) {
+ debugfs_remove(rootdir);
+ rootdir = NULL;
+ }
}
#else
@@ -1094,7 +1108,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
break;
case CMD_ATTENTION:
/* Attention command does not have response */
- typec_altmode_attention(adev, p[1]);
+ if (adev)
+ typec_altmode_attention(adev, p[1]);
return 0;
default:
break;
@@ -1146,20 +1161,26 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
}
break;
case CMD_ENTER_MODE:
- typec_altmode_update_active(pdev, true);
+ if (adev && pdev) {
+ typec_altmode_update_active(pdev, true);
- if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
- response[0] = VDO(adev->svid, 1, CMD_EXIT_MODE);
- response[0] |= VDO_OPOS(adev->mode);
- return 1;
+ if (typec_altmode_vdm(adev, p[0], &p[1], cnt)) {
+ response[0] = VDO(adev->svid, 1,
+ CMD_EXIT_MODE);
+ response[0] |= VDO_OPOS(adev->mode);
+ return 1;
+ }
}
return 0;
case CMD_EXIT_MODE:
- typec_altmode_update_active(pdev, false);
+ if (adev && pdev) {
+ typec_altmode_update_active(pdev, false);
- /* Back to USB Operation */
- WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
- NULL));
+ /* Back to USB Operation */
+ WARN_ON(typec_altmode_notify(adev,
+ TYPEC_STATE_USB,
+ NULL));
+ }
break;
default:
break;
@@ -1169,8 +1190,10 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
switch (cmd) {
case CMD_ENTER_MODE:
/* Back to USB Operation */
- WARN_ON(typec_altmode_notify(adev, TYPEC_STATE_USB,
- NULL));
+ if (adev)
+ WARN_ON(typec_altmode_notify(adev,
+ TYPEC_STATE_USB,
+ NULL));
break;
default:
break;
@@ -1181,7 +1204,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, const __le32 *payload, int cnt,
}
/* Informing the alternate mode drivers about everything */
- typec_altmode_vdm(adev, p[0], &p[1], cnt);
+ if (adev)
+ typec_altmode_vdm(adev, p[0], &p[1], cnt);
return rlen;
}
@@ -4083,7 +4107,7 @@ static int tcpm_try_role(const struct typec_capability *cap, int role)
mutex_lock(&port->lock);
if (tcpc->try_role)
ret = tcpc->try_role(tcpc, role);
- if (!ret && !tcpc->config->try_role_hw)
+ if (!ret && (!tcpc->config || !tcpc->config->try_role_hw))
port->try_role = role;
port->try_src_count = 0;
port->try_snk_count = 0;
@@ -4730,7 +4754,7 @@ static int tcpm_copy_caps(struct tcpm_port *port,
port->typec_caps.prefer_role = tcfg->default_role;
port->typec_caps.type = tcfg->type;
port->typec_caps.data = tcfg->data;
- port->self_powered = port->tcpc->config->self_powered;
+ port->self_powered = tcfg->self_powered;
return 0;
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index ae70465..124356d 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -497,12 +497,6 @@ static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
return iov_iter_count(iter);
}
-static bool vhost_exceeds_weight(int pkts, int total_len)
-{
- return total_len >= VHOST_NET_WEIGHT ||
- pkts >= VHOST_NET_PKT_WEIGHT;
-}
-
static int get_tx_bufs(struct vhost_net *net,
struct vhost_net_virtqueue *nvq,
struct msghdr *msg,
@@ -557,7 +551,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
int err;
int sent_pkts = 0;
- for (;;) {
+ do {
bool busyloop_intr = false;
head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
@@ -598,11 +592,7 @@ static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
err, len);
if (++nvq->done_idx >= VHOST_NET_BATCH)
vhost_net_signal_used(nvq);
- if (vhost_exceeds_weight(++sent_pkts, total_len)) {
- vhost_poll_queue(&vq->poll);
- break;
- }
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
vhost_net_signal_used(nvq);
}
@@ -626,7 +616,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
bool zcopy_used;
int sent_pkts = 0;
- for (;;) {
+ do {
bool busyloop_intr;
/* Release DMAs done buffers first */
@@ -701,11 +691,7 @@ static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
else
vhost_zerocopy_signal_used(net, vq);
vhost_net_tx_packet(net);
- if (unlikely(vhost_exceeds_weight(++sent_pkts, total_len))) {
- vhost_poll_queue(&vq->poll);
- break;
- }
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
}
/* Expects to be always run from workqueue - which acts as
@@ -941,8 +927,11 @@ static void handle_rx(struct vhost_net *net)
vq->log : NULL;
mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
- while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
- &busyloop_intr))) {
+ do {
+ sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
+ &busyloop_intr);
+ if (!sock_len)
+ break;
sock_len += sock_hlen;
vhost_len = sock_len + vhost_hlen;
headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
@@ -1027,14 +1016,11 @@ static void handle_rx(struct vhost_net *net)
vhost_log_write(vq, vq_log, log, vhost_len,
vq->iov, in);
total_len += vhost_len;
- if (unlikely(vhost_exceeds_weight(++recv_pkts, total_len))) {
- vhost_poll_queue(&vq->poll);
- goto out;
- }
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));
+
if (unlikely(busyloop_intr))
vhost_poll_queue(&vq->poll);
- else
+ else if (!sock_len)
vhost_net_enable_vq(net, vq);
out:
vhost_net_signal_used(nvq);
@@ -1115,7 +1101,8 @@ static int vhost_net_open(struct inode *inode, struct file *f)
vhost_net_buf_init(&n->vqs[i].rxq);
}
vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
- UIO_MAXIOV + VHOST_NET_BATCH);
+ UIO_MAXIOV + VHOST_NET_BATCH,
+ VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT);
vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev);
vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev);
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 0cfa925..5e298d9 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -57,6 +57,12 @@
#define VHOST_SCSI_PREALLOC_UPAGES 2048
#define VHOST_SCSI_PREALLOC_PROT_SGLS 2048
+/* Max number of requests before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * requests.
+ */
+#define VHOST_SCSI_WEIGHT 256
+
struct vhost_scsi_inflight {
/* Wait for the flush operation to finish */
struct completion comp;
@@ -811,7 +817,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
u64 tag;
u32 exp_data_len, data_direction;
unsigned int out = 0, in = 0;
- int head, ret, prot_bytes;
+ int head, ret, prot_bytes, c = 0;
size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
size_t out_size, in_size;
u16 lun;
@@ -830,7 +836,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
vhost_disable_notify(&vs->dev, vq);
- for (;;) {
+ do {
head = vhost_get_vq_desc(vq, vq->iov,
ARRAY_SIZE(vq->iov), &out, &in,
NULL, NULL);
@@ -1045,7 +1051,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
*/
INIT_WORK(&cmd->work, vhost_scsi_submission_work);
queue_work(vhost_scsi_workqueue, &cmd->work);
- }
+ } while (likely(!vhost_exceeds_weight(vq, ++c, 0)));
out:
mutex_unlock(&vq->mutex);
}
@@ -1398,7 +1404,8 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
vqs[i] = &vs->vqs[i].vq;
vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
}
- vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV);
+ vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV,
+ VHOST_SCSI_WEIGHT, 0);
vhost_scsi_init_inflight(vs, NULL);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c163bc1..0752f8d 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -413,8 +413,24 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev)
vhost_vq_free_iovecs(dev->vqs[i]);
}
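+/*
+ * Check whether this run of the work item has exhausted its packet or byte
+ * budget; if so, requeue the virtqueue's poll work and return true. A
+ * byte_weight of 0 disables the byte limit.
+ */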
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq,
+ int pkts, int total_len)
+{
+ struct vhost_dev *dev = vq->dev;
+
+ if ((dev->byte_weight && total_len >= dev->byte_weight) ||
+ pkts >= dev->weight) {
+ vhost_poll_queue(&vq->poll);
+ return true;
+ }
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(vhost_exceeds_weight);
+
void vhost_dev_init(struct vhost_dev *dev,
- struct vhost_virtqueue **vqs, int nvqs, int iov_limit)
+ struct vhost_virtqueue **vqs, int nvqs,
+ int iov_limit, int weight, int byte_weight)
{
struct vhost_virtqueue *vq;
int i;
@@ -428,6 +444,8 @@ void vhost_dev_init(struct vhost_dev *dev,
dev->mm = NULL;
dev->worker = NULL;
dev->iov_limit = iov_limit;
+ dev->weight = weight;
+ dev->byte_weight = byte_weight;
init_llist_head(&dev->work_list);
init_waitqueue_head(&dev->wait);
INIT_LIST_HEAD(&dev->read_list);
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 9490e7d..27a78a9 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -171,10 +171,13 @@ struct vhost_dev {
struct list_head pending_list;
wait_queue_head_t wait;
int iov_limit;
+ int weight;
+ int byte_weight;
};
+bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
- int nvqs, int iov_limit);
+ int nvqs, int iov_limit, int weight, int byte_weight);
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index e440f87..bab495d 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -21,6 +21,14 @@
#include "vhost.h"
#define VHOST_VSOCK_DEFAULT_HOST_CID 2
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_VSOCK_WEIGHT 0x80000
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others with
+ * small pkts.
+ */
+#define VHOST_VSOCK_PKT_WEIGHT 256
enum {
VHOST_VSOCK_FEATURES = VHOST_FEATURES,
@@ -78,6 +86,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
struct vhost_virtqueue *vq)
{
struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
+ int pkts = 0, total_len = 0;
bool added = false;
bool restart_tx = false;
@@ -89,7 +98,7 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
/* Avoid further vmexits, we're already processing the virtqueue */
vhost_disable_notify(&vsock->dev, vq);
- for (;;) {
+ do {
struct virtio_vsock_pkt *pkt;
struct iov_iter iov_iter;
unsigned out, in;
@@ -174,8 +183,9 @@ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
*/
virtio_transport_deliver_tap_pkt(pkt);
+ total_len += pkt->len;
virtio_transport_free_pkt(pkt);
- }
+	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
if (added)
vhost_signal(&vsock->dev, vq);
@@ -350,7 +360,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
dev);
struct virtio_vsock_pkt *pkt;
- int head;
+ int head, pkts = 0, total_len = 0;
unsigned int out, in;
bool added = false;
@@ -360,7 +370,7 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
goto out;
vhost_disable_notify(&vsock->dev, vq);
- for (;;) {
+ do {
u32 len;
if (!vhost_vsock_more_replies(vsock)) {
@@ -401,9 +411,11 @@ static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
else
virtio_transport_free_pkt(pkt);
- vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
+ len += sizeof(pkt->hdr);
+ vhost_add_used(vq, head, len);
+ total_len += len;
added = true;
- }
+	} while (likely(!vhost_exceeds_weight(vq, ++pkts, total_len)));
no_more_replies:
if (added)
@@ -531,7 +543,9 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;
- vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV);
+ vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs),
+ UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT,
+ VHOST_VSOCK_WEIGHT);
file->private_data = vsock;
spin_lock_init(&vsock->send_pkt_list_lock);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index aa081f8..3d99975 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -357,8 +357,8 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
/* Convert the size to actually allocated. */
size = 1UL << (order + XEN_PAGE_SHIFT);
- if (((dev_addr + size - 1 <= dma_mask)) ||
- range_straddles_page_boundary(phys, size))
+ if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+ range_straddles_page_boundary(phys, size)))
xen_destroy_contiguous_region(phys, order);
xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c
index 73427d8e..e569413 100644
--- a/drivers/xen/xen-pciback/conf_space_capability.c
+++ b/drivers/xen/xen-pciback/conf_space_capability.c
@@ -116,13 +116,12 @@ static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
{
int err;
u16 old_value;
- pci_power_t new_state, old_state;
+ pci_power_t new_state;
err = pci_read_config_word(dev, offset, &old_value);
if (err)
goto out;
- old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
new_value &= PM_OK_BITS;
diff --git a/fs/adfs/super.c b/fs/adfs/super.c
index 7e099a7..4dc15b2 100644
--- a/fs/adfs/super.c
+++ b/fs/adfs/super.c
@@ -369,6 +369,7 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
struct buffer_head *bh;
struct object_info root_obj;
unsigned char *b_data;
+ unsigned int blocksize;
struct adfs_sb_info *asb;
struct inode *root;
int ret = -EINVAL;
@@ -420,8 +421,10 @@ static int adfs_fill_super(struct super_block *sb, void *data, int silent)
goto error_free_bh;
}
+ blocksize = 1 << dr->log2secsize;
brelse(bh);
- if (sb_set_blocksize(sb, 1 << dr->log2secsize)) {
+
+ if (sb_set_blocksize(sb, blocksize)) {
bh = sb_bread(sb, ADFS_DISCRECORD / sb->s_blocksize);
if (!bh) {
adfs_error(sb, "couldn't read superblock on "
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index ac6c383..1985565 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1485,7 +1485,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr)
goto out;
}
- trans = btrfs_attach_transaction(root);
+ trans = btrfs_join_transaction_nostart(root);
if (IS_ERR(trans)) {
if (PTR_ERR(trans) != -ENOENT && PTR_ERR(trans) != -EROFS) {
ret = PTR_ERR(trans);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index c1cd3fe..355ff08 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -388,10 +388,31 @@ static noinline int add_async_extent(struct async_cow *cow,
return 0;
}
+/*
+ * Check if the inode has flags compatible with compression
+ */
+static inline bool inode_can_compress(struct inode *inode)
+{
+ if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW ||
+ BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
+ return false;
+ return true;
+}
+
+/*
+ * Check if the inode needs to be submitted to compression, based on mount
+ * options, defragmentation, properties or heuristics.
+ */
static inline int inode_need_compress(struct inode *inode, u64 start, u64 end)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+ if (!inode_can_compress(inode)) {
+ WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
+ KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
+ btrfs_ino(BTRFS_I(inode)));
+ return 0;
+ }
/* force compress */
if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
return 1;
@@ -1596,7 +1617,8 @@ static int run_delalloc_range(void *private_data, struct page *locked_page,
} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
ret = run_delalloc_nocow(inode, locked_page, start, end,
page_started, 0, nr_written);
- } else if (!inode_need_compress(inode, start, end)) {
+ } else if (!inode_can_compress(inode) ||
+ !inode_need_compress(inode, start, end)) {
ret = cow_file_range(inode, locked_page, start, end, end,
page_started, nr_written, 1, NULL);
} else {
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index e46e83e..734866a 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2249,6 +2249,7 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
int ret = 0;
int i;
u64 *i_qgroups;
+ bool committing = false;
struct btrfs_fs_info *fs_info = trans->fs_info;
struct btrfs_root *quota_root;
struct btrfs_qgroup *srcgroup;
@@ -2256,7 +2257,25 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
u32 level_size = 0;
u64 nums;
- mutex_lock(&fs_info->qgroup_ioctl_lock);
+ /*
+ * There are only two callers of this function.
+ *
+ * One in create_subvol() in the ioctl context, which needs to hold
+ * the qgroup_ioctl_lock.
+ *
+	 * The other one in create_pending_snapshot(), where no other qgroup
+	 * code can modify the fs as they all need to either start a new trans
+	 * or hold a trans handle, thus we don't need to hold
+	 * qgroup_ioctl_lock.
+	 * This avoids a long and complex lock chain and makes lockdep happy.
+ */
+ spin_lock(&fs_info->trans_lock);
+ if (trans->transaction->state == TRANS_STATE_COMMIT_DOING)
+ committing = true;
+ spin_unlock(&fs_info->trans_lock);
+
+ if (!committing)
+ mutex_lock(&fs_info->qgroup_ioctl_lock);
if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags))
goto out;
@@ -2420,7 +2439,8 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, u64 srcid,
unlock:
spin_unlock(&fs_info->qgroup_lock);
out:
- mutex_unlock(&fs_info->qgroup_ioctl_lock);
+ if (!committing)
+ mutex_unlock(&fs_info->qgroup_ioctl_lock);
return ret;
}
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 258392b..48ddbc1 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -6272,68 +6272,21 @@ static int changed_extent(struct send_ctx *sctx,
{
int ret = 0;
- if (sctx->cur_ino != sctx->cmp_key->objectid) {
-
- if (result == BTRFS_COMPARE_TREE_CHANGED) {
- struct extent_buffer *leaf_l;
- struct extent_buffer *leaf_r;
- struct btrfs_file_extent_item *ei_l;
- struct btrfs_file_extent_item *ei_r;
-
- leaf_l = sctx->left_path->nodes[0];
- leaf_r = sctx->right_path->nodes[0];
- ei_l = btrfs_item_ptr(leaf_l,
- sctx->left_path->slots[0],
- struct btrfs_file_extent_item);
- ei_r = btrfs_item_ptr(leaf_r,
- sctx->right_path->slots[0],
- struct btrfs_file_extent_item);
-
- /*
- * We may have found an extent item that has changed
- * only its disk_bytenr field and the corresponding
- * inode item was not updated. This case happens due to
- * very specific timings during relocation when a leaf
- * that contains file extent items is COWed while
- * relocation is ongoing and its in the stage where it
- * updates data pointers. So when this happens we can
- * safely ignore it since we know it's the same extent,
- * but just at different logical and physical locations
- * (when an extent is fully replaced with a new one, we
- * know the generation number must have changed too,
- * since snapshot creation implies committing the current
- * transaction, and the inode item must have been updated
- * as well).
- * This replacement of the disk_bytenr happens at
- * relocation.c:replace_file_extents() through
- * relocation.c:btrfs_reloc_cow_block().
- */
- if (btrfs_file_extent_generation(leaf_l, ei_l) ==
- btrfs_file_extent_generation(leaf_r, ei_r) &&
- btrfs_file_extent_ram_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_ram_bytes(leaf_r, ei_r) &&
- btrfs_file_extent_compression(leaf_l, ei_l) ==
- btrfs_file_extent_compression(leaf_r, ei_r) &&
- btrfs_file_extent_encryption(leaf_l, ei_l) ==
- btrfs_file_extent_encryption(leaf_r, ei_r) &&
- btrfs_file_extent_other_encoding(leaf_l, ei_l) ==
- btrfs_file_extent_other_encoding(leaf_r, ei_r) &&
- btrfs_file_extent_type(leaf_l, ei_l) ==
- btrfs_file_extent_type(leaf_r, ei_r) &&
- btrfs_file_extent_disk_bytenr(leaf_l, ei_l) !=
- btrfs_file_extent_disk_bytenr(leaf_r, ei_r) &&
- btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) &&
- btrfs_file_extent_offset(leaf_l, ei_l) ==
- btrfs_file_extent_offset(leaf_r, ei_r) &&
- btrfs_file_extent_num_bytes(leaf_l, ei_l) ==
- btrfs_file_extent_num_bytes(leaf_r, ei_r))
- return 0;
- }
-
- inconsistent_snapshot_error(sctx, result, "extent");
- return -EIO;
- }
+ /*
+ * We have found an extent item that changed without the inode item
+ * having changed. This can happen either after relocation (where the
+ * disk_bytenr of an extent item is replaced at
+ * relocation.c:replace_file_extents()) or after deduplication into a
+ * file in both the parent and send snapshots (where an extent item can
+ * get modified or replaced with a new one). Note that deduplication
+ * updates the inode item, but it only changes the iversion (sequence
+ * field in the inode item) of the inode, so if a file is deduplicated
+ * the same number of times in both the parent and send snapshots, its
+ * iversion becomes the same in both snapshots, and hence the inode item
+ * is the same in both snapshots.
+ */
+ if (sctx->cur_ino != sctx->cmp_key->objectid)
+ return 0;
if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
if (result != BTRFS_COMPARE_TREE_DELETED)
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index bb8f6c0..26317bc 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -28,15 +28,18 @@ static const unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
[TRANS_STATE_COMMIT_START] = (__TRANS_START | __TRANS_ATTACH),
[TRANS_STATE_COMMIT_DOING] = (__TRANS_START |
__TRANS_ATTACH |
- __TRANS_JOIN),
+ __TRANS_JOIN |
+ __TRANS_JOIN_NOSTART),
[TRANS_STATE_UNBLOCKED] = (__TRANS_START |
__TRANS_ATTACH |
__TRANS_JOIN |
- __TRANS_JOIN_NOLOCK),
+ __TRANS_JOIN_NOLOCK |
+ __TRANS_JOIN_NOSTART),
[TRANS_STATE_COMPLETED] = (__TRANS_START |
__TRANS_ATTACH |
__TRANS_JOIN |
- __TRANS_JOIN_NOLOCK),
+ __TRANS_JOIN_NOLOCK |
+ __TRANS_JOIN_NOSTART),
};
void btrfs_put_transaction(struct btrfs_transaction *transaction)
@@ -531,7 +534,8 @@ start_transaction(struct btrfs_root *root, unsigned int num_items,
ret = join_transaction(fs_info, type);
if (ret == -EBUSY) {
wait_current_trans(fs_info);
- if (unlikely(type == TRANS_ATTACH))
+ if (unlikely(type == TRANS_ATTACH ||
+ type == TRANS_JOIN_NOSTART))
ret = -ENOENT;
}
} while (ret == -EBUSY);
@@ -648,6 +652,16 @@ struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root
}
/*
+ * Similar to regular join but it never starts a transaction when none is
+ * running or after waiting for the current one to finish.
+ */
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root)
+{
+ return start_transaction(root, 0, TRANS_JOIN_NOSTART,
+ BTRFS_RESERVE_NO_FLUSH, true);
+}
+
+/*
* btrfs_attach_transaction() - catch the running transaction
*
* It is used when we want to commit the current the transaction, but
@@ -2027,6 +2041,16 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
}
} else {
spin_unlock(&fs_info->trans_lock);
+ /*
+ * The previous transaction was aborted and was already removed
+ * from the list of transactions at fs_info->trans_list. So we
+ * abort to prevent writing a new superblock that reflects a
+ * corrupt state (pointing to trees with unwritten nodes/leaves).
+ */
+ if (test_bit(BTRFS_FS_STATE_TRANS_ABORTED, &fs_info->fs_state)) {
+ ret = -EROFS;
+ goto cleanup_transaction;
+ }
}
extwriter_counter_dec(cur_trans, trans->type);
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 4cbb1b5..c1d34cc 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -97,11 +97,13 @@ struct btrfs_transaction {
#define __TRANS_JOIN (1U << 11)
#define __TRANS_JOIN_NOLOCK (1U << 12)
#define __TRANS_DUMMY (1U << 13)
+#define __TRANS_JOIN_NOSTART (1U << 14)
#define TRANS_START (__TRANS_START | __TRANS_FREEZABLE)
#define TRANS_ATTACH (__TRANS_ATTACH)
#define TRANS_JOIN (__TRANS_JOIN | __TRANS_FREEZABLE)
#define TRANS_JOIN_NOLOCK (__TRANS_JOIN_NOLOCK)
+#define TRANS_JOIN_NOSTART (__TRANS_JOIN_NOSTART)
#define TRANS_EXTWRITERS (__TRANS_START | __TRANS_ATTACH)
@@ -187,6 +189,7 @@ struct btrfs_trans_handle *btrfs_start_transaction_fallback_global_rsv(
int min_factor);
struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
+struct btrfs_trans_handle *btrfs_join_transaction_nostart(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root);
struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
struct btrfs_root *root);
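
Per the comment on btrfs_join_transaction_nostart() and the start_transaction() hunk that maps -EBUSY to -ENOENT for TRANS_JOIN_NOSTART, a caller that only wants to piggy-back on an already running transaction would treat -ENOENT as "nothing to do". A hedged usage sketch (not taken from this patch):

	struct btrfs_trans_handle *trans;

	trans = btrfs_join_transaction_nostart(root);
	if (IS_ERR(trans)) {
		if (PTR_ERR(trans) == -ENOENT)
			return 0;	/* no transaction running: nothing to join */
		return PTR_ERR(trans);
	}
	/* ... piggy-back work onto the running transaction ... */
	return btrfs_commit_transaction(trans);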
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 2fd0003..6e008bd 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5040,8 +5040,7 @@ static inline int btrfs_chunk_max_errors(struct map_lookup *map)
if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
BTRFS_BLOCK_GROUP_RAID10 |
- BTRFS_BLOCK_GROUP_RAID5 |
- BTRFS_BLOCK_GROUP_DUP)) {
+ BTRFS_BLOCK_GROUP_RAID5)) {
max_errors = 1;
} else if (map->type & BTRFS_BLOCK_GROUP_RAID6) {
max_errors = 2;
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 9c332a6..476728b 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -913,8 +913,9 @@ static int ceph_writepages_start(struct address_space *mapping,
if (page_offset(page) >= ceph_wbc.i_size) {
dout("%p page eof %llu\n",
page, ceph_wbc.i_size);
- if (ceph_wbc.size_stable ||
- page_offset(page) >= i_size_read(inode))
+ if ((ceph_wbc.size_stable ||
+ page_offset(page) >= i_size_read(inode)) &&
+ clear_page_dirty_for_io(page))
mapping->a_ops->invalidatepage(page,
0, PAGE_SIZE);
unlock_page(page);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index c7542e8..a11fa0b 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1237,20 +1237,23 @@ static int send_cap_msg(struct cap_msg_args *arg)
}
/*
- * Queue cap releases when an inode is dropped from our cache. Since
- * inode is about to be destroyed, there is no need for i_ceph_lock.
+ * Queue cap releases when an inode is dropped from our cache.
*/
void ceph_queue_caps_release(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct rb_node *p;
+ /* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
+ * may call __ceph_caps_issued_mask() on a freeing inode. */
+ spin_lock(&ci->i_ceph_lock);
p = rb_first(&ci->i_caps);
while (p) {
struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
p = rb_next(p);
__ceph_remove_cap(cap, true);
}
+ spin_unlock(&ci->i_ceph_lock);
}
/*
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 9dae2ec..6a8f4a9 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
req->r_wait_for_completion = ceph_lock_wait_for_completion;
err = ceph_mdsc_do_request(mdsc, inode, req);
-
- if (operation == CEPH_MDS_OP_GETFILELOCK) {
+ if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
fl->fl_type = F_RDLCK;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 582e28f..d8579a5 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -526,7 +526,12 @@ static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci,
long long release_count,
long long ordered_count)
{
- smp_mb__before_atomic();
+ /*
+ * Make sure operations that set up the readdir cache (update page
+ * cache and i_size) are strongly ordered w.r.t. the following
+ * atomic64_set() operations.
+ */
+ smp_mb();
atomic64_set(&ci->i_complete_seq[0], release_count);
atomic64_set(&ci->i_complete_seq[1], ordered_count);
}
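
The barrier swap is subtle enough to spell out: per Documentation/atomic_t.txt, smp_mb__before_atomic() only orders against read-modify-write atomic operations, and atomic64_set() is a plain store, not an RMW op. The fixed code, annotated:

	/*
	 * smp_mb__before_atomic() pairs only with RMW atomics
	 * (atomic_add() and friends); atomic64_set() is a plain
	 * store, so a full smp_mb() is needed to order the earlier
	 * page-cache and i_size updates before it.
	 */
	smp_mb();
	atomic64_set(&ci->i_complete_seq[0], release_count);
	atomic64_set(&ci->i_complete_seq[1], ordered_count);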
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 5cc8b94..0a2d489 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -79,7 +79,7 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
const char *ns_field = " pool_namespace=";
char buf[128];
size_t len, total_len = 0;
- int ret;
+ ssize_t ret;
pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
@@ -103,11 +103,8 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
if (pool_ns)
total_len += strlen(ns_field) + pool_ns->len;
- if (!size) {
- ret = total_len;
- } else if (total_len > size) {
- ret = -ERANGE;
- } else {
+ ret = total_len;
+ if (size >= total_len) {
memcpy(val, buf, len);
ret = len;
if (pool_name) {
@@ -817,8 +814,11 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
if (err)
return err;
err = -ENODATA;
- if (!(vxattr->exists_cb && !vxattr->exists_cb(ci)))
+ if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
err = vxattr->getxattr_cb(ci, value, size);
+ if (size && size < err)
+ err = -ERANGE;
+ }
return err;
}
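
The __ceph_getxattr change restores the standard xattr calling convention: a zero size reports the required length, and a nonzero size that is too small now yields -ERANGE instead of a truncated value. From user space that convention is consumed with the usual probe-then-read pattern; a minimal sketch:

	#include <sys/xattr.h>
	#include <stdlib.h>

	/* Probe with size 0 to learn the length, then fetch for real.
	 * The value can still grow between the two calls, so the second
	 * call may fail with ERANGE and callers should retry. */
	static ssize_t read_xattr(const char *path, const char *name, char **out)
	{
		ssize_t len = getxattr(path, name, NULL, 0);

		if (len < 0)
			return -1;
		*out = malloc(len);
		if (!*out)
			return -1;
		return getxattr(path, name, *out, len);
	}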
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f31339d..c53a2e8 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -563,10 +563,10 @@ static bool
server_unresponsive(struct TCP_Server_Info *server)
{
/*
- * We need to wait 2 echo intervals to make sure we handle such
+ * We need to wait 3 echo intervals to make sure we handle such
* situations right:
* 1s client sends a normal SMB request
- * 2s client gets a response
+ * 3s client gets a response
* 30s echo workqueue job pops, and decides we got a response recently
* and don't need to send another
* ...
@@ -575,9 +575,9 @@ server_unresponsive(struct TCP_Server_Info *server)
*/
if ((server->tcpStatus == CifsGood ||
server->tcpStatus == CifsNeedNegotiate) &&
- time_after(jiffies, server->lstrp + 2 * server->echo_interval)) {
+ time_after(jiffies, server->lstrp + 3 * server->echo_interval)) {
cifs_dbg(VFS, "Server %s has not responded in %lu seconds. Reconnecting...\n",
- server->hostname, (2 * server->echo_interval) / HZ);
+ server->hostname, (3 * server->echo_interval) / HZ);
cifs_reconnect(server);
wake_up(&server->response_q);
return true;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 0ccf8f9..cc9e846 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -2545,7 +2545,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
unsigned int buflen)
{
- sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
+ void *addr;
+ /*
+ * VMAP_STACK (at least) puts stack into the vmalloc address space
+ */
+ if (is_vmalloc_addr(buf))
+ addr = vmalloc_to_page(buf);
+ else
+ addr = virt_to_page(buf);
+ sg_set_page(sg, addr, buflen, offset_in_page(buf));
}
/* Assumes the first rqst has a transform header as the first iov.
@@ -3121,7 +3129,6 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
{
int ret, length;
char *buf = server->smallbuf;
- char *tmpbuf;
struct smb2_sync_hdr *shdr;
unsigned int pdu_length = server->pdu_size;
unsigned int buf_size;
@@ -3151,18 +3158,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
return length;
next_is_large = server->large_buf;
- one_more:
+one_more:
shdr = (struct smb2_sync_hdr *)buf;
if (shdr->NextCommand) {
- if (next_is_large) {
- tmpbuf = server->bigbuf;
+ if (next_is_large)
next_buffer = (char *)cifs_buf_get();
- } else {
- tmpbuf = server->smallbuf;
+ else
next_buffer = (char *)cifs_small_buf_get();
- }
memcpy(next_buffer,
- tmpbuf + le32_to_cpu(shdr->NextCommand),
+ buf + le32_to_cpu(shdr->NextCommand),
pdu_length - le32_to_cpu(shdr->NextCommand));
}
@@ -3191,12 +3195,21 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
pdu_length -= le32_to_cpu(shdr->NextCommand);
server->large_buf = next_is_large;
if (next_is_large)
- server->bigbuf = next_buffer;
+ server->bigbuf = buf = next_buffer;
else
- server->smallbuf = next_buffer;
-
- buf += le32_to_cpu(shdr->NextCommand);
+ server->smallbuf = buf = next_buffer;
goto one_more;
+ } else if (ret != 0) {
+ /*
+ * ret != 0 here means that we didn't get to handle_mid() thus
+ * server->smallbuf and server->bigbuf are still valid. We need
+ * to free next_buffer because it is not going to be used
+ * anywhere.
+ */
+ if (next_is_large)
+ free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
+ else
+ free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
}
return ret;
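
smb2_sg_set_buf() above keeps addr as a void * even though both branches really produce a struct page *; a typed variant of the same logic makes the intent explicit (a sketch, not part of the patch):

	static struct page *smb2_buf_to_page(const void *buf)
	{
		/* VMAP_STACK (at least) puts stack buffers in vmalloc
		 * space, where virt_to_page() must not be used */
		if (is_vmalloc_addr(buf))
			return vmalloc_to_page(buf);
		return virt_to_page(buf);
	}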
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index c181f16..2bc47eb 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -168,7 +168,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
if (tcon == NULL)
return 0;
- if (smb2_command == SMB2_TREE_CONNECT)
+ if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
return 0;
if (tcon->tidStatus == CifsExiting) {
@@ -1006,7 +1006,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
else
req->SecurityMode = 0;
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
+#else
req->Capabilities = 0;
+#endif /* CONFIG_CIFS_DFS_UPCALL */
+
req->Channel = 0; /* MBZ */
sess_data->iov[0].iov_base = (char *)req;
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index c5234c2..55824cb 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -187,8 +187,11 @@ static ssize_t coda_psdev_write(struct file *file, const char __user *buf,
if (req->uc_opcode == CODA_OPEN_BY_FD) {
struct coda_open_by_fd_out *outp =
(struct coda_open_by_fd_out *)req->uc_data;
- if (!outp->oh.result)
+ if (!outp->oh.result) {
outp->fh = fget(outp->fd);
+ if (!outp->fh)
+ return -EBADF;
+ }
}
wake_up(&req->uc_sleep);
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index a9b0094..8f08095 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -894,9 +894,6 @@ COMPATIBLE_IOCTL(PPPIOCDISCONN)
COMPATIBLE_IOCTL(PPPIOCATTCHAN)
COMPATIBLE_IOCTL(PPPIOCGCHAN)
COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
-/* PPPOX */
-COMPATIBLE_IOCTL(PPPOEIOCSFWD)
-COMPATIBLE_IOCTL(PPPOEIOCDFWD)
/* Big A */
/* sparc only */
/* Big Q for sound/OSS */
diff --git a/fs/dax.c b/fs/dax.c
index 75a289c..f0d932f 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -659,7 +659,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
* guaranteed to either see new references or prevent new
* references from being established.
*/
- unmap_mapping_range(mapping, 0, 0, 1);
+ unmap_mapping_range(mapping, 0, 0, 0);
while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
min(end - index, (pgoff_t)PAGEVEC_SIZE),
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index a5e4a22..a93ebff 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -1630,8 +1630,10 @@ static void clean_writequeues(void)
static void work_stop(void)
{
- destroy_workqueue(recv_workqueue);
- destroy_workqueue(send_workqueue);
+ if (recv_workqueue)
+ destroy_workqueue(recv_workqueue);
+ if (send_workqueue)
+ destroy_workqueue(send_workqueue);
}
static int work_start(void)
@@ -1691,13 +1693,17 @@ static void work_flush(void)
struct hlist_node *n;
struct connection *con;
- flush_workqueue(recv_workqueue);
- flush_workqueue(send_workqueue);
+ if (recv_workqueue)
+ flush_workqueue(recv_workqueue);
+ if (send_workqueue)
+ flush_workqueue(send_workqueue);
do {
ok = 1;
foreach_conn(stop_conn);
- flush_workqueue(recv_workqueue);
- flush_workqueue(send_workqueue);
+ if (recv_workqueue)
+ flush_workqueue(recv_workqueue);
+ if (send_workqueue)
+ flush_workqueue(send_workqueue);
for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
hlist_for_each_entry_safe(con, n,
&connection_hash[i], list) {
diff --git a/fs/exec.c b/fs/exec.c
index 77c03ce..aa9d20c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1826,7 +1826,7 @@ static int __do_execve_file(int fd, struct filename *filename,
membarrier_execve(current);
rseq_execve(current);
acct_update_integrals(current);
- task_numa_free(current);
+ task_numa_free(current, false);
free_bprm(bprm);
kfree(pathbuf);
if (filename)
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 8dee063..ac824f6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -3393,6 +3393,11 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
seg_i = CURSEG_I(sbi, i);
segno = le32_to_cpu(ckpt->cur_data_segno[i]);
blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
+ if (blk_off > ENTRIES_IN_SUM) {
+ f2fs_bug_on(sbi, 1);
+ f2fs_put_page(page, 1);
+ return -EFAULT;
+ }
seg_i->next_segno = segno;
reset_curseg(sbi, i, 0);
seg_i->alloc_type = ckpt->alloc_type[i];
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 7f8bb08..d14d71d 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -392,6 +392,19 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
return mp->mp_aheight - x - 1;
}
+static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
+{
+ sector_t factor = 1, block = 0;
+ int hgt;
+
+ for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
+ if (hgt < mp->mp_aheight)
+ block += mp->mp_list[hgt] * factor;
+ factor *= sdp->sd_inptrs;
+ }
+ return block;
+}
+
static void release_metapath(struct metapath *mp)
{
int i;
@@ -432,60 +445,84 @@ static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *pt
return ptr - first;
}
-typedef const __be64 *(*gfs2_metadata_walker)(
- struct metapath *mp,
- const __be64 *start, const __be64 *end,
- u64 factor, void *data);
+enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
-#define WALK_STOP ((__be64 *)0)
-#define WALK_NEXT ((__be64 *)1)
+/*
+ * gfs2_metadata_walker - walk an indirect block
+ * @mp: Metapath to indirect block
+ * @ptrs: Number of pointers to look at
+ *
+ * When returning WALK_FOLLOW, the walker must update @mp to point at the right
+ * indirect block to follow.
+ */
+typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
+ unsigned int ptrs);
-static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
- u64 len, struct metapath *mp, gfs2_metadata_walker walker,
- void *data)
+/*
+ * gfs2_walk_metadata - walk a tree of indirect blocks
+ * @inode: The inode
+ * @mp: Starting point of walk
+ * @max_len: Maximum number of blocks to walk
+ * @walker: Called during the walk
+ *
+ * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
+ * past the end of metadata, and a negative error code otherwise.
+ */
+
+static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
+ u64 max_len, gfs2_metadata_walker walker)
{
- struct metapath clone;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- const __be64 *start, *end, *ptr;
u64 factor = 1;
unsigned int hgt;
- int ret = 0;
+ int ret;
- for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--)
+ /*
+ * The walk starts in the lowest allocated indirect block, which may be
+ * before the position indicated by @mp. Adjust @max_len accordingly
+ * to avoid a short walk.
+ */
+ for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
+ max_len += mp->mp_list[hgt] * factor;
+ mp->mp_list[hgt] = 0;
factor *= sdp->sd_inptrs;
+ }
for (;;) {
- u64 step;
+ u16 start = mp->mp_list[hgt];
+ enum walker_status status;
+ unsigned int ptrs;
+ u64 len;
/* Walk indirect block. */
- start = metapointer(hgt, mp);
- end = metaend(hgt, mp);
-
- step = (end - start) * factor;
- if (step > len)
- end = start + DIV_ROUND_UP_ULL(len, factor);
-
- ptr = walker(mp, start, end, factor, data);
- if (ptr == WALK_STOP)
+ ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
+ len = ptrs * factor;
+ if (len > max_len)
+ ptrs = DIV_ROUND_UP_ULL(max_len, factor);
+ status = walker(mp, ptrs);
+ switch (status) {
+ case WALK_STOP:
+ return 1;
+ case WALK_FOLLOW:
+ BUG_ON(mp->mp_aheight == mp->mp_fheight);
+ ptrs = mp->mp_list[hgt] - start;
+ len = ptrs * factor;
break;
- if (step >= len)
+ case WALK_CONTINUE:
break;
- len -= step;
- if (ptr != WALK_NEXT) {
- BUG_ON(!*ptr);
- mp->mp_list[hgt] += ptr - start;
- goto fill_up_metapath;
}
+ if (len >= max_len)
+ break;
+ max_len -= len;
+ if (status == WALK_FOLLOW)
+ goto fill_up_metapath;
lower_metapath:
/* Decrease height of metapath. */
- if (mp != &clone) {
- clone_metapath(&clone, mp);
- mp = &clone;
- }
brelse(mp->mp_bh[hgt]);
mp->mp_bh[hgt] = NULL;
+ mp->mp_list[hgt] = 0;
if (!hgt)
break;
hgt--;
@@ -493,10 +530,7 @@ static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
/* Advance in metadata tree. */
(mp->mp_list[hgt])++;
- start = metapointer(hgt, mp);
- end = metaend(hgt, mp);
- if (start >= end) {
- mp->mp_list[hgt] = 0;
+ if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
if (!hgt)
break;
goto lower_metapath;
@@ -504,44 +538,36 @@ static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
fill_up_metapath:
/* Increase height of metapath. */
- if (mp != &clone) {
- clone_metapath(&clone, mp);
- mp = &clone;
- }
ret = fillup_metapath(ip, mp, ip->i_height - 1);
if (ret < 0)
- break;
+ return ret;
hgt += ret;
for (; ret; ret--)
do_div(factor, sdp->sd_inptrs);
mp->mp_aheight = hgt + 1;
}
- if (mp == &clone)
- release_metapath(mp);
- return ret;
+ return 0;
}
-struct gfs2_hole_walker_args {
- u64 blocks;
-};
-
-static const __be64 *gfs2_hole_walker(struct metapath *mp,
- const __be64 *start, const __be64 *end,
- u64 factor, void *data)
+static enum walker_status gfs2_hole_walker(struct metapath *mp,
+ unsigned int ptrs)
{
- struct gfs2_hole_walker_args *args = data;
- const __be64 *ptr;
+ const __be64 *start, *ptr, *end;
+ unsigned int hgt;
+
+ hgt = mp->mp_aheight - 1;
+ start = metapointer(hgt, mp);
+ end = start + ptrs;
for (ptr = start; ptr < end; ptr++) {
if (*ptr) {
- args->blocks += (ptr - start) * factor;
+ mp->mp_list[hgt] += ptr - start;
if (mp->mp_aheight == mp->mp_fheight)
return WALK_STOP;
- return ptr; /* increase height */
+ return WALK_FOLLOW;
}
}
- args->blocks += (end - start) * factor;
- return WALK_NEXT;
+ return WALK_CONTINUE;
}
/**
@@ -559,12 +585,24 @@ static const __be64 *gfs2_hole_walker(struct metapath *mp,
static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
struct metapath *mp, struct iomap *iomap)
{
- struct gfs2_hole_walker_args args = { };
- int ret = 0;
+ struct metapath clone;
+ u64 hole_size;
+ int ret;
- ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args);
- if (!ret)
- iomap->length = args.blocks << inode->i_blkbits;
+ clone_metapath(&clone, mp);
+ ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
+ if (ret < 0)
+ goto out;
+
+ if (ret == 1)
+ hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
+ else
+ hole_size = len;
+ iomap->length = hole_size << inode->i_blkbits;
+ ret = 0;
+
+out:
+ release_metapath(&clone);
return ret;
}
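
The new walker API in the gfs2 hunks above has a fixed calling convention, distilled here from gfs2_hole_size(): walk on a clone so the caller's metapath survives, and translate a stop back into a block number (my_walker stands in for any gfs2_metadata_walker):

	struct metapath clone;
	sector_t block;
	int ret;

	clone_metapath(&clone, mp);
	ret = gfs2_walk_metadata(inode, &clone, max_len, my_walker);
	if (ret == 1)	/* walker returned WALK_STOP; clone marks the spot */
		block = metapath_to_block(GFS2_SB(inode), &clone);
	/* ret == 0: went past max_len or end of metadata; ret < 0: error */
	release_metapath(&clone);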
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index c092661..0a2b59c 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -416,10 +416,10 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
clp = nfs_match_client(cl_init);
if (clp) {
spin_unlock(&nn->nfs_client_lock);
- if (IS_ERR(clp))
- return clp;
if (new)
new->rpc_ops->free_client(new);
+ if (IS_ERR(clp))
+ return clp;
return nfs_found_client(cl_init, clp);
}
if (new) {
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index a4ea9ab..3baeed0 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1073,6 +1073,100 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
return !nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU);
}
+static int
+nfs_lookup_revalidate_done(struct inode *dir, struct dentry *dentry,
+ struct inode *inode, int error)
+{
+ switch (error) {
+ case 1:
+ dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
+ __func__, dentry);
+ return 1;
+ case 0:
+ nfs_mark_for_revalidate(dir);
+ if (inode && S_ISDIR(inode->i_mode)) {
+ /* Purge readdir caches. */
+ nfs_zap_caches(inode);
+ /*
+ * We can't d_drop the root of a disconnected tree:
+ * its d_hash is on the s_anon list and d_drop() would hide
+ * it from shrink_dcache_for_unmount(), leading to busy
+ * inodes on unmount and further oopses.
+ */
+ if (IS_ROOT(dentry))
+ return 1;
+ }
+ dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
+ __func__, dentry);
+ return 0;
+ }
+ dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
+ __func__, dentry, error);
+ return error;
+}
+
+static int
+nfs_lookup_revalidate_negative(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
+{
+ int ret = 1;
+ if (nfs_neg_need_reval(dir, dentry, flags)) {
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+ ret = 0;
+ }
+ return nfs_lookup_revalidate_done(dir, dentry, NULL, ret);
+}
+
+static int
+nfs_lookup_revalidate_delegated(struct inode *dir, struct dentry *dentry,
+ struct inode *inode)
+{
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
+}
+
+static int
+nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
+ struct inode *inode)
+{
+ struct nfs_fh *fhandle;
+ struct nfs_fattr *fattr;
+ struct nfs4_label *label;
+ int ret;
+
+ ret = -ENOMEM;
+ fhandle = nfs_alloc_fhandle();
+ fattr = nfs_alloc_fattr();
+ label = nfs4_label_alloc(NFS_SERVER(inode), GFP_KERNEL);
+ if (fhandle == NULL || fattr == NULL || IS_ERR(label))
+ goto out;
+
+ ret = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
+ if (ret < 0) {
+ if (ret == -ESTALE || ret == -ENOENT)
+ ret = 0;
+ goto out;
+ }
+ ret = 0;
+ if (nfs_compare_fh(NFS_FH(inode), fhandle))
+ goto out;
+ if (nfs_refresh_inode(inode, fattr) < 0)
+ goto out;
+
+ nfs_setsecurity(inode, fattr, label);
+ nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+
+ /* set a readdirplus hint that we had a cache miss */
+ nfs_force_use_readdirplus(dir);
+ ret = 1;
+out:
+ nfs_free_fattr(fattr);
+ nfs_free_fhandle(fhandle);
+ nfs4_label_free(label);
+ return nfs_lookup_revalidate_done(dir, dentry, inode, ret);
+}
+
/*
* This is called every time the dcache has a lookup hit,
* and we should check whether we can really trust that
@@ -1084,58 +1178,36 @@ int nfs_neg_need_reval(struct inode *dir, struct dentry *dentry,
* If the parent directory is seen to have changed, we throw out the
* cached dentry and do a new lookup.
*/
-static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static int
+nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
{
- struct inode *dir;
struct inode *inode;
- struct dentry *parent;
- struct nfs_fh *fhandle = NULL;
- struct nfs_fattr *fattr = NULL;
- struct nfs4_label *label = NULL;
int error;
- if (flags & LOOKUP_RCU) {
- parent = READ_ONCE(dentry->d_parent);
- dir = d_inode_rcu(parent);
- if (!dir)
- return -ECHILD;
- } else {
- parent = dget_parent(dentry);
- dir = d_inode(parent);
- }
nfs_inc_stats(dir, NFSIOS_DENTRYREVALIDATE);
inode = d_inode(dentry);
- if (!inode) {
- if (nfs_neg_need_reval(dir, dentry, flags)) {
- if (flags & LOOKUP_RCU)
- return -ECHILD;
- goto out_bad;
- }
- goto out_valid;
- }
+ if (!inode)
+ return nfs_lookup_revalidate_negative(dir, dentry, flags);
if (is_bad_inode(inode)) {
- if (flags & LOOKUP_RCU)
- return -ECHILD;
dfprintk(LOOKUPCACHE, "%s: %pd2 has dud inode\n",
__func__, dentry);
goto out_bad;
}
if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
- goto out_set_verifier;
+ return nfs_lookup_revalidate_delegated(dir, dentry, inode);
/* Force a full look up iff the parent directory has changed */
if (!(flags & (LOOKUP_EXCL | LOOKUP_REVAL)) &&
nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
error = nfs_lookup_verify_inode(inode, flags);
if (error) {
- if (flags & LOOKUP_RCU)
- return -ECHILD;
if (error == -ESTALE)
- goto out_zap_parent;
- goto out_error;
+ nfs_zap_caches(dir);
+ goto out_bad;
}
nfs_advise_use_readdirplus(dir);
goto out_valid;
@@ -1147,81 +1219,45 @@ static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
if (NFS_STALE(inode))
goto out_bad;
- error = -ENOMEM;
- fhandle = nfs_alloc_fhandle();
- fattr = nfs_alloc_fattr();
- if (fhandle == NULL || fattr == NULL)
- goto out_error;
-
- label = nfs4_label_alloc(NFS_SERVER(inode), GFP_NOWAIT);
- if (IS_ERR(label))
- goto out_error;
-
trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
- error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
+ error = nfs_lookup_revalidate_dentry(dir, dentry, inode);
trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
- if (error == -ESTALE || error == -ENOENT)
- goto out_bad;
- if (error)
- goto out_error;
- if (nfs_compare_fh(NFS_FH(inode), fhandle))
- goto out_bad;
- if ((error = nfs_refresh_inode(inode, fattr)) != 0)
- goto out_bad;
+ return error;
+out_valid:
+ return nfs_lookup_revalidate_done(dir, dentry, inode, 1);
+out_bad:
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+ return nfs_lookup_revalidate_done(dir, dentry, inode, 0);
+}
- nfs_setsecurity(inode, fattr, label);
+static int
+__nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags,
+ int (*reval)(struct inode *, struct dentry *, unsigned int))
+{
+ struct dentry *parent;
+ struct inode *dir;
+ int ret;
- nfs_free_fattr(fattr);
- nfs_free_fhandle(fhandle);
- nfs4_label_free(label);
-
- /* set a readdirplus hint that we had a cache miss */
- nfs_force_use_readdirplus(dir);
-
-out_set_verifier:
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
- out_valid:
if (flags & LOOKUP_RCU) {
+ parent = READ_ONCE(dentry->d_parent);
+ dir = d_inode_rcu(parent);
+ if (!dir)
+ return -ECHILD;
+ ret = reval(dir, dentry, flags);
if (parent != READ_ONCE(dentry->d_parent))
return -ECHILD;
- } else
+ } else {
+ parent = dget_parent(dentry);
+ ret = reval(d_inode(parent), dentry, flags);
dput(parent);
- dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is valid\n",
- __func__, dentry);
- return 1;
-out_zap_parent:
- nfs_zap_caches(dir);
- out_bad:
- WARN_ON(flags & LOOKUP_RCU);
- nfs_free_fattr(fattr);
- nfs_free_fhandle(fhandle);
- nfs4_label_free(label);
- nfs_mark_for_revalidate(dir);
- if (inode && S_ISDIR(inode->i_mode)) {
- /* Purge readdir caches. */
- nfs_zap_caches(inode);
- /*
- * We can't d_drop the root of a disconnected tree:
- * its d_hash is on the s_anon list and d_drop() would hide
- * it from shrink_dcache_for_unmount(), leading to busy
- * inodes on unmount and further oopses.
- */
- if (IS_ROOT(dentry))
- goto out_valid;
}
- dput(parent);
- dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) is invalid\n",
- __func__, dentry);
- return 0;
-out_error:
- WARN_ON(flags & LOOKUP_RCU);
- nfs_free_fattr(fattr);
- nfs_free_fhandle(fhandle);
- nfs4_label_free(label);
- dput(parent);
- dfprintk(LOOKUPCACHE, "NFS: %s(%pd2) lookup returned error %d\n",
- __func__, dentry, error);
- return error;
+ return ret;
+}
+
+static int nfs_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ return __nfs_lookup_revalidate(dentry, flags, nfs_do_lookup_revalidate);
}
/*
@@ -1580,62 +1616,55 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
}
EXPORT_SYMBOL_GPL(nfs_atomic_open);
-static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+static int
+nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
+ unsigned int flags)
{
struct inode *inode;
- int ret = 0;
if (!(flags & LOOKUP_OPEN) || (flags & LOOKUP_DIRECTORY))
- goto no_open;
+ goto full_reval;
if (d_mountpoint(dentry))
- goto no_open;
- if (NFS_SB(dentry->d_sb)->caps & NFS_CAP_ATOMIC_OPEN_V1)
- goto no_open;
+ goto full_reval;
inode = d_inode(dentry);
/* We can't create new files in nfs_open_revalidate(), so we
* optimize away revalidation of negative dentries.
*/
- if (inode == NULL) {
- struct dentry *parent;
- struct inode *dir;
+ if (inode == NULL)
+ goto full_reval;
- if (flags & LOOKUP_RCU) {
- parent = READ_ONCE(dentry->d_parent);
- dir = d_inode_rcu(parent);
- if (!dir)
- return -ECHILD;
- } else {
- parent = dget_parent(dentry);
- dir = d_inode(parent);
- }
- if (!nfs_neg_need_reval(dir, dentry, flags))
- ret = 1;
- else if (flags & LOOKUP_RCU)
- ret = -ECHILD;
- if (!(flags & LOOKUP_RCU))
- dput(parent);
- else if (parent != READ_ONCE(dentry->d_parent))
- return -ECHILD;
- goto out;
- }
+ if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
+ return nfs_lookup_revalidate_delegated(dir, dentry, inode);
/* NFS only supports OPEN on regular files */
if (!S_ISREG(inode->i_mode))
- goto no_open;
+ goto full_reval;
+
/* We cannot do exclusive creation on a positive dentry */
- if (flags & LOOKUP_EXCL)
- goto no_open;
+ if (flags & (LOOKUP_EXCL | LOOKUP_REVAL))
+ goto reval_dentry;
+
+ /* Check if the directory changed */
+ if (!nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU))
+ goto reval_dentry;
/* Let f_op->open() actually open (and revalidate) the file */
- ret = 1;
+ return 1;
+reval_dentry:
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+ return nfs_lookup_revalidate_dentry(dir, dentry, inode);
-out:
- return ret;
+full_reval:
+ return nfs_do_lookup_revalidate(dir, dentry, flags);
+}
-no_open:
- return nfs_lookup_revalidate(dentry, flags);
+static int nfs4_lookup_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ return __nfs_lookup_revalidate(dentry, flags,
+ nfs4_do_lookup_revalidate);
}
#endif /* CONFIG_NFSV4 */
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 4dc8878..a7bc4e0 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -118,6 +118,10 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
struct rb_node **p, *parent;
int diff;
+ nfss->fscache_key = NULL;
+ nfss->fscache = NULL;
+ if (!(nfss->options & NFS_OPTION_FSCACHE))
+ return;
if (!uniq) {
uniq = "";
ulen = 1;
@@ -230,10 +234,11 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
void nfs_fscache_init_inode(struct inode *inode)
{
struct nfs_fscache_inode_auxdata auxdata;
+ struct nfs_server *nfss = NFS_SERVER(inode);
struct nfs_inode *nfsi = NFS_I(inode);
nfsi->fscache = NULL;
- if (!S_ISREG(inode->i_mode))
+ if (!(nfss->fscache && S_ISREG(inode->i_mode)))
return;
memset(&auxdata, 0, sizeof(auxdata));
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 161ba2e..6363ea9 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -186,7 +186,7 @@ static inline void nfs_fscache_wait_on_invalidate(struct inode *inode)
*/
static inline const char *nfs_server_fscache_state(struct nfs_server *server)
{
- if (server->fscache && (server->options & NFS_OPTION_FSCACHE))
+ if (server->fscache)
return "yes";
return "no ";
}
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 63287d9..5b61520 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -469,7 +469,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
-extern void nfs4_purge_state_owners(struct nfs_server *);
+extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
+extern void nfs4_free_state_owners(struct list_head *head);
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
extern void nfs4_put_open_state(struct nfs4_state *);
extern void nfs4_close_state(struct nfs4_state *, fmode_t);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 8f53455..86991bc 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -754,9 +754,12 @@ int nfs41_walk_client_list(struct nfs_client *new,
static void nfs4_destroy_server(struct nfs_server *server)
{
+ LIST_HEAD(freeme);
+
nfs_server_return_all_delegations(server);
unset_pnfs_layoutdriver(server);
- nfs4_purge_state_owners(server);
+ nfs4_purge_state_owners(server, &freeme);
+ nfs4_free_state_owners(&freeme);
}
/*
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1de855e..31ae3bd 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1355,12 +1355,20 @@ static bool nfs4_mode_match_open_stateid(struct nfs4_state *state,
return false;
}
-static int can_open_cached(struct nfs4_state *state, fmode_t mode, int open_mode)
+static int can_open_cached(struct nfs4_state *state, fmode_t mode,
+ int open_mode, enum open_claim_type4 claim)
{
int ret = 0;
if (open_mode & (O_EXCL|O_TRUNC))
goto out;
+ switch (claim) {
+ case NFS4_OPEN_CLAIM_NULL:
+ case NFS4_OPEN_CLAIM_FH:
+ goto out;
+ default:
+ break;
+ }
switch (mode & (FMODE_READ|FMODE_WRITE)) {
case FMODE_READ:
ret |= test_bit(NFS_O_RDONLY_STATE, &state->flags) != 0
@@ -1753,7 +1761,7 @@ static struct nfs4_state *nfs4_try_open_cached(struct nfs4_opendata *opendata)
for (;;) {
spin_lock(&state->owner->so_lock);
- if (can_open_cached(state, fmode, open_mode)) {
+ if (can_open_cached(state, fmode, open_mode, claim)) {
update_open_stateflags(state, fmode);
spin_unlock(&state->owner->so_lock);
goto out_return_state;
@@ -2282,7 +2290,8 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
if (data->state != NULL) {
struct nfs_delegation *delegation;
- if (can_open_cached(data->state, data->o_arg.fmode, data->o_arg.open_flags))
+ if (can_open_cached(data->state, data->o_arg.fmode,
+ data->o_arg.open_flags, claim))
goto out_no_action;
rcu_read_lock();
delegation = rcu_dereference(NFS_I(data->state->inode)->delegation);
@@ -3124,7 +3133,7 @@ static int _nfs4_do_setattr(struct inode *inode,
if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
/* Use that stateid */
- } else if (ctx != NULL) {
+ } else if (ctx != NULL && ctx->state) {
struct nfs_lock_context *l_ctx;
if (!nfs4_valid_open_stateid(ctx->state))
return -EBADF;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 3ba2087..c36ef75 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -628,24 +628,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
/**
* nfs4_purge_state_owners - Release all cached state owners
* @server: nfs_server with cached state owners to release
+ * @head: resulting list of state owners
*
* Called at umount time. Remaining state owners will be on
* the LRU with ref count of zero.
+ * Note that the state owners are not freed, but are added
+ * to the list @head, which can later be used as an argument
+ * to nfs4_free_state_owners.
*/
-void nfs4_purge_state_owners(struct nfs_server *server)
+void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state_owner *sp, *tmp;
- LIST_HEAD(doomed);
spin_lock(&clp->cl_lock);
list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
- list_move(&sp->so_lru, &doomed);
+ list_move(&sp->so_lru, head);
nfs4_remove_state_owner_locked(sp);
}
spin_unlock(&clp->cl_lock);
+}
- list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+/**
+ * nfs4_free_state_owners - Free a list of state owners
+ * @head: list of state owners to free
+ *
+ * Frees a list of state owners that was generated by
+ * nfs4_purge_state_owners().
+ */
+void nfs4_free_state_owners(struct list_head *head)
+{
+ struct nfs4_state_owner *sp, *tmp;
+
+ list_for_each_entry_safe(sp, tmp, head, so_lru) {
list_del(&sp->so_lru);
nfs4_free_state_owner(sp);
}
@@ -1843,12 +1858,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
struct nfs4_state_owner *sp;
struct nfs_server *server;
struct rb_node *pos;
+ LIST_HEAD(freeme);
int status = 0;
restart:
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
- nfs4_purge_state_owners(server);
+ nfs4_purge_state_owners(server, &freeme);
spin_lock(&clp->cl_lock);
for (pos = rb_first(&server->state_owners);
pos != NULL;
@@ -1877,6 +1893,7 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
spin_unlock(&clp->cl_lock);
}
rcu_read_unlock();
+ nfs4_free_state_owners(&freeme);
return 0;
}
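
The nfs4_purge_state_owners()/nfs4_free_state_owners() split above is a two-phase teardown: unlink entries onto a private list while holding clp->cl_lock (and, in nfs4_do_reclaim(), while inside the RCU read-side section), then free them only after every lock is dropped. The shape of each call site:

	LIST_HEAD(freeme);

	nfs4_purge_state_owners(server, &freeme);	/* under clp->cl_lock */
	/* ... drop spinlocks / leave the RCU read-side section ... */
	nfs4_free_state_owners(&freeme);		/* may sleep; lock-free */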
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 6df9b85..d90efde 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2239,6 +2239,7 @@ nfs_compare_remount_data(struct nfs_server *nfss,
data->acdirmin != nfss->acdirmin / HZ ||
data->acdirmax != nfss->acdirmax / HZ ||
data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
+ (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
data->nfs_server.port != nfss->port ||
data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
!rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 29dee96..a18b8d7 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -148,10 +148,13 @@ struct fanotify_event_info *fanotify_alloc_event(struct fsnotify_group *group,
/*
* For queues with unlimited length lost events are not expected and
* can possibly have security implications. Avoid losing events when
- * memory is short.
+ * memory is short. For limited size queues, avoid the OOM killer in the
+ * target monitoring memcg, as it may have security repercussions.
*/
if (group->max_events == UINT_MAX)
gfp |= __GFP_NOFAIL;
+ else
+ gfp |= __GFP_RETRY_MAYFAIL;
/* Whoever is interested in the event, pays for the allocation. */
memalloc_use_memcg(group->memcg);
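
Spelling out the allocation policy chosen above (same logic, annotated):

	gfp_t gfp = GFP_KERNEL_ACCOUNT;	/* charged to group->memcg */

	if (group->max_events == UINT_MAX)
		gfp |= __GFP_NOFAIL;		/* unlimited queue: losing an event
						 * has security implications, so
						 * block until memory appears */
	else
		gfp |= __GFP_RETRY_MAYFAIL;	/* bounded queue: retry hard, but
						 * fail rather than OOM-kill the
						 * monitored memcg */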
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index f4184b4..16b8702 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -99,9 +99,13 @@ int inotify_handle_event(struct fsnotify_group *group,
i_mark = container_of(inode_mark, struct inotify_inode_mark,
fsn_mark);
- /* Whoever is interested in the event, pays for the allocation. */
+ /*
+ * Whoever is interested in the event, pays for the allocation. Do not
+ * trigger the OOM killer in the target monitoring memcg, as it may have
+ * security repercussions.
+ */
memalloc_use_memcg(group->memcg);
- event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT);
+ event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);
memalloc_unuse_memcg();
if (unlikely(!event)) {
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 3a24ce3..c146e12 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -3833,7 +3833,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
u16 blk_per_bucket = ocfs2_blocks_per_xattr_bucket(inode->i_sb);
int low_bucket = 0, bucket, high_bucket;
struct ocfs2_xattr_bucket *search;
- u32 last_hash;
u64 blkno, lower_blkno = 0;
search = ocfs2_xattr_bucket_new(inode);
@@ -3877,8 +3876,6 @@ static int ocfs2_xattr_bucket_find(struct inode *inode,
if (xh->xh_count)
xe = &xh->xh_entries[le16_to_cpu(xh->xh_count) - 1];
- last_hash = le32_to_cpu(xe->xe_name_hash);
-
/* record lower_blkno which may be the insert place. */
lower_blkno = blkno;
diff --git a/fs/open.c b/fs/open.c
index 4dbbacc..886da56 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -383,6 +383,25 @@ long do_faccessat(int dfd, const char __user *filename, int mode)
override_cred->cap_permitted;
}
+ /*
+ * The new set of credentials can *only* be used in
+ * task-synchronous circumstances, and does not need
+ * RCU freeing, unless somebody then takes a separate
+ * reference to it.
+ *
+ * NOTE! This is _only_ true because this credential
+ * is used purely for override_creds() that installs
+ * it as the subjective cred. Other threads will be
+ * accessing ->real_cred, not the subjective cred.
+ *
+ * If somebody _does_ make a copy of this (using the
+ * 'get_current_cred()' function), that will clear the
+ * non_rcu field, because now that other user may be
+ * expecting RCU freeing. But normal thread-synchronous
+ * cred accesses will keep things non-RCU.
+ */
+ override_cred->non_rcu = 1;
+
old_cred = override_creds(override_cred);
retry:
res = user_path_at(dfd, filename, lookup_flags, &path);
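
The non_rcu marking only works because the credential's lifetime is strictly bracketed by the function; the usual pairing, including the tail of do_faccessat() that the hunk does not show, looks roughly like:

	override_cred->non_rcu = 1;	/* see the NOTE above */
	old_cred = override_creds(override_cred);
	/* ... task-synchronous access check as the overridden identity ... */
	revert_creds(old_cred);
	put_cred(override_cred);	/* last ref: freed without an RCU grace period */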
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c9afcb5..ad4370e 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -206,12 +206,53 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
return result;
}
+/*
+ * If the user used setproctitle(), we just get the string from
+ * user space at arg_start, and limit it to a maximum of one page.
+ */
+static ssize_t get_mm_proctitle(struct mm_struct *mm, char __user *buf,
+ size_t count, unsigned long pos,
+ unsigned long arg_start)
+{
+ char *page;
+ int ret, got;
+
+ if (pos >= PAGE_SIZE)
+ return 0;
+
+ page = (char *)__get_free_page(GFP_KERNEL);
+ if (!page)
+ return -ENOMEM;
+
+ ret = 0;
+ got = access_remote_vm(mm, arg_start, page, PAGE_SIZE, FOLL_ANON);
+ if (got > 0) {
+ int len = strnlen(page, got);
+
+ /* Include the NUL character if it was found */
+ if (len < got)
+ len++;
+
+ if (len > pos) {
+ len -= pos;
+ if (len > count)
+ len = count;
+ len -= copy_to_user(buf, page+pos, len);
+ if (!len)
+ len = -EFAULT;
+ ret = len;
+ }
+ }
+ free_page((unsigned long)page);
+ return ret;
+}
+
static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
size_t count, loff_t *ppos)
{
unsigned long arg_start, arg_end, env_start, env_end;
unsigned long pos, len;
- char *page;
+ char *page, c;
/* Check if process spawned far enough to have cmdline. */
if (!mm->env_end)
@@ -228,28 +269,42 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
return 0;
/*
- * We have traditionally allowed the user to re-write
- * the argument strings and overflow the end result
- * into the environment section. But only do that if
- * the environment area is contiguous to the arguments.
+ * We allow setproctitle() to overwrite the argument
+ * strings, and overflow past the original end. But
+ * only when it overflows into the environment area.
*/
- if (env_start != arg_end || env_start >= env_end)
+ if (env_start != arg_end || env_end < env_start)
env_start = env_end = arg_end;
-
- /* .. and limit it to a maximum of one page of slop */
- if (env_end >= arg_end + PAGE_SIZE)
- env_end = arg_end + PAGE_SIZE - 1;
+ len = env_end - arg_start;
/* We're not going to care if "*ppos" has high bits set */
- pos = arg_start + *ppos;
-
- /* .. but we do check the result is in the proper range */
- if (pos < arg_start || pos >= env_end)
+ pos = *ppos;
+ if (pos >= len)
+ return 0;
+ if (count > len - pos)
+ count = len - pos;
+ if (!count)
return 0;
- /* .. and we never go past env_end */
- if (env_end - pos < count)
- count = env_end - pos;
+ /*
+ * Magical special case: if the argv[] end byte is not
+ * zero, the user has overwritten it with setproctitle(3).
+ *
+ * Possible future enhancement: do this only once when
+ * pos is 0, and set a flag in the 'struct file'.
+ */
+ if (access_remote_vm(mm, arg_end-1, &c, 1, FOLL_ANON) == 1 && c)
+ return get_mm_proctitle(mm, buf, count, pos, arg_start);
+
+ /*
+ * For the non-setproctitle() case we limit things strictly
+ * to the [arg_start, arg_end[ range.
+ */
+ pos += arg_start;
+ if (pos < arg_start || pos >= arg_end)
+ return 0;
+ if (count > arg_end - pos)
+ count = arg_end - pos;
page = (char *)__get_free_page(GFP_KERNEL);
if (!page)
@@ -259,48 +314,11 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
while (count) {
int got;
size_t size = min_t(size_t, PAGE_SIZE, count);
- long offset;
- /*
- * Are we already starting past the official end?
- * We always include the last byte that is *supposed*
- * to be NUL
- */
- offset = (pos >= arg_end) ? pos - arg_end + 1 : 0;
-
- got = access_remote_vm(mm, pos - offset, page, size + offset, FOLL_ANON);
- if (got <= offset)
+ got = access_remote_vm(mm, pos, page, size, FOLL_ANON);
+ if (got <= 0)
break;
- got -= offset;
-
- /* Don't walk past a NUL character once you hit arg_end */
- if (pos + got >= arg_end) {
- int n = 0;
-
- /*
- * If we started before 'arg_end' but ended up
- * at or after it, we start the NUL character
- * check at arg_end-1 (where we expect the normal
- * EOF to be).
- *
- * NOTE! This is smaller than 'got', because
- * pos + got >= arg_end
- */
- if (pos < arg_end)
- n = arg_end - pos - 1;
-
- /* Cut off at first NUL after 'n' */
- got = n + strnlen(page+n, offset+got-n);
- if (got < offset)
- break;
- got -= offset;
-
- /* Include the NUL if it existed */
- if (got < size)
- got++;
- }
-
- got -= copy_to_user(buf, page+offset, got);
+ got -= copy_to_user(buf, page, got);
if (unlikely(!got)) {
if (!len)
len = -EFAULT;
@@ -2159,9 +2177,12 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
goto out;
if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
- down_read(&mm->mmap_sem);
- exact_vma_exists = !!find_exact_vma(mm, vm_start, vm_end);
- up_read(&mm->mmap_sem);
+ status = down_read_killable(&mm->mmap_sem);
+ if (!status) {
+ exact_vma_exists = !!find_exact_vma(mm, vm_start,
+ vm_end);
+ up_read(&mm->mmap_sem);
+ }
}
mmput(mm);
@@ -2207,8 +2228,11 @@ static int map_files_get_link(struct dentry *dentry, struct path *path)
if (rc)
goto out_mmput;
+ rc = down_read_killable(&mm->mmap_sem);
+ if (rc)
+ goto out_mmput;
+
rc = -ENOENT;
- down_read(&mm->mmap_sem);
vma = find_exact_vma(mm, vm_start, vm_end);
if (vma && vma->vm_file) {
*path = vma->vm_file->f_path;
@@ -2304,7 +2328,11 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
if (!mm)
goto out_put_task;
- down_read(&mm->mmap_sem);
+ result = ERR_PTR(-EINTR);
+ if (down_read_killable(&mm->mmap_sem))
+ goto out_put_mm;
+
+ result = ERR_PTR(-ENOENT);
vma = find_exact_vma(mm, vm_start, vm_end);
if (!vma)
goto out_no_vma;
@@ -2315,6 +2343,7 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
out_no_vma:
up_read(&mm->mmap_sem);
+out_put_mm:
mmput(mm);
out_put_task:
put_task_struct(task);
@@ -2356,7 +2385,12 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
mm = get_task_mm(task);
if (!mm)
goto out_put_task;
- down_read(&mm->mmap_sem);
+
+ ret = down_read_killable(&mm->mmap_sem);
+ if (ret) {
+ mmput(mm);
+ goto out_put_task;
+ }
nr_files = 0;
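
All of the procfs conversions in this file (and in task_mmu.c and task_nommu.c below) follow one shape: take mmap_sem killably, so a task blocked behind a long-running lock holder can still be killed, and unwind any references taken before the lock on failure:

	if (down_read_killable(&mm->mmap_sem)) {
		mmput(mm);		/* undo the reference taken earlier */
		return -EINTR;		/* a fatal signal is pending */
	}
	/* ... inspect the VMAs ... */
	up_read(&mm->mmap_sem);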
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index fac4a09..fe6f135 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -218,7 +218,11 @@ static void *m_start(struct seq_file *m, loff_t *ppos)
if (!mm || !mmget_not_zero(mm))
return NULL;
- down_read(&mm->mmap_sem);
+ if (down_read_killable(&mm->mmap_sem)) {
+ mmput(mm);
+ return ERR_PTR(-EINTR);
+ }
+
hold_task_mempolicy(priv);
priv->tail_vma = get_gate_vma(mm);
@@ -890,7 +894,10 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
memset(&mss, 0, sizeof(mss));
- down_read(&mm->mmap_sem);
+ ret = down_read_killable(&mm->mmap_sem);
+ if (ret)
+ goto out_put_mm;
+
hold_task_mempolicy(priv);
for (vma = priv->mm->mmap; vma; vma = vma->vm_next) {
@@ -907,8 +914,9 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
release_task_mempolicy(priv);
up_read(&mm->mmap_sem);
- mmput(mm);
+out_put_mm:
+ mmput(mm);
out_put_task:
put_task_struct(priv->task);
priv->task = NULL;
@@ -1191,7 +1199,10 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
goto out_mm;
}
- down_read(&mm->mmap_sem);
+ if (down_read_killable(&mm->mmap_sem)) {
+ count = -EINTR;
+ goto out_mm;
+ }
tlb_gather_mmu(&tlb, mm, 0, -1);
if (type == CLEAR_REFS_SOFT_DIRTY) {
for (vma = mm->mmap; vma; vma = vma->vm_next) {
@@ -1598,7 +1609,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
/* overflow ? */
if (end < start_vaddr || end > end_vaddr)
end = end_vaddr;
- down_read(&mm->mmap_sem);
+ ret = down_read_killable(&mm->mmap_sem);
+ if (ret)
+ goto out_free;
ret = walk_page_range(start_vaddr, end, &pagemap_walk);
up_read(&mm->mmap_sem);
start_vaddr = end;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 0b63d68d..5161894 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -211,7 +211,11 @@ static void *m_start(struct seq_file *m, loff_t *pos)
if (!mm || !mmget_not_zero(mm))
return NULL;
- down_read(&mm->mmap_sem);
+ if (down_read_killable(&mm->mmap_sem)) {
+ mmput(mm);
+ return ERR_PTR(-EINTR);
+ }
+
/* start from the Nth VMA */
for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
if (n-- == 0)
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 1dea7a8..05e58b5 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
}
if (seq_has_overflowed(m))
goto Eoverflow;
+ p = m->op->next(m, p, &m->index);
if (pos + m->count > offset) {
m->from = offset - pos;
m->count -= m->from;
@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
}
pos += m->count;
m->count = 0;
- p = m->op->next(m, p, &m->index);
if (pos == offset)
break;
}
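
The traverse() reorder matters because ->next() both advances the iterator and updates m->index; moving the call before the "does this record cross the requested offset" check keeps m->index in sync with the record actually left in the buffer, so the first record past the offset is not emitted twice. For reference, a minimal (hypothetical) seq_operations implementation showing the contract traverse() relies on:

	static int nums[] = { 1, 2, 3 };

	static void *ex_start(struct seq_file *m, loff_t *pos)
	{
		return *pos < ARRAY_SIZE(nums) ? &nums[*pos] : NULL;
	}

	static void *ex_next(struct seq_file *m, void *v, loff_t *pos)
	{
		++*pos;		/* must track exactly what ->show() emitted */
		return *pos < ARRAY_SIZE(nums) ? &nums[*pos] : NULL;
	}

	static void ex_stop(struct seq_file *m, void *v) { }

	static int ex_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "%d\n", *(int *)v);
		return 0;
	}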
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 7468c96..5a519bc 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -884,6 +884,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
/* len == 0 means wake all */
struct userfaultfd_wake_range range = { .len = 0, };
unsigned long new_flags;
+ bool still_valid;
WRITE_ONCE(ctx->released, true);
@@ -899,8 +900,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
* taking the mmap_sem for writing.
*/
down_write(&mm->mmap_sem);
- if (!mmget_still_valid(mm))
- goto skip_mm;
+ still_valid = mmget_still_valid(mm);
prev = NULL;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
cond_resched();
@@ -911,22 +911,23 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
continue;
}
new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
- prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
- new_flags, vma->anon_vma,
- vma->vm_file, vma->vm_pgoff,
- vma_policy(vma),
- NULL_VM_UFFD_CTX,
- vma_get_anon_name(vma));
- if (prev)
- vma = prev;
- else
- prev = vma;
+ if (still_valid) {
+ prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+ new_flags, vma->anon_vma,
+ vma->vm_file, vma->vm_pgoff,
+ vma_policy(vma),
+ NULL_VM_UFFD_CTX,
+ vma_get_anon_name(vma));
+ if (prev)
+ vma = prev;
+ else
+ prev = vma;
+ }
vm_write_begin(vma);
WRITE_ONCE(vma->vm_flags, new_flags);
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
vm_write_end(vma);
}
-skip_mm:
up_write(&mm->mmap_sem);
mmput(mm);
wakeup:
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index c6299f8..6410d3e 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -191,6 +191,121 @@ xfs_attr_calc_size(
return nblks;
}
+STATIC int
+xfs_attr_try_sf_addname(
+ struct xfs_inode *dp,
+ struct xfs_da_args *args)
+{
+
+ struct xfs_mount *mp = dp->i_mount;
+ int error, error2;
+
+ error = xfs_attr_shortform_addname(args);
+ if (error == -ENOSPC)
+ return error;
+
+ /*
+ * Commit the shortform mods, and we're done.
+ * NOTE: this is also the error path (EEXIST, etc).
+ */
+ if (!error && (args->flags & ATTR_KERNOTIME) == 0)
+ xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
+
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
+ xfs_trans_set_sync(args->trans);
+
+ error2 = xfs_trans_commit(args->trans);
+ args->trans = NULL;
+ return error ? error : error2;
+}
+
+/*
+ * Set the attribute specified in @args.
+ */
+int
+xfs_attr_set_args(
+ struct xfs_da_args *args)
+{
+ struct xfs_inode *dp = args->dp;
+ struct xfs_buf *leaf_bp = NULL;
+ int error;
+
+ /*
+ * If the attribute list is non-existent or a shortform list,
+ * upgrade it to a single-leaf-block attribute list.
+ */
+ if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
+ (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
+ dp->i_d.di_anextents == 0)) {
+
+ /*
+ * Build initial attribute list (if required).
+ */
+ if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
+ xfs_attr_shortform_create(args);
+
+ /*
+ * Try to add the attr to the attribute list in the inode.
+ */
+ error = xfs_attr_try_sf_addname(dp, args);
+ if (error != -ENOSPC)
+ return error;
+
+ /*
+ * It won't fit in the shortform, transform to a leaf block.
+ * GROT: another possible req'mt for a double-split btree op.
+ */
+ error = xfs_attr_shortform_to_leaf(args, &leaf_bp);
+ if (error)
+ return error;
+
+ /*
+ * Prevent the leaf buffer from being unlocked so that a
+ * concurrent AIL push cannot grab the half-baked leaf
+ * buffer and run into problems with the write verifier.
+ * Once we're done rolling the transaction we can release
+ * the hold and add the attr to the leaf.
+ */
+ xfs_trans_bhold(args->trans, leaf_bp);
+ error = xfs_defer_finish(&args->trans);
+ xfs_trans_bhold_release(args->trans, leaf_bp);
+ if (error) {
+ xfs_trans_brelse(args->trans, leaf_bp);
+ return error;
+ }
+ }
+
+ if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
+ error = xfs_attr_leaf_addname(args);
+ else
+ error = xfs_attr_node_addname(args);
+ return error;
+}
+
+/*
+ * Remove the attribute specified in @args.
+ */
+int
+xfs_attr_remove_args(
+ struct xfs_da_args *args)
+{
+ struct xfs_inode *dp = args->dp;
+ int error;
+
+ if (!xfs_inode_hasattr(dp)) {
+ error = -ENOATTR;
+ } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
+ ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
+ error = xfs_attr_shortform_remove(args);
+ } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
+ error = xfs_attr_leaf_removename(args);
+ } else {
+ error = xfs_attr_node_removename(args);
+ }
+
+ return error;
+}
+
int
xfs_attr_set(
struct xfs_inode *dp,
@@ -200,11 +315,10 @@ xfs_attr_set(
int flags)
{
struct xfs_mount *mp = dp->i_mount;
- struct xfs_buf *leaf_bp = NULL;
struct xfs_da_args args;
struct xfs_trans_res tres;
int rsvd = (flags & ATTR_ROOT) != 0;
- int error, err2, local;
+ int error, local;
XFS_STATS_INC(mp, xs_attr_set);
@@ -255,93 +369,17 @@ xfs_attr_set(
error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
XFS_QMOPT_RES_REGBLKS);
- if (error) {
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
- xfs_trans_cancel(args.trans);
- return error;
- }
+ if (error)
+ goto out_trans_cancel;
xfs_trans_ijoin(args.trans, dp, 0);
-
- /*
- * If the attribute list is non-existent or a shortform list,
- * upgrade it to a single-leaf-block attribute list.
- */
- if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
- (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
- dp->i_d.di_anextents == 0)) {
-
- /*
- * Build initial attribute list (if required).
- */
- if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
- xfs_attr_shortform_create(&args);
-
- /*
- * Try to add the attr to the attribute list in
- * the inode.
- */
- error = xfs_attr_shortform_addname(&args);
- if (error != -ENOSPC) {
- /*
- * Commit the shortform mods, and we're done.
- * NOTE: this is also the error path (EEXIST, etc).
- */
- ASSERT(args.trans != NULL);
-
- /*
- * If this is a synchronous mount, make sure that
- * the transaction goes to disk before returning
- * to the user.
- */
- if (mp->m_flags & XFS_MOUNT_WSYNC)
- xfs_trans_set_sync(args.trans);
-
- if (!error && (flags & ATTR_KERNOTIME) == 0) {
- xfs_trans_ichgtime(args.trans, dp,
- XFS_ICHGTIME_CHG);
- }
- err2 = xfs_trans_commit(args.trans);
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
- return error ? error : err2;
- }
-
- /*
- * It won't fit in the shortform, transform to a leaf block.
- * GROT: another possible req'mt for a double-split btree op.
- */
- error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
- if (error)
- goto out;
- /*
- * Prevent the leaf buffer from being unlocked so that a
- * concurrent AIL push cannot grab the half-baked leaf
- * buffer and run into problems with the write verifier.
- */
- xfs_trans_bhold(args.trans, leaf_bp);
- error = xfs_defer_finish(&args.trans);
- if (error)
- goto out;
-
- /*
- * Commit the leaf transformation. We'll need another (linked)
- * transaction to add the new attribute to the leaf, which
- * means that we have to hold & join the leaf buffer here too.
- */
- error = xfs_trans_roll_inode(&args.trans, dp);
- if (error)
- goto out;
- xfs_trans_bjoin(args.trans, leaf_bp);
- leaf_bp = NULL;
- }
-
- if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
- error = xfs_attr_leaf_addname(&args);
- else
- error = xfs_attr_node_addname(&args);
+ error = xfs_attr_set_args(&args);
if (error)
- goto out;
+ goto out_trans_cancel;
+ if (!args.trans) {
+ /* shortform attribute has already been committed */
+ goto out_unlock;
+ }
/*
* If this is a synchronous mount, make sure that the
@@ -358,17 +396,14 @@ xfs_attr_set(
*/
xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
error = xfs_trans_commit(args.trans);
+out_unlock:
xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
return error;
-out:
- if (leaf_bp)
- xfs_trans_brelse(args.trans, leaf_bp);
+out_trans_cancel:
if (args.trans)
xfs_trans_cancel(args.trans);
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
- return error;
+ goto out_unlock;
}
/*
@@ -423,17 +458,7 @@ xfs_attr_remove(
*/
xfs_trans_ijoin(args.trans, dp, 0);
- if (!xfs_inode_hasattr(dp)) {
- error = -ENOATTR;
- } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
- ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
- error = xfs_attr_shortform_remove(&args);
- } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
- error = xfs_attr_leaf_removename(&args);
- } else {
- error = xfs_attr_node_removename(&args);
- }
-
+ error = xfs_attr_remove_args(&args);
if (error)
goto out;
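
The refactor above keeps the half-built leaf buffer locked across the deferred-op finish by pairing xfs_trans_bhold() with xfs_trans_bhold_release(). A condensed sketch of that pattern, using only helpers visible in this diff (illustrative, not the in-tree code; example_roll_with_held_buf() is a made-up name):

STATIC int
example_roll_with_held_buf(
	struct xfs_da_args	*args,
	struct xfs_buf		*leaf_bp)
{
	int			error;

	/* keep leaf_bp locked while xfs_defer_finish() rolls the transaction */
	xfs_trans_bhold(args->trans, leaf_bp);
	error = xfs_defer_finish(&args->trans);
	xfs_trans_bhold_release(args->trans, leaf_bp);
	if (error) {
		/* a transaction is always handed back, so release against it */
		xfs_trans_brelse(args->trans, leaf_bp);
		return error;
	}
	/* the extra hold is dropped; leaf_bp stays joined to the new trans */
	return 0;
}
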
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
similarity index 97%
rename from fs/xfs/xfs_attr.h
rename to fs/xfs/libxfs/xfs_attr.h
index 033ff8c..cc04ee0 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/libxfs/xfs_attr.h
@@ -140,7 +140,9 @@ int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
unsigned char *value, int *valuelenp, int flags);
int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
unsigned char *value, int valuelen, int flags);
+int xfs_attr_set_args(struct xfs_da_args *args);
int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
+int xfs_attr_remove_args(struct xfs_da_args *args);
int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
int flags, struct attrlist_cursor_kern *cursor);
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 3a496ff..06a7da8 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1019,6 +1019,34 @@ xfs_bmap_add_attrfork_local(
return -EFSCORRUPTED;
}
+/* Set an inode attr fork off based on the format */
+int
+xfs_bmap_set_attrforkoff(
+ struct xfs_inode *ip,
+ int size,
+ int *version)
+{
+ switch (ip->i_d.di_format) {
+ case XFS_DINODE_FMT_DEV:
+ ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
+ break;
+ case XFS_DINODE_FMT_LOCAL:
+ case XFS_DINODE_FMT_EXTENTS:
+ case XFS_DINODE_FMT_BTREE:
+ ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
+ if (!ip->i_d.di_forkoff)
+ ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
+ else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
+ *version = 2;
+ break;
+ default:
+ ASSERT(0);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*
* Convert inode from non-attributed to attributed.
* Must not be in a transaction, ip must not be locked.
@@ -1070,26 +1098,9 @@ xfs_bmap_add_attrfork(
xfs_trans_ijoin(tp, ip, 0);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
- switch (ip->i_d.di_format) {
- case XFS_DINODE_FMT_DEV:
- ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
- break;
- case XFS_DINODE_FMT_LOCAL:
- case XFS_DINODE_FMT_EXTENTS:
- case XFS_DINODE_FMT_BTREE:
- ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
- if (!ip->i_d.di_forkoff)
- ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
- else if (mp->m_flags & XFS_MOUNT_ATTR2)
- version = 2;
- break;
- default:
- ASSERT(0);
- error = -EINVAL;
+ error = xfs_bmap_set_attrforkoff(ip, size, &version);
+ if (error)
goto trans_cancel;
- }
-
ASSERT(ip->i_afp == NULL);
ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
ip->i_afp->if_flags = XFS_IFEXTENTS;
@@ -1178,7 +1189,10 @@ xfs_iread_extents(
* Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
*/
level = be16_to_cpu(block->bb_level);
- ASSERT(level > 0);
+ if (unlikely(level == 0)) {
+ XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
+ return -EFSCORRUPTED;
+ }
pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
bno = be64_to_cpu(*pp);
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index b6e9b63..488dc88 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -183,6 +183,7 @@ void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
xfs_filblks_t len);
void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
+int xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
void __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
xfs_filblks_t len, struct xfs_owner_info *oinfo,
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index e792b16..c52beee 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -266,13 +266,15 @@ xfs_defer_trans_roll(
trace_xfs_defer_trans_roll(tp, _RET_IP_);
- /* Roll the transaction. */
+ /*
+ * Roll the transaction. Rolling always gives a new transaction (even
+ * if committing the old one fails!) to hand back to the caller, so we
+ * join the held resources to the new transaction so that we always
+ * return with the held resources joined to @tpp, no matter what
+ * happened.
+ */
error = xfs_trans_roll(tpp);
tp = *tpp;
- if (error) {
- trace_xfs_defer_trans_roll_error(tp, error);
- return error;
- }
/* Rejoin the joined inodes. */
for (i = 0; i < ipcount; i++)
@@ -284,6 +286,8 @@ xfs_defer_trans_roll(
xfs_trans_bhold(tp, bplist[i]);
}
+ if (error)
+ trace_xfs_defer_trans_roll_error(tp, error);
return error;
}
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 87e6dd53..a1af984 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -277,7 +277,8 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
/*
* Ensure that the given in-core dquot has a buffer on disk backing it, and
- * return the buffer. This is called when the bmapi finds a hole.
+ * return the buffer locked and held. This is called when the bmapi finds a
+ * hole.
*/
STATIC int
xfs_dquot_disk_alloc(
@@ -355,13 +356,14 @@ xfs_dquot_disk_alloc(
* If everything succeeds, the caller of this function is returned a
* buffer that is locked and held to the transaction. The caller
* is responsible for unlocking any buffer passed back, either
- * manually or by committing the transaction.
+ * manually or by committing the transaction. On error, the buffer is
+ * released and not passed back.
*/
xfs_trans_bhold(tp, bp);
error = xfs_defer_finish(tpp);
- tp = *tpp;
if (error) {
- xfs_buf_relse(bp);
+ xfs_trans_bhold_release(*tpp, bp);
+ xfs_trans_brelse(*tpp, bp);
return error;
}
*bpp = bp;
@@ -521,7 +523,6 @@ xfs_qm_dqread_alloc(
struct xfs_buf **bpp)
{
struct xfs_trans *tp;
- struct xfs_buf *bp;
int error;
error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
@@ -529,7 +530,7 @@ xfs_qm_dqread_alloc(
if (error)
goto err;
- error = xfs_dquot_disk_alloc(&tp, dqp, &bp);
+ error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
if (error)
goto err_cancel;
@@ -539,10 +540,10 @@ xfs_qm_dqread_alloc(
* Buffer was held to the transaction, so we have to unlock it
* manually here because we're not passing it back.
*/
- xfs_buf_relse(bp);
+ xfs_buf_relse(*bpp);
+ *bpp = NULL;
goto err;
}
- *bpp = bp;
return 0;
err_cancel:
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 74047bd..e427ad0 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -803,6 +803,7 @@ xfs_setattr_nonsize(
out_cancel:
xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_dqrele:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
index c64bea7..e9f20b8 100644
--- a/include/asm-generic/getorder.h
+++ b/include/asm-generic/getorder.h
@@ -7,24 +7,6 @@
#include <linux/compiler.h>
#include <linux/log2.h>
-/*
- * Runtime evaluation of get_order()
- */
-static inline __attribute_const__
-int __get_order(unsigned long size)
-{
- int order;
-
- size--;
- size >>= PAGE_SHIFT;
-#if BITS_PER_LONG == 32
- order = fls(size);
-#else
- order = fls64(size);
-#endif
- return order;
-}
-
/**
* get_order - Determine the allocation order of a memory size
* @size: The size for which to get the order
@@ -43,19 +25,27 @@ int __get_order(unsigned long size)
* to hold an object of the specified size.
*
* The result is undefined if the size is 0.
- *
- * This function may be used to initialise variables with compile time
- * evaluations of constants.
*/
-#define get_order(n) \
-( \
- __builtin_constant_p(n) ? ( \
- ((n) == 0UL) ? BITS_PER_LONG - PAGE_SHIFT : \
- (((n) < (1UL << PAGE_SHIFT)) ? 0 : \
- ilog2((n) - 1) - PAGE_SHIFT + 1) \
- ) : \
- __get_order(n) \
-)
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+ if (__builtin_constant_p(size)) {
+ if (!size)
+ return BITS_PER_LONG - PAGE_SHIFT;
+
+ if (size < (1UL << PAGE_SHIFT))
+ return 0;
+
+ return ilog2((size) - 1) - PAGE_SHIFT + 1;
+ }
+
+ size--;
+ size >>= PAGE_SHIFT;
+#if BITS_PER_LONG == 32
+ return fls(size);
+#else
+ return fls64(size);
+#endif
+}
#endif /* __ASSEMBLY__ */
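
Both branches above agree on the result: zero for anything that fits in one page, otherwise the ceiling of log2 of the page count. A userspace model for checking the arithmetic (get_order_model() is a stand-in, assuming 4 KiB pages; the real function uses PAGE_SHIFT and fls()/fls64()):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages */

static int get_order_model(unsigned long size)
{
	int order = 0;

	size = (size - 1) >> PAGE_SHIFT;	/* whole pages, rounded up */
	while (size) {				/* open-coded fls() */
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	printf("%d\n", get_order_model(4096));	/* 0: exactly one page */
	printf("%d\n", get_order_model(4097));	/* 1: needs two pages */
	printf("%d\n", get_order_model(40UL << PAGE_SHIFT)); /* 6: 40 pages round up to 64 */
	return 0;
}
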
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index fbf5cfc9b3..fd965ff 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -386,6 +386,7 @@
INTEL_VGA_DEVICE(0x3E91, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E92, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E96, info), /* SRV GT2 */ \
+ INTEL_VGA_DEVICE(0x3E98, info), /* SRV GT2 */ \
INTEL_VGA_DEVICE(0x3E9A, info) /* SRV GT2 */
/* CFL H */
diff --git a/include/dt-bindings/clock/qcom,dispcc-bengal.h b/include/dt-bindings/clock/qcom,dispcc-bengal.h
index 581f827..223ab5a 100644
--- a/include/dt-bindings/clock/qcom,dispcc-bengal.h
+++ b/include/dt-bindings/clock/qcom,dispcc-bengal.h
@@ -32,8 +32,4 @@
#define DISP_CC_XO_CLK 22
#define DISP_CC_XO_CLK_SRC 23
-/* DISP_CC resets */
-#define DISP_CC_MDSS_CORE_BCR 0
-#define DISP_CC_MDSS_RSCC_BCR 1
-
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-bengal.h b/include/dt-bindings/clock/qcom,gcc-bengal.h
index 0d403fc..6e07413 100644
--- a/include/dt-bindings/clock/qcom,gcc-bengal.h
+++ b/include/dt-bindings/clock/qcom,gcc-bengal.h
@@ -65,8 +65,6 @@
#define GCC_CAMSS_TOP_AHB_CLK_SRC 55
#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 56
#define GCC_CPUSS_AHB_CLK 57
-#define GCC_CPUSS_AHB_CLK_SRC 58
-#define GCC_CPUSS_AHB_POSTDIV_CLK_SRC 59
#define GCC_CPUSS_GNOC_CLK 60
#define GCC_CPUSS_THROTTLE_CORE_CLK 61
#define GCC_CPUSS_THROTTLE_XO_CLK 62
@@ -89,123 +87,101 @@
#define GCC_GPU_SNOC_DVM_GFX_CLK 79
#define GCC_GPU_THROTTLE_CORE_CLK 80
#define GCC_GPU_THROTTLE_XO_CLK 81
-#define GCC_MSS_VS_CLK 82
-#define GCC_PDM2_CLK 83
-#define GCC_PDM2_CLK_SRC 84
-#define GCC_PDM_AHB_CLK 85
-#define GCC_PDM_XO4_CLK 86
-#define GCC_PRNG_AHB_CLK 87
-#define GCC_QMIP_CAMERA_NRT_AHB_CLK 88
-#define GCC_QMIP_CAMERA_RT_AHB_CLK 89
-#define GCC_QMIP_CPUSS_CFG_AHB_CLK 90
-#define GCC_QMIP_DISP_AHB_CLK 91
-#define GCC_QMIP_GPU_CFG_AHB_CLK 92
-#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 93
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK 94
-#define GCC_QUPV3_WRAP0_CORE_CLK 95
-#define GCC_QUPV3_WRAP0_S0_CLK 96
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC 97
-#define GCC_QUPV3_WRAP0_S1_CLK 98
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC 99
-#define GCC_QUPV3_WRAP0_S2_CLK 100
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC 101
-#define GCC_QUPV3_WRAP0_S3_CLK 102
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC 103
-#define GCC_QUPV3_WRAP0_S4_CLK 104
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC 105
-#define GCC_QUPV3_WRAP0_S5_CLK 106
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC 107
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK 108
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK 109
-#define GCC_SDCC1_AHB_CLK 110
-#define GCC_SDCC1_APPS_CLK 111
-#define GCC_SDCC1_APPS_CLK_SRC 112
-#define GCC_SDCC1_ICE_CORE_CLK 113
-#define GCC_SDCC1_ICE_CORE_CLK_SRC 114
-#define GCC_SDCC2_AHB_CLK 115
-#define GCC_SDCC2_APPS_CLK 116
-#define GCC_SDCC2_APPS_CLK_SRC 117
-#define GCC_SYS_NOC_CPUSS_AHB_CLK 118
-#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 119
-#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 120
-#define GCC_UFS_PHY_AHB_CLK 121
-#define GCC_UFS_PHY_AXI_CLK 122
-#define GCC_UFS_PHY_AXI_CLK_SRC 123
-#define GCC_UFS_PHY_ICE_CORE_CLK 124
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 125
-#define GCC_UFS_PHY_PHY_AUX_CLK 126
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 127
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 128
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 129
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK 130
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 131
-#define GCC_USB30_PRIM_MASTER_CLK 132
-#define GCC_USB30_PRIM_MASTER_CLK_SRC 133
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK 134
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 135
-#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 136
-#define GCC_USB30_PRIM_SLEEP_CLK 137
-#define GCC_USB3_PRIM_CLKREF_CLK 138
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 139
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 140
-#define GCC_USB3_PRIM_PHY_PIPE_CLK 141
-#define GCC_VCODEC0_AXI_CLK 142
-#define GCC_VDDA_VS_CLK 143
-#define GCC_VDDCX_VS_CLK 144
-#define GCC_VDDMX_VS_CLK 145
-#define GCC_VENUS_AHB_CLK 146
-#define GCC_VENUS_CTL_AXI_CLK 147
-#define GCC_VIDEO_AHB_CLK 148
-#define GCC_VIDEO_AXI0_CLK 149
-#define GCC_VIDEO_THROTTLE_CORE_CLK 150
-#define GCC_VIDEO_VCODEC0_SYS_CLK 151
-#define GCC_VIDEO_VENUS_CLK_SRC 152
-#define GCC_VIDEO_VENUS_CTL_CLK 153
-#define GCC_VIDEO_XO_CLK 154
-#define GCC_VS_CTRL_AHB_CLK 155
-#define GCC_VS_CTRL_CLK 156
-#define GCC_VS_CTRL_CLK_SRC 157
-#define GCC_VSENSOR_CLK_SRC 158
-#define GCC_WCSS_VS_CLK 159
-#define GCC_AHB2PHY_CSI_CLK 160
-#define GCC_AHB2PHY_USB_CLK 161
-#define GCC_APC_VS_CLK 162
-#define GCC_BIMC_GPU_AXI_CLK 163
-#define GCC_BOOT_ROM_AHB_CLK 164
-#define GCC_CAM_THROTTLE_NRT_CLK 165
-#define GCC_CAM_THROTTLE_RT_CLK 166
-#define GCC_CAMERA_AHB_CLK 167
-#define GCC_CAMERA_XO_CLK 168
-#define GCC_CAMSS_AXI_CLK 169
-#define GCC_CAMSS_AXI_CLK_SRC 170
-#define GCC_CAMSS_CAMNOC_ATB_CLK 171
-#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 172
-#define GCC_CAMSS_CCI_0_CLK 173
-#define GCC_CAMSS_CCI_CLK_SRC 174
-#define GCC_CAMSS_CPHY_0_CLK 175
-#define GCC_CAMSS_CPHY_1_CLK 176
-#define GCC_CAMSS_CPHY_2_CLK 177
+#define GCC_PDM2_CLK 82
+#define GCC_PDM2_CLK_SRC 83
+#define GCC_PDM_AHB_CLK 84
+#define GCC_PDM_XO4_CLK 85
+#define GCC_PRNG_AHB_CLK 86
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 87
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 88
+#define GCC_QMIP_CPUSS_CFG_AHB_CLK 89
+#define GCC_QMIP_DISP_AHB_CLK 90
+#define GCC_QMIP_GPU_CFG_AHB_CLK 91
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 92
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 93
+#define GCC_QUPV3_WRAP0_CORE_CLK 94
+#define GCC_QUPV3_WRAP0_S0_CLK 95
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 96
+#define GCC_QUPV3_WRAP0_S1_CLK 97
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 98
+#define GCC_QUPV3_WRAP0_S2_CLK 99
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 100
+#define GCC_QUPV3_WRAP0_S3_CLK 101
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 102
+#define GCC_QUPV3_WRAP0_S4_CLK 103
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 104
+#define GCC_QUPV3_WRAP0_S5_CLK 105
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 106
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 107
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 108
+#define GCC_SDCC1_AHB_CLK 109
+#define GCC_SDCC1_APPS_CLK 110
+#define GCC_SDCC1_APPS_CLK_SRC 111
+#define GCC_SDCC1_ICE_CORE_CLK 112
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 113
+#define GCC_SDCC2_AHB_CLK 114
+#define GCC_SDCC2_APPS_CLK 115
+#define GCC_SDCC2_APPS_CLK_SRC 116
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 117
+#define GCC_SYS_NOC_UFS_PHY_AXI_CLK 118
+#define GCC_SYS_NOC_USB3_PRIM_AXI_CLK 119
+#define GCC_UFS_PHY_AHB_CLK 120
+#define GCC_UFS_PHY_AXI_CLK 121
+#define GCC_UFS_PHY_AXI_CLK_SRC 122
+#define GCC_UFS_PHY_ICE_CORE_CLK 123
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 124
+#define GCC_UFS_PHY_PHY_AUX_CLK 125
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 126
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 127
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 128
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 129
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 130
+#define GCC_USB30_PRIM_MASTER_CLK 131
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 132
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 133
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 134
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 135
+#define GCC_USB30_PRIM_SLEEP_CLK 136
+#define GCC_USB3_PRIM_CLKREF_CLK 137
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 138
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 139
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 140
+#define GCC_VCODEC0_AXI_CLK 141
+#define GCC_VENUS_AHB_CLK 142
+#define GCC_VENUS_CTL_AXI_CLK 143
+#define GCC_VIDEO_AHB_CLK 144
+#define GCC_VIDEO_AXI0_CLK 145
+#define GCC_VIDEO_THROTTLE_CORE_CLK 146
+#define GCC_VIDEO_VCODEC0_SYS_CLK 147
+#define GCC_VIDEO_VENUS_CLK_SRC 148
+#define GCC_VIDEO_VENUS_CTL_CLK 149
+#define GCC_VIDEO_XO_CLK 150
+#define GCC_AHB2PHY_CSI_CLK 151
+#define GCC_AHB2PHY_USB_CLK 152
+#define GCC_BIMC_GPU_AXI_CLK 153
+#define GCC_BOOT_ROM_AHB_CLK 154
+#define GCC_CAM_THROTTLE_NRT_CLK 155
+#define GCC_CAM_THROTTLE_RT_CLK 156
+#define GCC_CAMERA_AHB_CLK 157
+#define GCC_CAMERA_XO_CLK 158
+#define GCC_CAMSS_AXI_CLK 159
+#define GCC_CAMSS_AXI_CLK_SRC 160
+#define GCC_CAMSS_CAMNOC_ATB_CLK 161
+#define GCC_CAMSS_CAMNOC_NTS_XO_CLK 162
+#define GCC_CAMSS_CCI_0_CLK 163
+#define GCC_CAMSS_CCI_CLK_SRC 164
+#define GCC_CAMSS_CPHY_0_CLK 165
+#define GCC_CAMSS_CPHY_1_CLK 166
+#define GCC_CAMSS_CPHY_2_CLK 167
/* GCC resets */
-#define GCC_CAMSS_OPE_BCR 0
-#define GCC_CAMSS_TFE_BCR 1
-#define GCC_CAMSS_TOP_BCR 2
-#define GCC_GPU_BCR 3
-#define GCC_MMSS_BCR 4
-#define GCC_PDM_BCR 5
-#define GCC_PRNG_BCR 6
-#define GCC_QUPV3_WRAPPER_0_BCR 7
-#define GCC_QUPV3_WRAPPER_1_BCR 8
-#define GCC_QUSB2PHY_PRIM_BCR 9
-#define GCC_QUSB2PHY_SEC_BCR 10
-#define GCC_SDCC1_BCR 11
-#define GCC_SDCC2_BCR 12
-#define GCC_UFS_PHY_BCR 13
-#define GCC_USB30_PRIM_BCR 14
-#define GCC_USB_PHY_CFG_AHB2PHY_BCR 15
-#define GCC_VCODEC0_BCR 16
-#define GCC_VENUS_BCR 17
-#define GCC_VIDEO_INTERFACE_BCR 18
-#define GCC_VS_BCR 19
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 2
+#define GCC_UFS_PHY_BCR 3
+#define GCC_USB30_PRIM_BCR 4
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 5
+#define GCC_VCODEC0_BCR 6
+#define GCC_VENUS_BCR 7
+#define GCC_VIDEO_INTERFACE_BCR 8
#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-bengal.h b/include/dt-bindings/clock/qcom,gpucc-bengal.h
index 19a39e2..181005b 100644
--- a/include/dt-bindings/clock/qcom,gpucc-bengal.h
+++ b/include/dt-bindings/clock/qcom,gpucc-bengal.h
@@ -7,25 +7,22 @@
#define _DT_BINDINGS_CLK_QCOM_GPU_CC_BENGAL_H
/* GPU_CC clocks */
-#define GPU_CC_AHB_CLK 0
-#define GPU_CC_CRC_AHB_CLK 1
-#define GPU_CC_CX_APB_CLK 2
-#define GPU_CC_CX_GFX3D_CLK 3
-#define GPU_CC_CX_GFX3D_SLV_CLK 4
-#define GPU_CC_CX_GMU_CLK 5
-#define GPU_CC_CX_SNOC_DVM_CLK 9
-#define GPU_CC_CXO_AON_CLK 10
-#define GPU_CC_CXO_CLK 11
-#define GPU_CC_GMU_CLK_SRC 12
-#define GPU_CC_GX_CXO_CLK 13
-#define GPU_CC_GX_GFX3D_CLK 14
-#define GPU_CC_SLEEP_CLK 16
-
-/* GPU_CC resets */
-#define GPUCC_GPU_CC_CX_BCR 0
-#define GPUCC_GPU_CC_GFX3D_AON_BCR 1
-#define GPUCC_GPU_CC_GMU_BCR 2
-#define GPUCC_GPU_CC_GX_BCR 3
-#define GPUCC_GPU_CC_XO_BCR 4
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL0_OUT_AUX2 1
+#define GPU_CC_PLL1 2
+#define GPU_CC_PLL1_OUT_AUX 3
+#define GPU_CC_AHB_CLK 4
+#define GPU_CC_CRC_AHB_CLK 5
+#define GPU_CC_CX_GFX3D_CLK 6
+#define GPU_CC_CX_GMU_CLK 7
+#define GPU_CC_CX_SNOC_DVM_CLK 8
+#define GPU_CC_CXO_AON_CLK 9
+#define GPU_CC_CXO_CLK 10
+#define GPU_CC_GMU_CLK_SRC 11
+#define GPU_CC_GX_CXO_CLK 12
+#define GPU_CC_GX_GFX3D_CLK 13
+#define GPU_CC_GX_GFX3D_CLK_SRC 14
+#define GPU_CC_SLEEP_CLK 15
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 16
#endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 90ac450..561fefc 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -361,6 +361,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
void kvm_vgic_load(struct kvm_vcpu *vcpu);
void kvm_vgic_put(struct kvm_vcpu *vcpu);
+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu);
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) ((k)->arch.vgic.initialized)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index de8d3d3..b4d23b3 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -326,7 +326,10 @@ void acpi_set_irq_model(enum acpi_irq_model_id model,
#ifdef CONFIG_X86_IO_APIC
extern int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
#else
-#define acpi_get_override_irq(gsi, trigger, polarity) (-1)
+static inline int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
+{
+ return -1;
+}
#endif
/*
* This function undoes the effect of one call to acpi_register_gsi().
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 741bfb7..3a8a3902 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -505,6 +505,12 @@ struct request_queue {
* various queue flags, see QUEUE_* below
*/
unsigned long queue_flags;
+ /*
+ * Number of contexts that have called blk_set_pm_only(). If this
+ * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
+ * processed.
+ */
+ atomic_t pm_only;
/*
* ida allocated id for this queue. Used to index queues from
@@ -699,8 +705,7 @@ struct request_queue {
#define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
-#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */
-#define QUEUE_FLAG_INLINECRYPT 30 /* inline encryption support */
+#define QUEUE_FLAG_INLINECRYPT 29 /* inline encryption support */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
@@ -740,12 +745,11 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
-#define blk_queue_preempt_only(q) \
- test_bit(QUEUE_FLAG_PREEMPT_ONLY, &(q)->queue_flags)
+#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
-extern int blk_set_preempt_only(struct request_queue *q);
-extern void blk_clear_preempt_only(struct request_queue *q);
+extern void blk_set_pm_only(struct request_queue *q);
+extern void blk_clear_pm_only(struct request_queue *q);
static inline int queue_in_flight(struct request_queue *q)
{
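
The pm_only comment above implies a nesting count rather than a flag: multiple contexts may enter the PM-only state, and normal requests resume only when the last one leaves. A plausible shape for the two setters declared here (a sketch under that assumption, not necessarily the in-tree bodies):

void blk_set_pm_only(struct request_queue *q)
{
	atomic_inc(&q->pm_only);
}

void blk_clear_pm_only(struct request_queue *q)
{
	int pm_only = atomic_dec_return(&q->pm_only);

	WARN_ON_ONCE(pm_only < 0);
	/* pm_only == 0: non-RQF_PM/RQF_PREEMPT requests may proceed again */
}
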
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index 7e9c991..43ed9e7 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -173,6 +173,8 @@ struct ccp_aes_engine {
enum ccp_aes_mode mode;
enum ccp_aes_action action;
+ u32 authsize;
+
struct scatterlist *key;
u32 key_len; /* In bytes */
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 6984e7e..e78086c 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -209,6 +209,7 @@ struct css_set {
*/
struct list_head tasks;
struct list_head mg_tasks;
+ struct list_head dying_tasks;
/* all css_task_iters currently walking this cset */
struct list_head task_iters;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index f19635e..7fd81b7 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -43,6 +43,9 @@
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED (1U << 1)
+/* internal flags */
+#define CSS_TASK_ITER_SKIPPED (1U << 16)
+
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
struct cgroup_subsys *ss;
@@ -57,6 +60,7 @@ struct css_task_iter {
struct list_head *task_pos;
struct list_head *tasks_head;
struct list_head *mg_tasks_head;
+ struct list_head *dying_tasks_head;
struct css_set *cur_cset;
struct css_set *cur_dcset;
diff --git a/include/linux/coda.h b/include/linux/coda.h
index d30209b..0ca0c83 100644
--- a/include/linux/coda.h
+++ b/include/linux/coda.h
@@ -58,8 +58,7 @@ Mellon the rights to redistribute these changes without encumbrance.
#ifndef _CODA_HEADER_
#define _CODA_HEADER_
-#if defined(__linux__)
typedef unsigned long long u_quad_t;
-#endif
+
#include <uapi/linux/coda.h>
#endif
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index 1517095..57d2b2f 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -19,6 +19,17 @@ struct venus_comm {
struct mutex vc_mutex;
};
+/* messages between coda filesystem in kernel and Venus */
+struct upc_req {
+ struct list_head uc_chain;
+ caddr_t uc_data;
+ u_short uc_flags;
+ u_short uc_inSize; /* Size is at most 5000 bytes */
+ u_short uc_outSize;
+ u_short uc_opcode; /* copied from data to save lookup */
+ int uc_unique;
+ wait_queue_head_t uc_sleep; /* process' wait queue */
+};
static inline struct venus_comm *coda_vcp(struct super_block *sb)
{
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 7eed610..1dc351d 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -150,7 +150,11 @@ struct cred {
struct user_struct *user; /* real user ID subscription */
struct user_namespace *user_ns; /* user_ns the caps and keyrings are relative to. */
struct group_info *group_info; /* supplementary groups for euid/fsgid */
- struct rcu_head rcu; /* RCU deletion hook */
+ /* RCU deletion */
+ union {
+ int non_rcu; /* Can we skip RCU deletion? */
+ struct rcu_head rcu; /* RCU deletion hook */
+ };
} __randomize_layout;
extern void __put_cred(struct cred *);
@@ -248,6 +252,7 @@ static inline const struct cred *get_cred(const struct cred *cred)
{
struct cred *nonconst_cred = (struct cred *) cred;
validate_creds(cred);
+ nonconst_cred->non_rcu = 0;
return get_new_cred(nonconst_cred);
}
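
The union above lets a credential that never left its owning task skip the RCU grace period on its final put; get_cred() clears non_rcu because handing out another reference reintroduces concurrent readers. A sketch of how the free side would consume the flag (hypothetical put path; cred_jar is assumed to be the slab cache backing struct cred):

static void put_cred_rcu(struct rcu_head *rcu)
{
	struct cred *cred = container_of(rcu, struct cred, rcu);

	/* ... drop keyrings, group_info, user refs ... */
	kmem_cache_free(cred_jar, cred);
}

void __put_cred(struct cred *cred)
{
	if (cred->non_rcu)
		put_cred_rcu(&cred->rcu);	/* no other readers can exist */
	else
		call_rcu(&cred->rcu, put_cred_rcu);
}
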
diff --git a/include/linux/device.h b/include/linux/device.h
index cf133f5..7e5c1e5 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1336,6 +1336,7 @@ extern int (*platform_notify_remove)(struct device *dev);
*/
extern struct device *get_device(struct device *dev);
extern void put_device(struct device *dev);
+extern bool kill_device(struct device *dev);
#ifdef CONFIG_DEVTMPFS
extern int devtmpfs_create_node(struct device *dev);
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 89110d8..aef6e2f 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -310,6 +310,8 @@ struct host1x_device {
struct list_head clients;
bool registered;
+
+ struct device_dma_parameters dma_parms;
};
static inline struct host1x_device *to_host1x_device(struct device *dev)
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index ba7a9b0..24e9b36 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -84,6 +84,9 @@ extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
extern void unregister_pppox_proto(int proto_num);
extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */
extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+extern int pppox_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
+
+#define PPPOEIOCSFWD32 _IOW(0xB1, 0, compat_size_t)
/* PPPoX socket states */
enum {
diff --git a/include/linux/iova.h b/include/linux/iova.h
index 3acb4f5..c9fb031 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -157,6 +157,7 @@ struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
unsigned long start_pfn);
+bool has_iova_flush_queue(struct iova_domain *iovad);
int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -237,6 +238,11 @@ static inline void init_iova_domain(struct iova_domain *iovad,
{
}
+static inline bool has_iova_flush_queue(struct iova_domain *iovad)
+{
+ return false;
+}
+
static inline int init_iova_flush_queue(struct iova_domain *iovad,
iova_flush_cb flush_cb,
iova_entry_dtor entry_dtor)
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
index 33fd4e9..27bbf4e 100644
--- a/include/linux/ipa.h
+++ b/include/linux/ipa.h
@@ -13,6 +13,8 @@
#include "linux/msm_gsi.h"
#define IPA_APPS_MAX_BW_IN_MBPS 700
+#define IPA_BW_THRESHOLD_MAX 3
+
/**
* enum ipa_transport_type
* transport type: either GSI or SPS
@@ -501,10 +503,13 @@ typedef void (*ipa_notify_cb)(void *priv, enum ipa_dp_evt_type evt,
* use ipa_get_wdi_sap_stats structure
* @IPA_SET_WIFI_QUOTA: set quota limit on STA -
* use ipa_set_wifi_quota structure
+ * @IPA_INFORM_WLAN_BW: inform wlan BW -
+ * use ipa_inform_wlan_bw structure
*/
enum ipa_wdi_meter_evt_type {
IPA_GET_WDI_SAP_STATS,
IPA_SET_WIFI_QUOTA,
+ IPA_INFORM_WLAN_BW,
};
struct ipa_get_wdi_sap_stats {
@@ -540,6 +545,19 @@ struct ipa_set_wifi_quota {
uint8_t set_valid;
};
+/**
+ * struct ipa_inform_wlan_bw - structure used for
+ * IPA_INFORM_WLAN_BW.
+ *
+ * @index: indicates which bw-index was hit
+ * @throughput: throughput usage
+ *
+ */
+struct ipa_inform_wlan_bw {
+ uint8_t index;
+ uint64_t throughput;
+};
+
typedef void (*ipa_wdi_meter_notifier_cb)(enum ipa_wdi_meter_evt_type evt,
void *data);
@@ -1154,6 +1172,32 @@ struct ipa_wdi_buffer_info {
};
/**
+ * struct ipa_wdi_bw_info - bandwidth monitoring info from WLAN
+ * @threshold: throughput thresholds to be monitored
+ * @num: number of threshold entries
+ * @stop: true to stop monitoring
+ *
+ * Used to configure bandwidth monitoring thresholds in the IPA driver
+ */
+struct ipa_wdi_bw_info {
+ uint64_t threshold[IPA_BW_THRESHOLD_MAX];
+ int num;
+ bool stop;
+};
+
+/**
+ * struct ipa_wdi_tx_info - sw tx info from WLAN
+ * @sta_tx: sw tx stats on sta interface
+ * @ap_tx: sw tx stats on ap interface
+ *
+ * Used to report WLAN software tx statistics to the IPA driver
+ */
+struct ipa_wdi_tx_info {
+ uint64_t sta_tx;
+ uint64_t ap_tx;
+};
+
+/**
* struct ipa_gsi_ep_config - IPA GSI endpoint configurations
*
* @ipa_ep_num: IPA EP pipe number
@@ -1421,6 +1465,8 @@ int ipa_disable_wdi_pipe(u32 clnt_hdl);
int ipa_resume_wdi_pipe(u32 clnt_hdl);
int ipa_suspend_wdi_pipe(u32 clnt_hdl);
int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
+int ipa_uc_bw_monitor(struct ipa_wdi_bw_info *info);
+int ipa_set_wlan_tx_info(struct ipa_wdi_tx_info *info);
u16 ipa_get_smem_restr_bytes(void);
int ipa_broadcast_wdi_quota_reach_ind(uint32_t fid,
uint64_t num_bytes);
@@ -2357,6 +2403,16 @@ static inline int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
return -EPERM;
}
+static inline int ipa_uc_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+ return -EPERM;
+}
+
+static inline int ipa_set_wlan_tx_info(struct ipa_wdi_tx_info *info)
+{
+ return -EPERM;
+}
+
static inline int ipa_get_ep_mapping(enum ipa_client_type client)
{
return -EPERM;
diff --git a/include/linux/ipa_wdi3.h b/include/linux/ipa_wdi3.h
index fed0082..c1497c5 100644
--- a/include/linux/ipa_wdi3.h
+++ b/include/linux/ipa_wdi3.h
@@ -347,6 +347,29 @@ int ipa_wdi_release_smmu_mapping(u32 num_buffers,
*/
int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats);
+
+/**
+ * ipa_wdi_bw_monitor() - set wdi BW monitoring
+ * @info: [inout] info blob from client populated by driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_wdi_bw_monitor(struct ipa_wdi_bw_info *info);
+
+/**
+ * ipa_wdi_sw_stats() - report WLAN software tx statistics
+ * @info: [inout] info blob from client populated by driver
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_wdi_sw_stats(struct ipa_wdi_tx_info *info);
+
#else /* (CONFIG_IPA || CONFIG_IPA3) */
static inline int ipa_wdi_init(struct ipa_wdi_init_in_params *in,
@@ -415,6 +438,16 @@ static inline int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats)
return -EPERM;
}
+static inline int ipa_wdi_bw_monitor(struct ipa_wdi_bw_info *info)
+{
+ return -EPERM;
+}
+
+static inline int ipa_wdi_sw_stats(struct ipa_wdi_tx_info *info)
+{
+ return -EPERM;
+}
+
#endif /* CONFIG_IPA3 */
#endif /* _IPA_WDI3_H_ */
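
A hypothetical WLAN-side caller of the monitoring hook declared above; start_bw_monitor() and the threshold values are invented for illustration:

static int start_bw_monitor(void)
{
	struct ipa_wdi_bw_info info = {
		.threshold = { 100, 400, 800 },	/* one level per index, made up */
		.num = IPA_BW_THRESHOLD_MAX,
		.stop = false,			/* start, not stop, monitoring */
	};

	return ipa_wdi_bw_monitor(&info);
}
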
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 30efb36..d42a36e 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -818,6 +818,7 @@ void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
+bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index 93a4778..9c1c709 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -17,28 +17,28 @@ struct mhi_buf_info;
* enum MHI_CB - MHI callback
* @MHI_CB_IDLE: MHI entered idle state
* @MHI_CB_PENDING_DATA: New data available for client to process
+ * @MHI_CB_DTR_SIGNAL: DTR signaling update
* @MHI_CB_LPM_ENTER: MHI host entered low power mode
* @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
 * @MHI_CB_EE_RDDM: MHI device entered RDDM execution environment
* @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode ee
* @MHI_CB_SYS_ERROR: MHI device enter error state (may recover)
* @MHI_CB_FATAL_ERROR: MHI device entered fatal error
- * @MHI_CB_BW_REQ: Received a bandwidth switch request from device
*/
enum MHI_CB {
MHI_CB_IDLE,
MHI_CB_PENDING_DATA,
+ MHI_CB_DTR_SIGNAL,
MHI_CB_LPM_ENTER,
MHI_CB_LPM_EXIT,
MHI_CB_EE_RDDM,
MHI_CB_EE_MISSION_MODE,
MHI_CB_SYS_ERROR,
MHI_CB_FATAL_ERROR,
- MHI_CB_BW_REQ,
};
/**
- * enum MHI_DEBUG_LEVL - various debugging level
+ * enum MHI_DEBUG_LEVEL - various debugging levels
*/
enum MHI_DEBUG_LEVEL {
MHI_MSG_LVL_VERBOSE,
@@ -46,6 +46,7 @@ enum MHI_DEBUG_LEVEL {
MHI_MSG_LVL_ERROR,
MHI_MSG_LVL_CRITICAL,
MHI_MSG_LVL_MASK_ALL,
+ MHI_MSG_LVL_MAX,
};
/**
@@ -119,10 +120,12 @@ enum mhi_dev_state {
* struct mhi_link_info - bw requirement
* target_link_speed - as defined by TLS bits in LinkControl reg
* target_link_width - as defined by NLW bits in LinkStatus reg
+ * sequence_num - used by device to track bw requests sent to host
*/
struct mhi_link_info {
unsigned int target_link_speed;
unsigned int target_link_width;
+ int sequence_num;
};
/**
@@ -198,6 +201,7 @@ struct mhi_controller {
void __iomem *bhi;
void __iomem *bhie;
void __iomem *wake_db;
+ void __iomem *bw_scale_db;
/* device topology */
u32 dev_id;
@@ -240,6 +244,7 @@ struct mhi_controller {
u32 msi_allocated;
int *irq; /* interrupt table */
struct mhi_event *mhi_event;
+ struct list_head lp_ev_rings; /* low priority event rings */
/* cmd rings */
struct mhi_cmd *mhi_cmd;
@@ -278,6 +283,7 @@ struct mhi_controller {
struct work_struct st_worker;
struct work_struct fw_worker;
struct work_struct syserr_worker;
+ struct work_struct low_priority_worker;
wait_queue_head_t state_event;
/* shadow functions */
@@ -297,6 +303,8 @@ struct mhi_controller {
void (*unmap_single)(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf);
void (*tsync_log)(struct mhi_controller *mhi_cntrl, u64 remote_time);
+ int (*bw_scale)(struct mhi_controller *mhi_cntrl,
+ struct mhi_link_info *link_info);
/* channel to control DTR messaging */
struct mhi_device *dtr_dev;
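
A controller driver opting into the new bw_scale hook would install a callback with the signature added above; a minimal sketch (my_bw_scale() is hypothetical, the retraining sequence is controller specific, and a zero return is assumed to let the core ack the device request):

static int my_bw_scale(struct mhi_controller *mhi_cntrl,
		       struct mhi_link_info *link_info)
{
	/* retrain the link to link_info->target_link_speed/width here */
	return 0;
}

static void my_setup(struct mhi_controller *mhi_cntrl)
{
	mhi_cntrl->bw_scale = my_bw_scale;
}
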
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 804516e..3386399 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -188,6 +188,7 @@ int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_handle *handler);
struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
+u64 mlx5_fc_query_lastuse(struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
int mlx5_fc_query(struct mlx5_core_dev *dev, struct mlx5_fc *counter,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index f043d65..177f11c 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -5623,7 +5623,12 @@ struct mlx5_ifc_modify_cq_in_bits {
struct mlx5_ifc_cqc_bits cq_context;
- u8 reserved_at_280[0x600];
+ u8 reserved_at_280[0x60];
+
+ u8 cq_umem_valid[0x1];
+ u8 reserved_at_2e1[0x1f];
+
+ u8 reserved_at_300[0x580];
u8 pas[0][0x40];
};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index eceacc2..6b99745 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1201,7 +1201,15 @@ struct task_struct {
u64 last_sum_exec_runtime;
struct callback_head numa_work;
- struct numa_group *numa_group;
+ /*
+ * This pointer is only modified for current in syscall and
+ * pagefault context (and for tasks being destroyed), so it can be read
+ * from any of the following contexts:
+ * - RCU read-side critical section
+ * - current->numa_group from everywhere
+ * - task's runqueue locked, task not running
+ */
+ struct numa_group __rcu *numa_group;
/*
* numa_faults is an array split into four regions:
diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h
index e7dd04a..3988762 100644
--- a/include/linux/sched/numa_balancing.h
+++ b/include/linux/sched/numa_balancing.h
@@ -19,7 +19,7 @@
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
-extern void task_numa_free(struct task_struct *p);
+extern void task_numa_free(struct task_struct *p, bool final);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
int src_nid, int dst_cpu);
#else
@@ -34,7 +34,7 @@ static inline pid_t task_numa_group_id(struct task_struct *p)
static inline void set_numabalancing_state(bool enabled)
{
}
-static inline void task_numa_free(struct task_struct *p)
+static inline void task_numa_free(struct task_struct *p, bool final)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 3ab6949..d76803d 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -34,6 +34,8 @@ extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
extern unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
#ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_user_hint;
+extern const int sched_user_hint_max;
extern unsigned int sysctl_sched_cpu_high_irqload;
extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_group_upmigrate_pct;
@@ -49,6 +51,10 @@ extern int
walt_proc_group_thresholds_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
+extern int
+walt_proc_user_hint_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
#endif
diff --git a/include/linux/usb/usb_qdss.h b/include/linux/usb/usb_qdss.h
index 9bc215d..d42bd54 100644
--- a/include/linux/usb/usb_qdss.h
+++ b/include/linux/usb/usb_qdss.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2012-2013, 2017-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2013, 2017-2019 The Linux Foundation. All rights reserved.
*/
#ifndef __LINUX_USB_QDSS_H
@@ -17,6 +17,9 @@ struct qdss_request {
int actual;
int status;
void *context;
+ struct scatterlist *sg;
+ unsigned int num_sgs;
+ unsigned int num_mapped_sgs;
};
struct usb_qdss_ch {
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 3424613..6021b41 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -342,6 +342,60 @@ struct ieee80211_sband_iftype_data {
};
/**
+ * enum ieee80211_edmg_bw_config - allowed channel bandwidth configurations
+ *
+ * @IEEE80211_EDMG_BW_CONFIG_4: 2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_5: 2.16GHz and 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_6: 2.16GHz, 4.32GHz and 6.48GHz
+ * @IEEE80211_EDMG_BW_CONFIG_7: 2.16GHz, 4.32GHz, 6.48GHz and 8.64GHz
+ * @IEEE80211_EDMG_BW_CONFIG_8: 2.16GHz and 2.16GHz + 2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_9: 2.16GHz, 4.32GHz and 2.16GHz + 2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_10: 2.16GHz, 4.32GHz, 6.48GHz and 2.16GHz+2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_11: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz and
+ * 2.16GHz+2.16GHz
+ * @IEEE80211_EDMG_BW_CONFIG_12: 2.16GHz, 2.16GHz + 2.16GHz and
+ * 4.32GHz + 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_13: 2.16GHz, 4.32GHz, 2.16GHz + 2.16GHz and
+ * 4.32GHz + 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_14: 2.16GHz, 4.32GHz, 6.48GHz, 2.16GHz + 2.16GHz
+ * and 4.32GHz + 4.32GHz
+ * @IEEE80211_EDMG_BW_CONFIG_15: 2.16GHz, 4.32GHz, 6.48GHz, 8.64GHz,
+ * 2.16GHz + 2.16GHz and 4.32GHz + 4.32GHz
+ */
+enum ieee80211_edmg_bw_config {
+ IEEE80211_EDMG_BW_CONFIG_4 = 4,
+ IEEE80211_EDMG_BW_CONFIG_5 = 5,
+ IEEE80211_EDMG_BW_CONFIG_6 = 6,
+ IEEE80211_EDMG_BW_CONFIG_7 = 7,
+ IEEE80211_EDMG_BW_CONFIG_8 = 8,
+ IEEE80211_EDMG_BW_CONFIG_9 = 9,
+ IEEE80211_EDMG_BW_CONFIG_10 = 10,
+ IEEE80211_EDMG_BW_CONFIG_11 = 11,
+ IEEE80211_EDMG_BW_CONFIG_12 = 12,
+ IEEE80211_EDMG_BW_CONFIG_13 = 13,
+ IEEE80211_EDMG_BW_CONFIG_14 = 14,
+ IEEE80211_EDMG_BW_CONFIG_15 = 15,
+};
+
+/**
+ * struct ieee80211_edmg - EDMG configuration
+ *
+ * This structure holds the essential parameters needed
+ * to describe an 802.11ay EDMG configuration.
+ *
+ * @channels: bitmap that indicates the 2.16 GHz channel(s)
+ * that are allowed to be used for transmissions.
+ * Bit 0 indicates channel 1, bit 1 indicates channel 2, etc.
+ * Set to 0 to indicate that EDMG is not supported.
+ * @bw_config: Channel BW Configuration subfield encodes
+ * the allowed channel bandwidth configurations
+ */
+struct ieee80211_edmg {
+ u8 channels;
+ enum ieee80211_edmg_bw_config bw_config;
+};
+
+/**
* struct ieee80211_supported_band - frequency band definition
*
* This structure describes a frequency band a wiphy
@@ -357,6 +411,7 @@ struct ieee80211_sband_iftype_data {
* @n_bitrates: Number of bitrates in @bitrates
* @ht_cap: HT capabilities in this band
* @vht_cap: VHT capabilities in this band
+ * @edmg_cap: EDMG capabilities in this band
* @n_iftype_data: number of iftype data entries
* @iftype_data: interface type data entries. Note that the bits in
* @types_mask inside this structure cannot overlap (i.e. only
@@ -371,6 +426,7 @@ struct ieee80211_supported_band {
int n_bitrates;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
+ struct ieee80211_edmg edmg_cap;
u16 n_iftype_data;
const struct ieee80211_sband_iftype_data *iftype_data;
};
@@ -522,12 +578,17 @@ struct key_params {
* @center_freq1: center frequency of first segment
* @center_freq2: center frequency of second segment
* (only with 80+80 MHz)
+ * @edmg: defines the EDMG channel configuration.
+ * If edmg is requested (i.e. the .channels member is non-zero),
+ * chan will define the primary channel and all other
+ * parameters are ignored.
*/
struct cfg80211_chan_def {
struct ieee80211_channel *chan;
enum nl80211_chan_width width;
u32 center_freq1;
u32 center_freq2;
+ struct ieee80211_edmg edmg;
};
/**
@@ -586,6 +647,19 @@ cfg80211_chandef_identical(const struct cfg80211_chan_def *chandef1,
}
/**
+ * cfg80211_chandef_is_edmg - check if chandef represents an EDMG channel
+ *
+ * @chandef: the channel definition
+ *
+ * Return: %true if EDMG defined, %false otherwise.
+ */
+static inline bool
+cfg80211_chandef_is_edmg(const struct cfg80211_chan_def *chandef)
+{
+ return chandef->edmg.channels || chandef->edmg.bw_config;
+}
+
+/**
* cfg80211_chandef_compatible - check if two channel definitions are compatible
* @chandef1: first channel definition
* @chandef2: second channel definition
@@ -1124,15 +1198,17 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
* @RATE_INFO_FLAGS_MCS: mcs field filled with HT MCS
* @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
* @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
- * @RATE_INFO_FLAGS_60G: 60GHz MCS
+ * @RATE_INFO_FLAGS_DMG: 60GHz MCS
* @RATE_INFO_FLAGS_HE_MCS: HE MCS information
+ * @RATE_INFO_FLAGS_EDMG: 60GHz MCS in EDMG mode
*/
enum rate_info_flags {
RATE_INFO_FLAGS_MCS = BIT(0),
RATE_INFO_FLAGS_VHT_MCS = BIT(1),
RATE_INFO_FLAGS_SHORT_GI = BIT(2),
- RATE_INFO_FLAGS_60G = BIT(3),
+ RATE_INFO_FLAGS_DMG = BIT(3),
RATE_INFO_FLAGS_HE_MCS = BIT(4),
+ RATE_INFO_FLAGS_EDMG = BIT(5),
};
/**
@@ -1172,6 +1248,7 @@ enum rate_info_bw {
* @he_dcm: HE DCM value
* @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc,
* only valid if bw is %RATE_INFO_BW_HE_RU)
+ * @n_bonded_ch: In case of EDMG the number of bonded channels (1-4)
*/
struct rate_info {
u8 flags;
@@ -1182,6 +1259,7 @@ struct rate_info {
u8 he_gi;
u8 he_dcm;
u8 he_ru_alloc;
+ u8 n_bonded_ch;
};
/**
@@ -2347,6 +2425,9 @@ struct cfg80211_bss_selection {
* @fils_erp_rrk_len: Length of @fils_erp_rrk in octets.
* @want_1x: indicates user-space supports and wants to use 802.1X driver
* offload of 4-way handshake.
+ * @edmg: defines the EDMG channels.
+ * This may specify multiple channels and bonding options for the driver
+ * to choose from, based on BSS configuration.
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
@@ -2380,6 +2461,7 @@ struct cfg80211_connect_params {
const u8 *fils_erp_rrk;
size_t fils_erp_rrk_len;
bool want_1x;
+ struct ieee80211_edmg edmg;
};
/**
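
Putting the new EDMG pieces together, a driver could describe a channel-bonded 60 GHz configuration and test it with the helper added above (illustrative only; the primary-channel pointer comes from elsewhere):

static bool example_edmg_chandef(struct ieee80211_channel *primary)
{
	struct cfg80211_chan_def def = {
		.chan = primary,
		.width = NL80211_CHAN_WIDTH_20,	/* ignored once .edmg.channels != 0 */
		.edmg = {
			.channels = 0x3,	/* 2.16 GHz channels 1 and 2 */
			.bw_config = IEEE80211_EDMG_BW_CONFIG_5,
		},
	};

	return cfg80211_chandef_is_edmg(&def);	/* true for this definition */
}
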
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index ddfbb59..d2a016e 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -38,6 +38,7 @@ enum {
ND_OPT_RDNSS = 25, /* RFC5006 */
ND_OPT_DNSSL = 31, /* RFC6106 */
ND_OPT_6CO = 34, /* RFC6775 */
+ ND_OPT_CAPTIVE_PORTAL = 37, /* RFC7710 */
__ND_OPT_MAX
};
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index bb8092f..58507c7 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -241,6 +241,7 @@ struct fcoe_fcf {
* @vn_mac: VN_Node assigned MAC address for data
*/
struct fcoe_rport {
+ struct fc_rport_priv rdata;
unsigned long time;
u16 fcoe_len;
u16 flags;
diff --git a/include/soc/qcom/rmnet_ctl.h b/include/soc/qcom/rmnet_ctl.h
new file mode 100644
index 0000000..0080560
--- /dev/null
+++ b/include/soc/qcom/rmnet_ctl.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * RMNET_CTL header
+ *
+ */
+
+#ifndef _RMNET_CTL_H_
+#define _RMNET_CTL_H_
+
+#include <linux/skbuff.h>
+
+struct rmnet_ctl_client_hooks {
+ void (*ctl_dl_client_hook)(struct sk_buff *skb);
+};
+
+#ifdef CONFIG_RMNET_CTL
+
+void *rmnet_ctl_register_client(struct rmnet_ctl_client_hooks *hook);
+int rmnet_ctl_unregister_client(void *handle);
+int rmnet_ctl_send_client(void *handle, struct sk_buff *skb);
+
+#else
+
+static inline void *rmnet_ctl_register_client(
+ struct rmnet_ctl_client_hooks *hook)
+{
+ return NULL;
+}
+
+static inline int rmnet_ctl_unregister_client(void *handle)
+{
+ return -EINVAL;
+}
+
+static inline int rmnet_ctl_send_client(void *handle, struct sk_buff *skb)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_RMNET_CTL */
+
+#endif /* _RMNET_CTL_H_ */
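
A hypothetical consumer of the new interface; my_dl_hook() and friends are illustrative names, and the hook is assumed to own the skb it is handed. The stubs above keep the same code building when CONFIG_RMNET_CTL is disabled:

static void my_dl_hook(struct sk_buff *skb)
{
	/* consume the downlink control message, then free it */
	kfree_skb(skb);
}

static struct rmnet_ctl_client_hooks my_hooks = {
	.ctl_dl_client_hook = my_dl_hook,
};

static void *my_ctl_handle;

static int my_client_init(void)
{
	my_ctl_handle = rmnet_ctl_register_client(&my_hooks);
	return my_ctl_handle ? 0 : -EINVAL;
}

static void my_client_exit(void)
{
	rmnet_ctl_unregister_client(my_ctl_handle);
}
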
diff --git a/include/soc/qcom/rmnet_qmi.h b/include/soc/qcom/rmnet_qmi.h
index 9096b10..ffcef3f 100644
--- a/include/soc/qcom/rmnet_qmi.h
+++ b/include/soc/qcom/rmnet_qmi.h
@@ -24,6 +24,7 @@ void rmnet_set_powersave_format(void *port);
void rmnet_clear_powersave_format(void *port);
void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
int rmnet_get_powersave_notif(void *port);
+struct net_device *rmnet_get_real_dev(void *port);
#else
static inline void *rmnet_get_qmi_pt(void *port)
{
@@ -76,5 +77,9 @@ static inline int rmnet_get_powersave_notif(void *port)
return 0;
}
+static inline struct net_device *rmnet_get_real_dev(void *port)
+{
+ return NULL;
+}
#endif /* CONFIG_QCOM_QMI_RMNET */
#endif /*_RMNET_QMI_H*/
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 02713d1..82845d9 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -41,6 +41,18 @@ enum vmid {
#define PERM_WRITE 0x2
#define PERM_EXEC 0x1
+struct dest_vm_and_perm_info {
+ u32 vm;
+ u32 perm;
+ u64 ctx;
+ u32 ctx_size;
+};
+
+struct mem_prot_info {
+ phys_addr_t addr;
+ u64 size;
+};
+
#ifdef CONFIG_QCOM_SECURE_BUFFER
int msm_secure_table(struct sg_table *table);
int msm_unsecure_table(struct sg_table *table);
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index b52d4a0..78a2291 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -177,10 +177,7 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
if (snd_BUG_ON(!stream))
return;
- if (stream->direction == SND_COMPRESS_PLAYBACK)
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
- else
- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
wake_up(&stream->runtime->sleep);
}
diff --git a/include/trace/events/dfc.h b/include/trace/events/dfc.h
index 375a156..fb092bb 100644
--- a/include/trace/events/dfc.h
+++ b/include/trace/events/dfc.h
@@ -236,6 +236,29 @@ TRACE_EVENT(dfc_tx_link_status_ind,
__entry->mid, __entry->bid)
);
+TRACE_EVENT(dfc_qmap,
+
+ TP_PROTO(const void *data, size_t len, bool in),
+
+ TP_ARGS(data, len, in),
+
+ TP_STRUCT__entry(
+ __field(bool, in)
+ __field(size_t, len)
+ __dynamic_array(u8, data, len)
+ ),
+
+ TP_fast_assign(
+ __entry->in = in;
+ __entry->len = len;
+ memcpy(__get_dynamic_array(data), data, len);
+ ),
+
+ TP_printk("%s [%s]",
+ __entry->in ? "<--" : "-->",
+ __print_hex(__get_dynamic_array(data), __entry->len))
+);
+
#endif /* _TRACE_DFC_H */
/* This part must be outside protection */
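
TRACE_EVENT(dfc_qmap, ...) above expands to a trace_dfc_qmap() helper that snapshots the frame into the trace ring buffer; a call site would look like this (log_qmap_frame() is a made-up wrapper):

static void log_qmap_frame(struct sk_buff *skb, bool in)
{
	/* in = true for received frames, false for transmitted ones */
	trace_dfc_qmap(skb->data, skb->len, in);
}
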
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 147546e..815dcfa 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -500,10 +500,10 @@ rxrpc_tx_points;
#define E_(a, b) { a, b }
TRACE_EVENT(rxrpc_local,
- TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op,
+ TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
int usage, const void *where),
- TP_ARGS(local, op, usage, where),
+ TP_ARGS(local_debug_id, op, usage, where),
TP_STRUCT__entry(
__field(unsigned int, local )
@@ -513,7 +513,7 @@ TRACE_EVENT(rxrpc_local,
),
TP_fast_assign(
- __entry->local = local->debug_id;
+ __entry->local = local_debug_id;
__entry->op = op;
__entry->usage = usage;
__entry->where = where;
diff --git a/include/trace/events/walt.h b/include/trace/events/walt.h
index 1cf2bf9..13cd0b3 100644
--- a/include/trace/events/walt.h
+++ b/include/trace/events/walt.h
@@ -491,9 +491,10 @@ TRACE_EVENT(sched_load_to_gov,
TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
int freq_aggr, u64 load, int policy,
- int big_task_rotation),
+ int big_task_rotation,
+ unsigned int user_hint),
TP_ARGS(rq, aggr_grp_load, tt_load, freq_aggr, load, policy,
- big_task_rotation),
+ big_task_rotation, user_hint),
TP_STRUCT__entry(
__field(int, cpu)
@@ -509,6 +510,7 @@ TRACE_EVENT(sched_load_to_gov,
__field(u64, pl)
__field(u64, load)
__field(int, big_task_rotation)
+ __field(unsigned int, user_hint)
),
TP_fast_assign(
@@ -526,13 +528,14 @@ TRACE_EVENT(sched_load_to_gov,
rq->walt_stats.pred_demands_sum_scaled;
__entry->load = load;
__entry->big_task_rotation = big_task_rotation;
+ __entry->user_hint = user_hint;
),
- TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d",
+ TP_printk("cpu=%d policy=%d ed_task_pid=%d aggr_grp_load=%llu freq_aggr=%d tt_load=%llu rq_ps=%llu grp_rq_ps=%llu nt_ps=%llu grp_nt_ps=%llu pl=%llu load=%llu big_task_rotation=%d user_hint=%u",
__entry->cpu, __entry->policy, __entry->ed_task_pid,
__entry->aggr_grp_load, __entry->freq_aggr,
__entry->tt_load, __entry->rq_ps, __entry->grp_rq_ps,
__entry->nt_ps, __entry->grp_nt_ps, __entry->pl, __entry->load,
- __entry->big_task_rotation)
+ __entry->big_task_rotation, __entry->user_hint)
);
#endif
diff --git a/include/uapi/linux/coda_psdev.h b/include/uapi/linux/coda_psdev.h
index aa6623e..d50d51a 100644
--- a/include/uapi/linux/coda_psdev.h
+++ b/include/uapi/linux/coda_psdev.h
@@ -7,19 +7,6 @@
#define CODA_PSDEV_MAJOR 67
#define MAX_CODADEVS 5 /* how many do we allow */
-
-/* messages between coda filesystem in kernel and Venus */
-struct upc_req {
- struct list_head uc_chain;
- caddr_t uc_data;
- u_short uc_flags;
- u_short uc_inSize; /* Size is at most 5000 bytes */
- u_short uc_outSize;
- u_short uc_opcode; /* copied from data to save lookup */
- int uc_unique;
- wait_queue_head_t uc_sleep; /* process' wait queue */
-};
-
#define CODA_REQ_ASYNC 0x1
#define CODA_REQ_READ 0x2
#define CODA_REQ_WRITE 0x4
diff --git a/include/uapi/linux/esoc_ctrl.h b/include/uapi/linux/esoc_ctrl.h
index 9a8a3e2..7924470 100644
--- a/include/uapi/linux/esoc_ctrl.h
+++ b/include/uapi/linux/esoc_ctrl.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _UAPI_ESOC_CTRL_H_
#define _UAPI_ESOC_CTRL_H_
@@ -57,6 +57,7 @@ enum esoc_evt {
ESOC_CMD_ENG_OFF,
ESOC_INVALID_STATE,
ESOC_RETRY_PON_EVT,
+ ESOC_BOOT_STATE,
};
enum esoc_cmd {
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index a1953ab..a137403 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -126,6 +126,7 @@
#define IPA_IOCTL_FNR_COUNTER_ALLOC 74
#define IPA_IOCTL_FNR_COUNTER_DEALLOC 75
#define IPA_IOCTL_FNR_COUNTER_QUERY 76
+#define IPA_IOCTL_SET_FNR_COUNTER_INFO 77
/**
* max size of the header to be inserted
@@ -2523,6 +2524,24 @@ struct ipa_odl_modem_config {
__u8 config_status;
};
+struct ipa_ioc_fnr_index_info {
+ uint8_t hw_counter_offset;
+ uint8_t sw_counter_offset;
+};
+
+enum ipacm_hw_index_counter_type {
+ UL_HW = 0,
+ DL_HW,
+ DL_ALL,
+ UL_ALL,
+};
+
+enum ipacm_hw_index_counter_virtual_type {
+ UL_HW_CACHE = 0,
+ DL_HW_CACHE,
+ UL_WLAN_TX,
+ DL_WLAN_TX
+};
/**
* actual IOCTLs supported by IPA driver
@@ -2773,6 +2792,10 @@ struct ipa_odl_modem_config {
IPA_IOCTL_FNR_COUNTER_QUERY, \
struct ipa_ioc_flt_rt_query)
+#define IPA_IOC_SET_FNR_COUNTER_INFO _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_SET_FNR_COUNTER_INFO, \
+ struct ipa_ioc_fnr_index_info)
+
/*
* unique magic number of the Tethering bridge ioctls
*/
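The new IPA_IOC_SET_FNR_COUNTER_INFO follows the usual _IOWR() pattern: magic number, command index, and payload type packed into one ioctl number. A hypothetical user-space caller is sketched below; the magic value (0xCF) and the /dev/ipa node are assumptions for the sketch, and real code should include the UAPI header instead of mirroring the definitions.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Mirrored for illustration only; real callers include <linux/msm_ipa.h>. */
#define IPA_IOC_MAGIC 0xCF	/* assumed value */
#define IPA_IOCTL_SET_FNR_COUNTER_INFO 77

struct ipa_ioc_fnr_index_info {
	uint8_t hw_counter_offset;
	uint8_t sw_counter_offset;
};

#define IPA_IOC_SET_FNR_COUNTER_INFO _IOWR(IPA_IOC_MAGIC, \
		IPA_IOCTL_SET_FNR_COUNTER_INFO, \
		struct ipa_ioc_fnr_index_info)

int main(void)
{
	struct ipa_ioc_fnr_index_info info = {
		.hw_counter_offset = 0,	/* e.g. UL_HW */
		.sw_counter_offset = 0,	/* e.g. UL_HW_CACHE */
	};
	int fd = open("/dev/ipa", O_RDWR);	/* device node assumed */

	if (fd < 0)
		return 1;
	if (ioctl(fd, IPA_IOC_SET_FNR_COUNTER_INFO, &info) < 0)
		perror("IPA_IOC_SET_FNR_COUNTER_INFO");
	close(fd);
	return 0;
}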
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 16bf3a5..2f938db 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -52,6 +52,11 @@
#define NL80211_MULTICAST_GROUP_NAN "nan"
#define NL80211_MULTICAST_GROUP_TESTMODE "testmode"
+#define NL80211_EDMG_BW_CONFIG_MIN 4
+#define NL80211_EDMG_BW_CONFIG_MAX 15
+#define NL80211_EDMG_CHANNELS_MIN 1
+#define NL80211_EDMG_CHANNELS_MAX 0x3c /* 0b00111100 */
+
/**
* DOC: Station handling
*
@@ -2288,6 +2293,52 @@ enum nl80211_commands {
* association request when used with NL80211_CMD_NEW_STATION). Can be set
* only if %NL80211_STA_FLAG_WME is set.
*
+ * @NL80211_ATTR_FTM_RESPONDER: nested attribute which user-space can include
+ * in %NL80211_CMD_START_AP or %NL80211_CMD_SET_BEACON for fine timing
+ * measurement (FTM) responder functionality, containing the parameters
+ * described in &enum nl80211_ftm_responder_attr
+ *
+ * @NL80211_ATTR_FTM_RESPONDER_STATS: Nested attribute with FTM responder
+ * statistics, see &enum nl80211_ftm_responder_stats.
+ *
+ * @NL80211_ATTR_TIMEOUT: Timeout for the given operation in milliseconds (u32),
+ * if the attribute is not given no timeout is requested. Note that 0 is an
+ * invalid value.
+ *
+ * @NL80211_ATTR_PEER_MEASUREMENTS: peer measurements request (and result)
+ * data, uses nested attributes specified in
+ * &enum nl80211_peer_measurement_attrs.
+ * This is also used for capability advertisement in the wiphy information,
+ * with the appropriate sub-attributes.
+ *
+ * @NL80211_ATTR_AIRTIME_WEIGHT: Station's weight when scheduled by the airtime
+ * scheduler.
+ *
+ * @NL80211_ATTR_STA_TX_POWER_SETTING: Transmit power setting type (u8) for
+ * station associated with the AP. See &enum nl80211_tx_power_setting for
+ * possible values.
+ * @NL80211_ATTR_STA_TX_POWER: Transmit power level (s16) in dBm units. This
+ *	allows setting Tx power for a station. If this attribute is not
+ *	included, the default per-interface tx power setting applies. The
+ *	driver should pick the lower of the per-interface and per-station
+ *	tx power.
+ *
+ * @NL80211_ATTR_SAE_PASSWORD: attribute for passing SAE password material. It
+ * is used with %NL80211_CMD_CONNECT to provide password for offloading
+ * SAE authentication for WPA3-Personal networks.
+ *
+ * @NL80211_ATTR_TWT_RESPONDER: Enable target wait time responder support.
+ *
+ * @NL80211_ATTR_HE_OBSS_PD: nested attribute for OBSS Packet Detection
+ * functionality.
+ *
+ * @NL80211_ATTR_WIPHY_EDMG_CHANNELS: bitmap that indicates the 2.16 GHz
+ * channel(s) that are allowed to be used for EDMG transmissions.
+ * Defined by IEEE P802.11ay/D4.0 section 9.4.2.251. (u8 attribute)
+ * @NL80211_ATTR_WIPHY_EDMG_BW_CONFIG: Channel BW Configuration subfield encodes
+ * the allowed channel bandwidth configurations. (u8 attribute)
+ * Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2729,6 +2780,27 @@ enum nl80211_attrs {
NL80211_ATTR_HE_CAPABILITY,
+ NL80211_ATTR_FTM_RESPONDER,
+
+ NL80211_ATTR_FTM_RESPONDER_STATS,
+
+ NL80211_ATTR_TIMEOUT,
+
+ NL80211_ATTR_PEER_MEASUREMENTS,
+
+ NL80211_ATTR_AIRTIME_WEIGHT,
+ NL80211_ATTR_STA_TX_POWER_SETTING,
+ NL80211_ATTR_STA_TX_POWER,
+
+ NL80211_ATTR_SAE_PASSWORD,
+
+ NL80211_ATTR_TWT_RESPONDER,
+
+ NL80211_ATTR_HE_OBSS_PD,
+
+ NL80211_ATTR_WIPHY_EDMG_CHANNELS,
+ NL80211_ATTR_WIPHY_EDMG_BW_CONFIG,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2779,7 +2851,7 @@ enum nl80211_attrs {
#define NL80211_HT_CAPABILITY_LEN 26
#define NL80211_VHT_CAPABILITY_LEN 12
#define NL80211_HE_MIN_CAPABILITY_LEN 16
-#define NL80211_HE_MAX_CAPABILITY_LEN 51
+#define NL80211_HE_MAX_CAPABILITY_LEN 54
#define NL80211_MAX_NR_CIPHER_SUITES 5
#define NL80211_MAX_NR_AKM_SUITES 2
@@ -3320,6 +3392,12 @@ enum nl80211_band_iftype_attr {
* @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the HT information IE
* @NL80211_BAND_ATTR_IFTYPE_DATA: nested array attribute, with each entry using
* attributes from &enum nl80211_band_iftype_attr
+ * @NL80211_BAND_ATTR_EDMG_CHANNELS: bitmap that indicates the 2.16 GHz
+ * channel(s) that are allowed to be used for EDMG transmissions.
+ * Defined by IEEE P802.11ay/D4.0 section 9.4.2.251.
+ * @NL80211_BAND_ATTR_EDMG_BW_CONFIG: Channel BW Configuration subfield encodes
+ * the allowed channel bandwidth configurations.
+ * Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13.
* @NL80211_BAND_ATTR_MAX: highest band attribute currently defined
* @__NL80211_BAND_ATTR_AFTER_LAST: internal use
*/
@@ -3337,6 +3415,9 @@ enum nl80211_band_attr {
NL80211_BAND_ATTR_VHT_CAPA,
NL80211_BAND_ATTR_IFTYPE_DATA,
+ NL80211_BAND_ATTR_EDMG_CHANNELS,
+ NL80211_BAND_ATTR_EDMG_BW_CONFIG,
+
/* keep last */
__NL80211_BAND_ATTR_AFTER_LAST,
NL80211_BAND_ATTR_MAX = __NL80211_BAND_ATTR_AFTER_LAST - 1
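The EDMG attributes advertise usable 2.16 GHz channels as a bitmap bounded by NL80211_EDMG_CHANNELS_MIN/MAX. Assuming the spec's convention that bit n-1 represents channel n, the 0x3c maximum decodes to channels 3 through 6:

#include <stdio.h>

#define NL80211_EDMG_CHANNELS_MAX 0x3c /* 0b00111100 */

int main(void)
{
	unsigned char channels = NL80211_EDMG_CHANNELS_MAX;

	/* Assumed mapping: bit n-1 set means channel n is allowed. */
	for (int ch = 1; ch <= 8; ch++)
		if (channels & (1u << (ch - 1)))
			printf("channel %d allowed\n", ch);	/* 3..6 */
	return 0;
}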
diff --git a/include/uapi/media/msm_vidc_utils.h b/include/uapi/media/msm_vidc_utils.h
index 14ee584..18fdddb 100644
--- a/include/uapi/media/msm_vidc_utils.h
+++ b/include/uapi/media/msm_vidc_utils.h
@@ -348,4 +348,25 @@ enum msm_vidc_hdr_info_types {
MSM_VIDC_RGB_MAX_FLL,
};
+enum msm_vidc_plane_reserved_field_types {
+ MSM_VIDC_BUFFER_FD,
+ MSM_VIDC_DATA_OFFSET,
+ MSM_VIDC_COMP_RATIO,
+ MSM_VIDC_INPUT_TAG_1,
+ MSM_VIDC_INPUT_TAG_2,
+};
+
+enum msm_vidc_cb_event_types {
+ MSM_VIDC_HEIGHT,
+ MSM_VIDC_WIDTH,
+ MSM_VIDC_BIT_DEPTH,
+ MSM_VIDC_PIC_STRUCT,
+ MSM_VIDC_COLOR_SPACE,
+ MSM_VIDC_CROP_TOP,
+ MSM_VIDC_CROP_LEFT,
+ MSM_VIDC_CROP_HEIGHT,
+ MSM_VIDC_CROP_WIDTH,
+ MSM_VIDC_PROFILE,
+ MSM_VIDC_LEVEL,
+};
#endif
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 94bc9584..8763ee7 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -389,7 +389,6 @@ static void mqueue_evict_inode(struct inode *inode)
{
struct mqueue_inode_info *info;
struct user_struct *user;
- unsigned long mq_bytes, mq_treesize;
struct ipc_namespace *ipc_ns;
struct msg_msg *msg, *nmsg;
LIST_HEAD(tmp_msg);
@@ -412,16 +411,18 @@ static void mqueue_evict_inode(struct inode *inode)
free_msg(msg);
}
- /* Total amount of bytes accounted for the mqueue */
- mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
- min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
- sizeof(struct posix_msg_tree_node);
-
- mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
- info->attr.mq_msgsize);
-
user = info->user;
if (user) {
+ unsigned long mq_bytes, mq_treesize;
+
+ /* Total amount of bytes accounted for the mqueue */
+ mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
+ min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
+ sizeof(struct posix_msg_tree_node);
+
+ mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
+ info->attr.mq_msgsize);
+
spin_lock(&mq_lock);
user->mq_bytes -= mq_bytes;
/*
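Relocating the arithmetic under the if (user) test avoids computing a charge that is never applied when no user is attached. The charge itself is simple; a standalone sketch with illustrative structure sizes (the real sizeof values are build-dependent):

#include <stdio.h>

#define SIZEOF_MSG_MSG   48UL	/* illustrative, not the real sizeof */
#define SIZEOF_TREE_NODE 40UL	/* illustrative */
#define MQ_PRIO_MAX      32768UL

static unsigned long mq_account(unsigned long maxmsg, unsigned long msgsize)
{
	unsigned long prio_nodes = maxmsg < MQ_PRIO_MAX ? maxmsg : MQ_PRIO_MAX;
	unsigned long mq_treesize = maxmsg * SIZEOF_MSG_MSG +
				    prio_nodes * SIZEOF_TREE_NODE;

	return mq_treesize + maxmsg * msgsize;
}

int main(void)
{
	/* e.g. 10 messages of 8192 bytes: 10*48 + 10*40 + 10*8192 = 82800 */
	printf("accounted bytes: %lu\n", mq_account(10, 8192));
	return 0;
}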
diff --git a/kernel/Makefile b/kernel/Makefile
index e968877..c2aa5bd 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -40,6 +40,9 @@
# Don't instrument error handlers
CFLAGS_cfi.o = $(DISABLE_CFI_CLANG)
obj-y += sched/
obj-y += locking/
obj-y += power/
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 9a66ad3..8e91dd0 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -213,7 +213,8 @@ static struct cftype cgroup_base_files[];
static int cgroup_apply_control(struct cgroup *cgrp);
static void cgroup_finalize_control(struct cgroup *cgrp, int ret);
-static void css_task_iter_advance(struct css_task_iter *it);
+static void css_task_iter_skip(struct css_task_iter *it,
+ struct task_struct *task);
static int cgroup_destroy_locked(struct cgroup *cgrp);
static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
struct cgroup_subsys *ss);
@@ -673,6 +674,7 @@ struct css_set init_css_set = {
.dom_cset = &init_css_set,
.tasks = LIST_HEAD_INIT(init_css_set.tasks),
.mg_tasks = LIST_HEAD_INIT(init_css_set.mg_tasks),
+ .dying_tasks = LIST_HEAD_INIT(init_css_set.dying_tasks),
.task_iters = LIST_HEAD_INIT(init_css_set.task_iters),
.threaded_csets = LIST_HEAD_INIT(init_css_set.threaded_csets),
.cgrp_links = LIST_HEAD_INIT(init_css_set.cgrp_links),
@@ -776,6 +778,21 @@ static void css_set_update_populated(struct css_set *cset, bool populated)
cgroup_update_populated(link->cgrp, populated);
}
+/*
+ * @task is leaving, advance task iterators which are pointing to it so
+ * that they can resume at the next position. Advancing an iterator might
+ * remove it from the list, use safe walk. See css_task_iter_skip() for
+ * details.
+ */
+static void css_set_skip_task_iters(struct css_set *cset,
+ struct task_struct *task)
+{
+ struct css_task_iter *it, *pos;
+
+ list_for_each_entry_safe(it, pos, &cset->task_iters, iters_node)
+ css_task_iter_skip(it, task);
+}
+
/**
* css_set_move_task - move a task from one css_set to another
* @task: task being moved
@@ -801,22 +818,9 @@ static void css_set_move_task(struct task_struct *task,
css_set_update_populated(to_cset, true);
if (from_cset) {
- struct css_task_iter *it, *pos;
-
WARN_ON_ONCE(list_empty(&task->cg_list));
- /*
- * @task is leaving, advance task iterators which are
- * pointing to it so that they can resume at the next
- * position. Advancing an iterator might remove it from
- * the list, use safe walk. See css_task_iter_advance*()
- * for details.
- */
- list_for_each_entry_safe(it, pos, &from_cset->task_iters,
- iters_node)
- if (it->task_pos == &task->cg_list)
- css_task_iter_advance(it);
-
+ css_set_skip_task_iters(from_cset, task);
list_del_init(&task->cg_list);
if (!css_set_populated(from_cset))
css_set_update_populated(from_cset, false);
@@ -1143,6 +1147,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
cset->dom_cset = cset;
INIT_LIST_HEAD(&cset->tasks);
INIT_LIST_HEAD(&cset->mg_tasks);
+ INIT_LIST_HEAD(&cset->dying_tasks);
INIT_LIST_HEAD(&cset->task_iters);
INIT_LIST_HEAD(&cset->threaded_csets);
INIT_HLIST_NODE(&cset->hlist);
@@ -4235,15 +4240,18 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
it->task_pos = NULL;
return;
}
- } while (!css_set_populated(cset));
+ } while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
if (!list_empty(&cset->tasks))
it->task_pos = cset->tasks.next;
- else
+ else if (!list_empty(&cset->mg_tasks))
it->task_pos = cset->mg_tasks.next;
+ else
+ it->task_pos = cset->dying_tasks.next;
it->tasks_head = &cset->tasks;
it->mg_tasks_head = &cset->mg_tasks;
+ it->dying_tasks_head = &cset->dying_tasks;
/*
* We don't keep css_sets locked across iteration steps and thus
@@ -4269,9 +4277,20 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
list_add(&it->iters_node, &cset->task_iters);
}
+static void css_task_iter_skip(struct css_task_iter *it,
+ struct task_struct *task)
+{
+ lockdep_assert_held(&css_set_lock);
+
+ if (it->task_pos == &task->cg_list) {
+ it->task_pos = it->task_pos->next;
+ it->flags |= CSS_TASK_ITER_SKIPPED;
+ }
+}
+
static void css_task_iter_advance(struct css_task_iter *it)
{
- struct list_head *next;
+ struct task_struct *task;
lockdep_assert_held(&css_set_lock);
repeat:
@@ -4281,25 +4300,40 @@ static void css_task_iter_advance(struct css_task_iter *it)
* consumed first and then ->mg_tasks. After ->mg_tasks,
* we move onto the next cset.
*/
- next = it->task_pos->next;
-
- if (next == it->tasks_head)
- next = it->mg_tasks_head->next;
-
- if (next == it->mg_tasks_head)
- css_task_iter_advance_css_set(it);
+ if (it->flags & CSS_TASK_ITER_SKIPPED)
+ it->flags &= ~CSS_TASK_ITER_SKIPPED;
else
- it->task_pos = next;
+ it->task_pos = it->task_pos->next;
+
+ if (it->task_pos == it->tasks_head)
+ it->task_pos = it->mg_tasks_head->next;
+ if (it->task_pos == it->mg_tasks_head)
+ it->task_pos = it->dying_tasks_head->next;
+ if (it->task_pos == it->dying_tasks_head)
+ css_task_iter_advance_css_set(it);
} else {
/* called from start, proceed to the first cset */
css_task_iter_advance_css_set(it);
}
- /* if PROCS, skip over tasks which aren't group leaders */
- if ((it->flags & CSS_TASK_ITER_PROCS) && it->task_pos &&
- !thread_group_leader(list_entry(it->task_pos, struct task_struct,
- cg_list)))
- goto repeat;
+ if (!it->task_pos)
+ return;
+
+ task = list_entry(it->task_pos, struct task_struct, cg_list);
+
+ if (it->flags & CSS_TASK_ITER_PROCS) {
+ /* if PROCS, skip over tasks which aren't group leaders */
+ if (!thread_group_leader(task))
+ goto repeat;
+
+ /* and dying leaders w/o live member threads */
+ if (!atomic_read(&task->signal->live))
+ goto repeat;
+ } else {
+ /* skip all dying ones */
+ if (task->flags & PF_EXITING)
+ goto repeat;
+ }
}
/**
@@ -4355,6 +4389,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
spin_lock_irq(&css_set_lock);
+ /* @it may be half-advanced by skips, finish advancing */
+ if (it->flags & CSS_TASK_ITER_SKIPPED)
+ css_task_iter_advance(it);
+
if (it->task_pos) {
it->cur_task = list_entry(it->task_pos, struct task_struct,
cg_list);
@@ -5790,6 +5828,7 @@ void cgroup_exit(struct task_struct *tsk)
if (!list_empty(&tsk->cg_list)) {
spin_lock_irq(&css_set_lock);
css_set_move_task(tsk, cset, NULL, false);
+ list_add_tail(&tsk->cg_list, &cset->dying_tasks);
cset->nr_tasks--;
spin_unlock_irq(&css_set_lock);
} else {
@@ -5810,6 +5849,13 @@ void cgroup_release(struct task_struct *task)
do_each_subsys_mask(ss, ssid, have_release_callback) {
ss->release(task);
} while_each_subsys_mask();
+
+ if (use_task_css_set_links) {
+ spin_lock_irq(&css_set_lock);
+ css_set_skip_task_iters(task_css_set(task), task);
+ list_del_init(&task->cg_list);
+ spin_unlock_irq(&css_set_lock);
+ }
}
void cgroup_free(struct task_struct *task)
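Stripped of the cgroup specifics, css_task_iter_skip() is a reusable pattern: when an element vanishes from under a parked iterator, push the iterator past it and record that the advance already happened, so the next step does not advance twice. A minimal single-threaded sketch of that pattern (illustrative names, no locking):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node { int val; struct node *next; };

struct iter {
	struct node *pos;	/* last returned node (or first, at start) */
	bool skipped;		/* pos was already advanced out-of-band */
};

static void iter_start(struct iter *it, struct node *head)
{
	it->pos = head;
	it->skipped = true;	/* first iter_next() returns head itself */
}

/* Removal hook: if an iterator is parked on @n, push it past @n. */
static void iter_skip(struct iter *it, struct node *n)
{
	if (it->pos == n) {
		it->pos = n->next;
		it->skipped = true;
	}
}

static struct node *iter_next(struct iter *it)
{
	if (it->skipped)
		it->skipped = false;	/* a skip already advanced us */
	else if (it->pos)
		it->pos = it->pos->next;
	return it->pos;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct iter it;
	struct node *n;

	iter_start(&it, &a);
	n = iter_next(&it);		/* returns a */
	iter_skip(&it, n);		/* a goes away mid-iteration */
	while ((n = iter_next(&it)))
		printf("%d\n", n->val);	/* prints 2 then 3, a not revisited */
	return 0;
}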
diff --git a/kernel/cred.c b/kernel/cred.c
index efd04b2..5ab1f7e 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -147,7 +147,10 @@ void __put_cred(struct cred *cred)
BUG_ON(cred == current->cred);
BUG_ON(cred == current->real_cred);
- call_rcu(&cred->rcu, put_cred_rcu);
+ if (cred->non_rcu)
+ put_cred_rcu(&cred->rcu);
+ else
+ call_rcu(&cred->rcu, put_cred_rcu);
}
EXPORT_SYMBOL(__put_cred);
@@ -258,6 +261,7 @@ struct cred *prepare_creds(void)
old = task->cred;
memcpy(new, old, sizeof(struct cred));
+ new->non_rcu = 0;
atomic_set(&new->usage, 1);
set_cred_subscribers(new, 0);
get_group_info(new->group_info);
@@ -537,7 +541,19 @@ const struct cred *override_creds(const struct cred *new)
validate_creds(old);
validate_creds(new);
- get_cred(new);
+
+ /*
+ * NOTE! This uses 'get_new_cred()' rather than 'get_cred()'.
+ *
+ * That means that we do not clear the 'non_rcu' flag, since
+ * we are only installing the cred into the thread-synchronous
+ * '->cred' pointer, not the '->real_cred' pointer that is
+ * visible to other threads under RCU.
+ *
+ * Also note that we did validate_creds() manually, not depending
+ * on the validation in 'get_cred()'.
+ */
+ get_new_cred((struct cred *)new);
alter_cred_subscribers(new, 1);
rcu_assign_pointer(current->cred, new);
alter_cred_subscribers(old, -1);
@@ -620,6 +636,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
validate_creds(old);
*new = *old;
+ new->non_rcu = 0;
atomic_set(&new->usage, 1);
set_cred_subscribers(new, 0);
get_uid(new->user);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 888d93c..6d7d708 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11292,7 +11292,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
goto err_unlock;
}
- perf_install_in_context(ctx, event, cpu);
+ perf_install_in_context(ctx, event, event->cpu);
perf_unpin_context(ctx);
mutex_unlock(&ctx->mutex);
diff --git a/kernel/exit.c b/kernel/exit.c
index 942f0a5..aae5cc5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -194,6 +194,7 @@ void release_task(struct task_struct *p)
rcu_read_unlock();
proc_flush_task(p);
+ cgroup_release(p);
write_lock_irq(&tasklist_lock);
ptrace_release_task(p);
@@ -219,7 +220,6 @@ void release_task(struct task_struct *p)
}
write_unlock_irq(&tasklist_lock);
- cgroup_release(p);
release_thread(p);
call_rcu(&p->rcu, delayed_put_task_struct);
diff --git a/kernel/fork.c b/kernel/fork.c
index e7a644e..6a5d06d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -714,7 +714,7 @@ void __put_task_struct(struct task_struct *tsk)
WARN_ON(tsk == current);
cgroup_free(tsk);
- task_numa_free(tsk);
+ task_numa_free(tsk, true);
security_task_free(tsk);
exit_creds(tsk);
delayacct_tsk_free(tsk);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 8e009cee..26814a1 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -294,6 +294,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
}
}
+static void irq_sysfs_del(struct irq_desc *desc)
+{
+ /*
+ * If irq_sysfs_init() has not yet been invoked (early boot), then
+ * irq_kobj_base is NULL and the descriptor was never added.
+ * kobject_del() complains about an object with no parent, so make
+ * it conditional.
+ */
+ if (irq_kobj_base)
+ kobject_del(&desc->kobj);
+}
+
static int __init irq_sysfs_init(void)
{
struct irq_desc *desc;
@@ -324,6 +336,7 @@ static struct kobj_type irq_kobj_type = {
};
static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
+static void irq_sysfs_del(struct irq_desc *desc) {}
#endif /* CONFIG_SYSFS */
@@ -437,7 +450,7 @@ static void free_desc(unsigned int irq)
* The sysfs entry must be serialized against a concurrent
* irq_sysfs_init() as well.
*/
- kobject_del(&desc->kobj);
+ irq_sysfs_del(desc);
delete_irq_desc(irq);
/*
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 3dd980d..6fcc465 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -200,7 +200,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
static int lockdep_stats_show(struct seq_file *m, void *v)
{
- struct lock_class *class;
unsigned long nr_unused = 0, nr_uncategorized = 0,
nr_irq_safe = 0, nr_irq_unsafe = 0,
nr_softirq_safe = 0, nr_softirq_unsafe = 0,
@@ -210,6 +209,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
sum_forward_deps = 0;
+#ifdef CONFIG_PROVE_LOCKING
+ struct lock_class *class;
+
list_for_each_entry(class, &all_lock_classes, lock_entry) {
if (class->usage_mask == 0)
@@ -241,13 +243,13 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
nr_hardirq_read_unsafe++;
-#ifdef CONFIG_PROVE_LOCKING
sum_forward_deps += lockdep_count_forward_deps(class);
-#endif
}
#ifdef CONFIG_DEBUG_LOCKDEP
DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
#endif
+
+#endif
seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
nr_lock_classes, MAX_LOCKDEP_KEYS);
seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
diff --git a/kernel/module.c b/kernel/module.c
index 669d81c..8644c18 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -3399,8 +3399,7 @@ static bool finished_loading(const char *name)
sched_annotate_sleep();
mutex_lock(&module_mutex);
mod = find_module_all(name, strlen(name), true);
- ret = !mod || mod->state == MODULE_STATE_LIVE
- || mod->state == MODULE_STATE_GOING;
+ ret = !mod || mod->state == MODULE_STATE_LIVE;
mutex_unlock(&module_mutex);
return ret;
@@ -3570,8 +3569,7 @@ static int add_unformed_module(struct module *mod)
mutex_lock(&module_mutex);
old = find_module_all(mod->name, strlen(mod->name), true);
if (old != NULL) {
- if (old->state == MODULE_STATE_COMING
- || old->state == MODULE_STATE_UNFORMED) {
+ if (old->state != MODULE_STATE_LIVE) {
/* Wait in case it fails to load. */
mutex_unlock(&module_mutex);
err = wait_event_interruptible(module_wq,
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6c775be..952827e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2008,7 +2008,7 @@ static inline void walt_try_to_wake_up(struct task_struct *p)
rcu_read_lock();
grp = task_related_thread_group(p);
- if (update_preferred_cluster(grp, p, old_load))
+ if (update_preferred_cluster(grp, p, old_load, false))
set_preferred_cluster(grp);
rcu_read_unlock();
}
@@ -3203,7 +3203,7 @@ void scheduler_tick(void)
rcu_read_lock();
grp = task_related_thread_group(curr);
- if (update_preferred_cluster(grp, curr, old_load))
+ if (update_preferred_cluster(grp, curr, old_load, true))
set_preferred_cluster(grp);
rcu_read_unlock();
@@ -6381,6 +6381,7 @@ int sched_cpu_starting(unsigned int cpu)
{
sched_rq_cpu_starting(cpu);
sched_tick_start(cpu);
+ clear_walt_request(cpu);
return 0;
}
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 31decf0..35e8185 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -56,6 +56,7 @@ struct sugov_policy {
struct task_struct *thread;
bool work_in_progress;
+ bool limits_changed;
bool need_freq_update;
};
@@ -113,8 +114,11 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
!cpufreq_this_cpu_can_update(sg_policy->policy))
return false;
- if (unlikely(sg_policy->need_freq_update))
+ if (unlikely(sg_policy->limits_changed)) {
+ sg_policy->limits_changed = false;
+ sg_policy->need_freq_update = true;
return true;
+ }
/* No need to recalculate next freq for min_rate_limit_us
* at least. However we might still decide to further rate
@@ -595,7 +599,7 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
static inline void ignore_dl_rate_limit(struct sugov_cpu *sg_cpu, struct sugov_policy *sg_policy)
{
if (cpu_bw_dl(cpu_rq(sg_cpu->cpu)) > sg_cpu->bw_dl)
- sg_policy->need_freq_update = true;
+ sg_policy->limits_changed = true;
}
static inline unsigned long target_util(struct sugov_policy *sg_policy,
@@ -628,7 +632,9 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
if (!sugov_should_update_freq(sg_policy, time))
return;
- busy = use_pelt() && sugov_cpu_is_busy(sg_cpu);
+ /* Limits may have changed, don't skip frequency update */
+ busy = use_pelt() && !sg_policy->need_freq_update &&
+ sugov_cpu_is_busy(sg_cpu);
sg_cpu->util = util = sugov_get_util(sg_cpu);
max = sg_cpu->max;
@@ -1286,6 +1292,7 @@ static int sugov_start(struct cpufreq_policy *policy)
sg_policy->last_freq_update_time = 0;
sg_policy->next_freq = 0;
sg_policy->work_in_progress = false;
+ sg_policy->limits_changed = false;
sg_policy->need_freq_update = false;
sg_policy->cached_raw_freq = 0;
@@ -1356,7 +1363,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
}
- sg_policy->need_freq_update = true;
+ sg_policy->limits_changed = true;
}
static struct cpufreq_governor schedutil_gov = {
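The split means sugov_limits() and the DL-bandwidth path now raise only limits_changed; sugov_should_update_freq() latches it into need_freq_update exactly once, and sugov_update_single() uses that to defeat its "CPU is busy" shortcut. A toy model of the handshake, with locking and rate limiting elided:

#include <stdbool.h>
#include <stdio.h>

struct policy {
	bool limits_changed;	/* set by external limit updates */
	bool need_freq_update;	/* consumed by the frequency path */
};

/* sugov_should_update_freq() analogue: convert the external flag
 * into need_freq_update exactly once. */
static bool should_update(struct policy *p)
{
	if (p->limits_changed) {
		p->limits_changed = false;
		p->need_freq_update = true;
		return true;
	}
	return false;	/* rate limiting elided */
}

int main(void)
{
	struct policy p = { false, false };

	p.limits_changed = true;	/* sugov_limits() analogue */
	if (should_update(&p) && p.need_freq_update)
		printf("recompute frequency, skip the busy shortcut\n");
	return 0;
}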
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1cf158c..ad12150 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1113,6 +1113,21 @@ struct numa_group {
unsigned long faults[0];
};
+/*
+ * For functions that can be called in multiple contexts that permit reading
+ * ->numa_group (see struct task_struct for locking rules).
+ */
+static struct numa_group *deref_task_numa_group(struct task_struct *p)
+{
+ return rcu_dereference_check(p->numa_group, p == current ||
+ (lockdep_is_held(&task_rq(p)->lock) && !READ_ONCE(p->on_cpu)));
+}
+
+static struct numa_group *deref_curr_numa_group(struct task_struct *p)
+{
+ return rcu_dereference_protected(p->numa_group, p == current);
+}
+
static inline unsigned long group_faults_priv(struct numa_group *ng);
static inline unsigned long group_faults_shared(struct numa_group *ng);
@@ -1156,10 +1171,12 @@ static unsigned int task_scan_start(struct task_struct *p)
{
unsigned long smin = task_scan_min(p);
unsigned long period = smin;
+ struct numa_group *ng;
/* Scale the maximum scan period with the amount of shared memory. */
- if (p->numa_group) {
- struct numa_group *ng = p->numa_group;
+ rcu_read_lock();
+ ng = rcu_dereference(p->numa_group);
+ if (ng) {
unsigned long shared = group_faults_shared(ng);
unsigned long private = group_faults_priv(ng);
@@ -1167,6 +1184,7 @@ static unsigned int task_scan_start(struct task_struct *p)
period *= shared + 1;
period /= private + shared + 1;
}
+ rcu_read_unlock();
return max(smin, period);
}
@@ -1175,13 +1193,14 @@ static unsigned int task_scan_max(struct task_struct *p)
{
unsigned long smin = task_scan_min(p);
unsigned long smax;
+ struct numa_group *ng;
/* Watch for min being lower than max due to floor calculations */
smax = sysctl_numa_balancing_scan_period_max / task_nr_scan_windows(p);
/* Scale the maximum scan period with the amount of shared memory. */
- if (p->numa_group) {
- struct numa_group *ng = p->numa_group;
+ ng = deref_curr_numa_group(p);
+ if (ng) {
unsigned long shared = group_faults_shared(ng);
unsigned long private = group_faults_priv(ng);
unsigned long period = smax;
@@ -1213,7 +1232,7 @@ void init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
p->numa_scan_period = sysctl_numa_balancing_scan_delay;
p->numa_work.next = &p->numa_work;
p->numa_faults = NULL;
- p->numa_group = NULL;
+ RCU_INIT_POINTER(p->numa_group, NULL);
p->last_task_numa_placement = 0;
p->last_sum_exec_runtime = 0;
@@ -1260,7 +1279,16 @@ static void account_numa_dequeue(struct rq *rq, struct task_struct *p)
pid_t task_numa_group_id(struct task_struct *p)
{
- return p->numa_group ? p->numa_group->gid : 0;
+ struct numa_group *ng;
+ pid_t gid = 0;
+
+ rcu_read_lock();
+ ng = rcu_dereference(p->numa_group);
+ if (ng)
+ gid = ng->gid;
+ rcu_read_unlock();
+
+ return gid;
}
/*
@@ -1285,11 +1313,13 @@ static inline unsigned long task_faults(struct task_struct *p, int nid)
static inline unsigned long group_faults(struct task_struct *p, int nid)
{
- if (!p->numa_group)
+ struct numa_group *ng = deref_task_numa_group(p);
+
+ if (!ng)
return 0;
- return p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
- p->numa_group->faults[task_faults_idx(NUMA_MEM, nid, 1)];
+ return ng->faults[task_faults_idx(NUMA_MEM, nid, 0)] +
+ ng->faults[task_faults_idx(NUMA_MEM, nid, 1)];
}
static inline unsigned long group_faults_cpu(struct numa_group *group, int nid)
@@ -1427,12 +1457,13 @@ static inline unsigned long task_weight(struct task_struct *p, int nid,
static inline unsigned long group_weight(struct task_struct *p, int nid,
int dist)
{
+ struct numa_group *ng = deref_task_numa_group(p);
unsigned long faults, total_faults;
- if (!p->numa_group)
+ if (!ng)
return 0;
- total_faults = p->numa_group->total_faults;
+ total_faults = ng->total_faults;
if (!total_faults)
return 0;
@@ -1446,7 +1477,7 @@ static inline unsigned long group_weight(struct task_struct *p, int nid,
bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
int src_nid, int dst_cpu)
{
- struct numa_group *ng = p->numa_group;
+ struct numa_group *ng = deref_curr_numa_group(p);
int dst_nid = cpu_to_node(dst_cpu);
int last_cpupid, this_cpupid;
@@ -1651,13 +1682,14 @@ static bool load_too_imbalanced(long src_load, long dst_load,
static void task_numa_compare(struct task_numa_env *env,
long taskimp, long groupimp, bool maymove)
{
+ struct numa_group *cur_ng, *p_ng = deref_curr_numa_group(env->p);
struct rq *dst_rq = cpu_rq(env->dst_cpu);
+ long imp = p_ng ? groupimp : taskimp;
struct task_struct *cur;
long src_load, dst_load;
- long load;
- long imp = env->p->numa_group ? groupimp : taskimp;
- long moveimp = imp;
int dist = env->dist;
+ long moveimp = imp;
+ long load;
if (READ_ONCE(dst_rq->numa_migrate_on))
return;
@@ -1696,21 +1728,22 @@ static void task_numa_compare(struct task_numa_env *env,
* If dst and source tasks are in the same NUMA group, or not
* in any group then look only at task weights.
*/
- if (cur->numa_group == env->p->numa_group) {
+ cur_ng = rcu_dereference(cur->numa_group);
+ if (cur_ng == p_ng) {
imp = taskimp + task_weight(cur, env->src_nid, dist) -
task_weight(cur, env->dst_nid, dist);
/*
* Add some hysteresis to prevent swapping the
* tasks within a group over tiny differences.
*/
- if (cur->numa_group)
+ if (cur_ng)
imp -= imp / 16;
} else {
/*
* Compare the group weights. If a task is all by itself
* (not part of a group), use the task weight instead.
*/
- if (cur->numa_group && env->p->numa_group)
+ if (cur_ng && p_ng)
imp += group_weight(cur, env->src_nid, dist) -
group_weight(cur, env->dst_nid, dist);
else
@@ -1808,11 +1841,12 @@ static int task_numa_migrate(struct task_struct *p)
.best_imp = 0,
.best_cpu = -1,
};
- struct sched_domain *sd;
- struct rq *best_rq;
unsigned long taskweight, groupweight;
- int nid, ret, dist;
+ struct sched_domain *sd;
long taskimp, groupimp;
+ struct numa_group *ng;
+ struct rq *best_rq;
+ int nid, ret, dist;
/*
* Pick the lowest SD_NUMA domain, as that would have the smallest
@@ -1858,7 +1892,8 @@ static int task_numa_migrate(struct task_struct *p)
* multiple NUMA nodes; in order to better consolidate the group,
* we need to check other locations.
*/
- if (env.best_cpu == -1 || (p->numa_group && p->numa_group->active_nodes > 1)) {
+ ng = deref_curr_numa_group(p);
+ if (env.best_cpu == -1 || (ng && ng->active_nodes > 1)) {
for_each_online_node(nid) {
if (nid == env.src_nid || nid == p->numa_preferred_nid)
continue;
@@ -1891,7 +1926,7 @@ static int task_numa_migrate(struct task_struct *p)
* A task that migrated to a second choice node will be better off
* trying for a better one later. Do not set the preferred node here.
*/
- if (p->numa_group) {
+ if (ng) {
if (env.best_cpu == -1)
nid = env.src_nid;
else
@@ -2186,6 +2221,7 @@ static void task_numa_placement(struct task_struct *p)
unsigned long total_faults;
u64 runtime, period;
spinlock_t *group_lock = NULL;
+ struct numa_group *ng;
/*
* The p->mm->numa_scan_seq field gets updated without
@@ -2203,8 +2239,9 @@ static void task_numa_placement(struct task_struct *p)
runtime = numa_get_avg_runtime(p, &period);
/* If the task is part of a group prevent parallel updates to group stats */
- if (p->numa_group) {
- group_lock = &p->numa_group->lock;
+ ng = deref_curr_numa_group(p);
+ if (ng) {
+ group_lock = &ng->lock;
spin_lock_irq(group_lock);
}
@@ -2245,7 +2282,7 @@ static void task_numa_placement(struct task_struct *p)
p->numa_faults[cpu_idx] += f_diff;
faults += p->numa_faults[mem_idx];
p->total_numa_faults += diff;
- if (p->numa_group) {
+ if (ng) {
/*
* safe because we can only change our own group
*
@@ -2253,14 +2290,14 @@ static void task_numa_placement(struct task_struct *p)
* nid and priv in a specific region because it
* is at the beginning of the numa_faults array.
*/
- p->numa_group->faults[mem_idx] += diff;
- p->numa_group->faults_cpu[mem_idx] += f_diff;
- p->numa_group->total_faults += diff;
- group_faults += p->numa_group->faults[mem_idx];
+ ng->faults[mem_idx] += diff;
+ ng->faults_cpu[mem_idx] += f_diff;
+ ng->total_faults += diff;
+ group_faults += ng->faults[mem_idx];
}
}
- if (!p->numa_group) {
+ if (!ng) {
if (faults > max_faults) {
max_faults = faults;
max_nid = nid;
@@ -2271,8 +2308,8 @@ static void task_numa_placement(struct task_struct *p)
}
}
- if (p->numa_group) {
- numa_group_count_active_nodes(p->numa_group);
+ if (ng) {
+ numa_group_count_active_nodes(ng);
spin_unlock_irq(group_lock);
max_nid = preferred_group_nid(p, max_nid);
}
@@ -2306,7 +2343,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
int cpu = cpupid_to_cpu(cpupid);
int i;
- if (unlikely(!p->numa_group)) {
+ if (unlikely(!deref_curr_numa_group(p))) {
unsigned int size = sizeof(struct numa_group) +
4*nr_node_ids*sizeof(unsigned long);
@@ -2342,7 +2379,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
if (!grp)
goto no_join;
- my_grp = p->numa_group;
+ my_grp = deref_curr_numa_group(p);
if (grp == my_grp)
goto no_join;
@@ -2404,13 +2441,24 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
return;
}
-void task_numa_free(struct task_struct *p)
+/*
+ * Get rid of NUMA statistics associated with a task (either current or dead).
+ * If @final is set, the task is dead and has reached refcount zero, so we can
+ * safely free all relevant data structures. Otherwise, there might be
+ * concurrent reads from places like load balancing and procfs, and we should
+ * reset the data back to default state without freeing ->numa_faults.
+ */
+void task_numa_free(struct task_struct *p, bool final)
{
- struct numa_group *grp = p->numa_group;
- void *numa_faults = p->numa_faults;
+ /* safe: p either is current or is being freed by current */
+ struct numa_group *grp = rcu_dereference_raw(p->numa_group);
+ unsigned long *numa_faults = p->numa_faults;
unsigned long flags;
int i;
+ if (!numa_faults)
+ return;
+
if (grp) {
spin_lock_irqsave(&grp->lock, flags);
for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
@@ -2423,8 +2471,14 @@ void task_numa_free(struct task_struct *p)
put_numa_group(grp);
}
- p->numa_faults = NULL;
- kfree(numa_faults);
+ if (final) {
+ p->numa_faults = NULL;
+ kfree(numa_faults);
+ } else {
+ p->total_numa_faults = 0;
+ for (i = 0; i < NR_NUMA_HINT_FAULT_STATS * nr_node_ids; i++)
+ numa_faults[i] = 0;
+ }
}
/*
@@ -2477,7 +2531,7 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
* actively using should be counted as local. This allows the
* scan rate to slow down when a workload has settled down.
*/
- ng = p->numa_group;
+ ng = deref_curr_numa_group(p);
if (!priv && !local && ng && ng->active_nodes > 1 &&
numa_is_active_node(cpu_node, ng) &&
numa_is_active_node(mem_node, ng))
@@ -3858,7 +3912,8 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
if (is_min_capacity_cpu(cpu)) {
if (task_boost_policy(p) == SCHED_BOOST_ON_BIG ||
task_boost > 0 ||
- schedtune_task_boost(p) > 0)
+ schedtune_task_boost(p) > 0 ||
+ walt_should_kick_upmigrate(p, cpu))
return false;
} else { /* mid cap cpu */
if (task_boost > 1)
@@ -3883,6 +3938,7 @@ struct find_best_target_env {
bool is_rtg;
int placement_boost;
bool need_idle;
+ bool boosted;
int fastpath;
int start_cpu;
};
@@ -6818,7 +6874,7 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
unsigned long best_active_cuml_util = ULONG_MAX;
unsigned long best_idle_cuml_util = ULONG_MAX;
bool prefer_idle = schedtune_prefer_idle(p);
- bool boosted = schedtune_task_boost(p) > 0 || per_task_boost(p) > 0;
+ bool boosted = fbt_env->boosted;
/* Initialise with deepest possible cstate (INT_MAX) */
int shallowest_idle_cstate = INT_MAX;
struct sched_domain *start_sd;
@@ -6858,9 +6914,7 @@ static void find_best_target(struct sched_domain *sd, cpumask_t *cpus,
!cpu_isolated(prev_cpu) && cpu_online(prev_cpu) &&
idle_cpu(prev_cpu)) {
- if (idle_get_state_idx(cpu_rq(prev_cpu)) <=
- (is_min_capacity_cpu(prev_cpu) ? 1 : 0)) {
-
+ if (idle_get_state_idx(cpu_rq(prev_cpu)) <= 1) {
target_cpu = prev_cpu;
fbt_env->fastpath = PREV_CPU_FASTPATH;
@@ -7598,6 +7652,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
fbt_env.placement_boost = placement_boost;
fbt_env.need_idle = need_idle;
fbt_env.start_cpu = start_cpu;
+ fbt_env.boosted = boosted;
find_best_target(NULL, candidates, p, &fbt_env);
} else {
@@ -8691,7 +8746,17 @@ static int detach_tasks(struct lb_env *env)
if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
goto next;
- if ((load / 2) > env->imbalance)
+ /*
+	 * p is not the running task when we get here, so if p is one of
+	 * only two tasks on the src cpu rq and not the running one, it
+	 * is the only task that can be balanced. Skip tasks whose load
+	 * exceeds 2*imbalance only when other tasks remain to be
+	 * balanced or when big tasks may be ignored.
+ */
+ if (((cpu_rq(env->src_cpu)->nr_running > 2) ||
+ (env->flags & LBF_IGNORE_BIG_TASKS)) &&
+ ((load / 2) > env->imbalance))
goto next;
detach_task(p, env);
@@ -12178,18 +12243,22 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
{
int node;
unsigned long tsf = 0, tpf = 0, gsf = 0, gpf = 0;
+ struct numa_group *ng;
+ rcu_read_lock();
+ ng = rcu_dereference(p->numa_group);
for_each_online_node(node) {
if (p->numa_faults) {
tsf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 0)];
tpf = p->numa_faults[task_faults_idx(NUMA_MEM, node, 1)];
}
- if (p->numa_group) {
- gsf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 0)],
- gpf = p->numa_group->faults[task_faults_idx(NUMA_MEM, node, 1)];
+ if (ng) {
+ gsf = ng->faults[task_faults_idx(NUMA_MEM, node, 0)],
+ gpf = ng->faults[task_faults_idx(NUMA_MEM, node, 1)];
}
print_numa_stats(m, node, tsf, tpf, gsf, gpf);
}
+ rcu_read_unlock();
}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_SCHED_DEBUG */
@@ -12531,6 +12600,7 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
int active_balance;
int new_cpu = -1;
int prev_cpu = task_cpu(p);
+ int ret;
if (rq->misfit_task_load) {
if (rq->curr->state != TASK_RUNNING ||
@@ -12550,9 +12620,13 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
if (active_balance) {
mark_reserved(new_cpu);
raw_spin_unlock(&migration_lock);
- stop_one_cpu_nowait(prev_cpu,
+ ret = stop_one_cpu_nowait(prev_cpu,
active_load_balance_cpu_stop, rq,
&rq->active_balance_work);
+ if (!ret)
+ clear_reserved(new_cpu);
+ else
+ wake_up_if_idle(new_cpu);
return;
}
} else {
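The deref_task_numa_group()/deref_curr_numa_group() helpers enforce a load-once discipline: readers snapshot p->numa_group a single time and test that snapshot, instead of dereferencing the shared pointer twice while task_numa_free() may be nulling it. C11 atomics can model the snapshot half of this (not RCU's deferred reclamation), as a rough analogue:

#include <stdatomic.h>
#include <stdio.h>

struct numa_group { int gid; };

static _Atomic(struct numa_group *) shared_ng;

/* task_numa_group_id() analogue: load the shared pointer once and
 * test the snapshot, never the shared pointer itself. */
static int group_id(void)
{
	struct numa_group *ng = atomic_load_explicit(&shared_ng,
						     memory_order_acquire);
	return ng ? ng->gid : 0;
}

int main(void)
{
	static struct numa_group g = { .gid = 42 };

	atomic_store_explicit(&shared_ng, &g, memory_order_release);
	printf("gid=%d\n", group_id());		/* 42 */
	atomic_store_explicit(&shared_ng, NULL, memory_order_release);
	printf("gid=%d\n", group_id());		/* 0, no NULL deref */
	return 0;
}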
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 2c8719f..c349976 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -60,7 +60,8 @@ static noinline int __cpuidle cpu_idle_poll(void)
stop_critical_timings();
while (!tif_need_resched() &&
- (cpu_idle_force_poll || tick_check_broadcast_expired()))
+ (cpu_idle_force_poll || tick_check_broadcast_expired() ||
+ is_reserved(smp_processor_id())))
cpu_relax();
start_critical_timings();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
@@ -256,7 +257,8 @@ static void do_idle(void)
* broadcast device expired for us, we don't want to go deep
* idle as we know that the IPI is going to arrive right away.
*/
- if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
+ if (cpu_idle_force_poll || tick_check_broadcast_expired() ||
+ is_reserved(smp_processor_id())) {
tick_nohz_idle_restart_tick();
cpu_idle_poll();
} else {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b7af759..2b1410e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2680,7 +2680,7 @@ extern unsigned int __read_mostly sched_load_granule;
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
extern int update_preferred_cluster(struct related_thread_group *grp,
- struct task_struct *p, u32 old_load);
+ struct task_struct *p, u32 old_load, bool from_tick);
extern void set_preferred_cluster(struct related_thread_group *grp);
extern void add_new_task_to_grp(struct task_struct *new);
@@ -2995,7 +2995,7 @@ static inline u32 task_load(struct task_struct *p) { return 0; }
static inline u32 task_pl(struct task_struct *p) { return 0; }
static inline int update_preferred_cluster(struct related_thread_group *grp,
- struct task_struct *p, u32 old_load)
+ struct task_struct *p, u32 old_load, bool from_tick)
{
return 0;
}
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 2bedd91..e5f1c4e 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -475,6 +475,19 @@ static u32 top_task_load(struct rq *rq)
}
}
+unsigned int sysctl_sched_user_hint;
+static unsigned long sched_user_hint_reset_time;
+static bool is_cluster_hosting_top_app(struct sched_cluster *cluster);
+
+static inline bool should_apply_suh_freq_boost(struct sched_cluster *cluster)
+{
+ if (sched_freq_aggr_en || !sysctl_sched_user_hint ||
+ !cluster->aggr_grp_load)
+ return false;
+
+ return is_cluster_hosting_top_app(cluster);
+}
+
static inline u64 freq_policy_load(struct rq *rq)
{
unsigned int reporting_policy = sysctl_sched_freq_reporting_policy;
@@ -510,9 +523,18 @@ static inline u64 freq_policy_load(struct rq *rq)
break;
}
+ if (should_apply_suh_freq_boost(cluster)) {
+ if (is_suh_max())
+ load = sched_ravg_window;
+ else
+ load = div64_u64(load * sysctl_sched_user_hint,
+ (u64)100);
+ }
+
done:
trace_sched_load_to_gov(rq, aggr_grp_load, tt_load, sched_freq_aggr_en,
- load, reporting_policy, walt_rotation_enabled);
+ load, reporting_policy, walt_rotation_enabled,
+ sysctl_sched_user_hint);
return load;
}
@@ -2607,6 +2629,9 @@ void update_best_cluster(struct related_thread_group *grp,
return;
}
+ if (is_suh_max())
+ demand = sched_group_upmigrate;
+
if (!grp->skip_min) {
if (demand >= sched_group_upmigrate) {
grp->skip_min = true;
@@ -2703,13 +2728,16 @@ void set_preferred_cluster(struct related_thread_group *grp)
}
int update_preferred_cluster(struct related_thread_group *grp,
- struct task_struct *p, u32 old_load)
+ struct task_struct *p, u32 old_load, bool from_tick)
{
u32 new_load = task_load(p);
if (!grp)
return 0;
+ if (unlikely(from_tick && is_suh_max()))
+ return 1;
+
/*
* Update if task's load has changed significantly or a complete window
* has passed since we last updated preference
@@ -2724,8 +2752,6 @@ int update_preferred_cluster(struct related_thread_group *grp,
#define ADD_TASK 0
#define REM_TASK 1
-#define DEFAULT_CGROUP_COLOC_ID 1
-
static inline struct related_thread_group*
lookup_related_thread_group(unsigned int group_id)
{
@@ -2963,6 +2989,22 @@ int sync_cgroup_colocation(struct task_struct *p, bool insert)
}
#endif
+static bool is_cluster_hosting_top_app(struct sched_cluster *cluster)
+{
+ struct related_thread_group *grp;
+ bool grp_on_min;
+
+ grp = lookup_related_thread_group(DEFAULT_CGROUP_COLOC_ID);
+
+ if (!grp)
+ return false;
+
+ grp_on_min = !grp->skip_min &&
+ (sched_boost_policy() != SCHED_BOOST_ON_BIG);
+
+ return (is_min_capacity_cluster(cluster) == grp_on_min);
+}
+
static unsigned long max_cap[NR_CPUS];
static unsigned long thermal_cap_cpu[NR_CPUS];
@@ -3223,6 +3265,10 @@ void walt_irq_work(struct irq_work *irq_work)
rtgb_active = false;
}
+ if (!is_migration && sysctl_sched_user_hint && time_after(jiffies,
+ sched_user_hint_reset_time))
+ sysctl_sched_user_hint = 0;
+
for_each_sched_cluster(cluster) {
cpumask_t cluster_online_cpus;
unsigned int num_cpus, i = 1;
@@ -3435,3 +3481,26 @@ void walt_sched_init_rq(struct rq *rq)
rq->cum_window_demand_scaled = 0;
rq->notif_pending = false;
}
+
+int walt_proc_user_hint_handler(struct ctl_table *table,
+ int write, void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+ unsigned int old_value;
+ static DEFINE_MUTEX(mutex);
+
+ mutex_lock(&mutex);
+
+ sched_user_hint_reset_time = jiffies + HZ;
+ old_value = sysctl_sched_user_hint;
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (ret || !write || (old_value == sysctl_sched_user_hint))
+ goto unlock;
+
+ irq_work_queue(&walt_migration_irq_work);
+
+unlock:
+ mutex_unlock(&mutex);
+ return ret;
+}
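Numerically, sched_user_hint is a percentage multiplier on the load a cluster reports to the governor; the maximum (1000, per the sysctl bounds added in kernel/sysctl.c below) pins the load to a full WALT window through is_suh_max(). A sketch of freq_policy_load()'s boost arithmetic with illustrative numbers:

#include <inttypes.h>
#include <stdio.h>

/* Toy version of the boost in freq_policy_load(): a hint of 100 is
 * neutral, larger values scale the reported load up, and the maximum
 * hint reports a full window instead (is_suh_max() analogue). */
static uint64_t boost_load(uint64_t load, unsigned int hint,
			   uint64_t sched_ravg_window)
{
	if (hint == 1000)
		return sched_ravg_window;
	return load * hint / 100;
}

int main(void)
{
	printf("%" PRIu64 "\n", boost_load(5000, 100, 20000));	/* 5000  */
	printf("%" PRIu64 "\n", boost_load(5000, 150, 20000));	/* 7500  */
	printf("%" PRIu64 "\n", boost_load(5000, 1000, 20000));	/* 20000 */
	return 0;
}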
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 152a9df..e89dc2f 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -306,6 +306,23 @@ static inline void walt_enable_frequency_aggregation(bool enable)
sched_freq_aggr_en = enable;
}
+static inline bool is_suh_max(void)
+{
+ return sysctl_sched_user_hint == sched_user_hint_max;
+}
+
+#define DEFAULT_CGROUP_COLOC_ID 1
+static inline bool walt_should_kick_upmigrate(struct task_struct *p, int cpu)
+{
+ struct related_thread_group *rtg = p->grp;
+
+ if (is_suh_max() && rtg && rtg->id == DEFAULT_CGROUP_COLOC_ID &&
+ rtg->skip_min && p->unfilter)
+ return is_min_capacity_cpu(cpu);
+
+ return false;
+}
+
#else /* CONFIG_SCHED_WALT */
static inline void walt_sched_init_rq(struct rq *rq) { }
@@ -386,6 +403,12 @@ static inline u64 sched_irqload(int cpu)
{
return 0;
}
+
+static inline bool walt_should_kick_upmigrate(struct task_struct *p, int cpu)
+{
+ return false;
+}
+
#endif /* CONFIG_SCHED_WALT */
#endif
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8497914..a9849cd 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -141,6 +141,9 @@ static int six_hundred_forty_kb = 640 * 1024;
#endif
static int two_hundred_fifty_five = 255;
+#ifdef CONFIG_SCHED_WALT
+const int sched_user_hint_max = 1000;
+#endif
/* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
@@ -345,6 +348,15 @@ static struct ctl_table kern_table[] = {
#endif
#ifdef CONFIG_SCHED_WALT
{
+ .procname = "sched_user_hint",
+ .data = &sysctl_sched_user_hint,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = walt_proc_user_hint_handler,
+ .extra1 = &zero,
+ .extra2 = (void *)&sched_user_hint_max,
+ },
+ {
.procname = "sched_cpu_high_irqload",
.data = &sysctl_sched_cpu_high_irqload,
.maxlen = sizeof(unsigned int),
@@ -426,7 +438,7 @@ static struct ctl_table kern_table[] = {
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .extra1 = &one,
.extra2 = &two_hundred_fifty_five,
},
#endif
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index a578d8f51..2dac07a 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -798,6 +798,10 @@ static int taskstats2_foreach(struct sk_buff *skb, struct netlink_callback *cb)
nla = nla_find(nlmsg_attrdata(cb->nlh, GENL_HDRLEN),
nlmsg_attrlen(cb->nlh, GENL_HDRLEN),
TASKSTATS_TYPE_FOREACH);
+
+ if (!nla)
+ goto out;
+
buf = nla_get_u32(nla);
oom_score_min = (short) (buf & 0xFFFF);
oom_score_max = (short) ((buf >> 16) & 0xFFFF);
@@ -854,6 +858,7 @@ static int taskstats2_foreach(struct sk_buff *skb, struct netlink_callback *cb)
}
cb->args[0] = iter.tgid;
+out:
return skb->len;
}
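The fix is the standard defensive shape for netlink attribute parsing: the lookup can legitimately return NULL, so a missing attribute must short-circuit rather than be dereferenced. Sketched generically below; find_attr() is a simplified stand-in, not the kernel's nla_find():

#include <stdio.h>

struct nlattr { int type; unsigned int value; };

static struct nlattr *find_attr(struct nlattr *attrs, int n, int type)
{
	for (int i = 0; i < n; i++)
		if (attrs[i].type == type)
			return &attrs[i];
	return NULL;
}

int main(void)
{
	struct nlattr attrs[] = { { 1, 0xdead } };
	struct nlattr *nla = find_attr(attrs, 1, 2);	/* type 2 absent */

	if (!nla)
		return 0;	/* the "goto out" in the patch */
	printf("%x\n", nla->value);
	return 0;
}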
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ec149b7..f322cbf 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1648,6 +1648,11 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
return keep_regs;
}
+static struct ftrace_ops *
+ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
+static struct ftrace_ops *
+ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
+
static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
int filter_hash,
bool inc)
@@ -1776,15 +1781,17 @@ static bool __ftrace_hash_rec_update(struct ftrace_ops *ops,
}
/*
- * If the rec had TRAMP enabled, then it needs to
- * be cleared. As TRAMP can only be enabled iff
- * there is only a single ops attached to it.
- * In otherwords, always disable it on decrementing.
- * In the future, we may set it if rec count is
- * decremented to one, and the ops that is left
- * has a trampoline.
+	 * TRAMP needs to be set only if the rec count is
+	 * decremented to one and the remaining ops has a
+	 * trampoline, since TRAMP can only be enabled when
+	 * a single ops is attached to the record.
*/
- rec->flags &= ~FTRACE_FL_TRAMP;
+ if (ftrace_rec_count(rec) == 1 &&
+ ftrace_find_tramp_ops_any(rec))
+ rec->flags |= FTRACE_FL_TRAMP;
+ else
+ rec->flags &= ~FTRACE_FL_TRAMP;
/*
* flags will be cleared in ftrace_check_record()
@@ -1977,11 +1984,6 @@ static void print_ip_ins(const char *fmt, const unsigned char *p)
printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
}
-static struct ftrace_ops *
-ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
-static struct ftrace_ops *
-ftrace_find_tramp_ops_next(struct dyn_ftrace *rec, struct ftrace_ops *ops);
-
enum ftrace_bug_type ftrace_bug_type;
const void *ftrace_expected;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 33cbc18..56d2963 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6089,6 +6089,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
struct trace_array *tr = filp->private_data;
struct ring_buffer_event *event;
enum event_trigger_type tt = ETT_NONE;
+ struct trace_entry *trace_entry;
struct ring_buffer *buffer;
struct print_entry *entry;
unsigned long irq_flags;
@@ -6126,7 +6127,8 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
return -EBADF;
entry = ring_buffer_event_data(event);
- entry->ip = _THIS_IP_;
+ trace_entry = (struct trace_entry *)entry;
+ entry->ip = trace_entry->pid;
len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
if (len) {
@@ -6146,12 +6148,12 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
if (entry->buf[cnt - 1] != '\n') {
entry->buf[cnt] = '\n';
entry->buf[cnt + 1] = '\0';
- stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 2);
+ stm_log(OST_ENTITY_TRACE_MARKER, entry, sizeof(*entry)+cnt + 2);
} else {
entry->buf[cnt] = '\0';
- stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 1);
+ stm_log(OST_ENTITY_TRACE_MARKER, entry, sizeof(*entry)+cnt + 1);
}
-
+ entry->ip = _THIS_IP_;
__buffer_unlock_commit(buffer, event);
if (tt)
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index fd48a15..a74b1aa 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -894,8 +894,11 @@ static int __init test_firmware_init(void)
return -ENOMEM;
rc = __test_firmware_config_init();
- if (rc)
+ if (rc) {
+ kfree(test_fw_config);
+ pr_err("could not init firmware test config: %d\n", rc);
return rc;
+ }
rc = misc_register(&test_fw_misc_device);
if (rc) {
diff --git a/lib/test_overflow.c b/lib/test_overflow.c
index fc68056..7a4b6f6 100644
--- a/lib/test_overflow.c
+++ b/lib/test_overflow.c
@@ -486,16 +486,17 @@ static int __init test_overflow_shift(void)
* Deal with the various forms of allocator arguments. See comments above
* the DEFINE_TEST_ALLOC() instances for mapping of the "bits".
*/
-#define alloc010(alloc, arg, sz) alloc(sz, GFP_KERNEL)
-#define alloc011(alloc, arg, sz) alloc(sz, GFP_KERNEL, NUMA_NO_NODE)
+#define alloc_GFP (GFP_KERNEL | __GFP_NOWARN)
+#define alloc010(alloc, arg, sz) alloc(sz, alloc_GFP)
+#define alloc011(alloc, arg, sz) alloc(sz, alloc_GFP, NUMA_NO_NODE)
#define alloc000(alloc, arg, sz) alloc(sz)
#define alloc001(alloc, arg, sz) alloc(sz, NUMA_NO_NODE)
-#define alloc110(alloc, arg, sz) alloc(arg, sz, GFP_KERNEL)
+#define alloc110(alloc, arg, sz) alloc(arg, sz, alloc_GFP)
#define free0(free, arg, ptr) free(ptr)
#define free1(free, arg, ptr) free(arg, ptr)
-/* Wrap around to 8K */
-#define TEST_SIZE (9 << PAGE_SHIFT)
+/* Wrap around to 16K */
+#define TEST_SIZE (5 * 4096)
#define DEFINE_TEST_ALLOC(func, free_func, want_arg, want_gfp, want_node)\
static int __init test_ ## func (void *arg) \
diff --git a/lib/test_string.c b/lib/test_string.c
index 0fcdb82d..98a787e 100644
--- a/lib/test_string.c
+++ b/lib/test_string.c
@@ -35,7 +35,7 @@ static __init int memset16_selftest(void)
fail:
kfree(p);
if (i < 256)
- return (i << 24) | (j << 16) | k;
+ return (i << 24) | (j << 16) | k | 0x8000;
return 0;
}
@@ -71,7 +71,7 @@ static __init int memset32_selftest(void)
fail:
kfree(p);
if (i < 256)
- return (i << 24) | (j << 16) | k;
+ return (i << 24) | (j << 16) | k | 0x8000;
return 0;
}
@@ -107,7 +107,7 @@ static __init int memset64_selftest(void)
fail:
kfree(p);
if (i < 256)
- return (i << 24) | (j << 16) | k;
+ return (i << 24) | (j << 16) | k | 0x8000;
return 0;
}
diff --git a/mm/cma.c b/mm/cma.c
index 672ced1..d3973af 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -18,11 +18,6 @@
#define pr_fmt(fmt) "cma: " fmt
-#ifdef CONFIG_CMA_DEBUG
-#ifndef DEBUG
-# define DEBUG
-#endif
-#endif
#define CREATE_TRACE_POINTS
#include <linux/memblock.h>
@@ -313,6 +308,12 @@ int __init cma_declare_contiguous(phys_addr_t base,
*/
alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
+ if (fixed && base & (alignment - 1)) {
+ ret = -EINVAL;
+ pr_err("Region at %pa must be aligned to %pa bytes\n",
+ &base, &alignment);
+ goto err;
+ }
base = ALIGN(base, alignment);
size = ALIGN(size, alignment);
limit &= ~(alignment - 1);
@@ -343,6 +344,13 @@ int __init cma_declare_contiguous(phys_addr_t base,
if (limit == 0 || limit > memblock_end)
limit = memblock_end;
+ if (base + size > limit) {
+ ret = -EINVAL;
+ pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
+ &size, &base, &limit);
+ goto err;
+ }
+
/* Reserve memory */
if (fixed) {
if (memblock_is_region_reserved(base, size) ||
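Both new -EINVAL checks are plain power-of-two arithmetic: base & (alignment - 1) is base's offset inside an alignment-sized block, so a nonzero result means a fixed region cannot be honored. For example:

#include <stdio.h>

static int misaligned(unsigned long base, unsigned long alignment)
{
	/* Valid only for power-of-two alignments, as in cma. */
	return (base & (alignment - 1)) != 0;
}

int main(void)
{
	printf("%d\n", misaligned(0x400000, 0x400000));	/* 0: aligned    */
	printf("%d\n", misaligned(0x401000, 0x400000));	/* 1: misaligned */
	return 0;
}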
diff --git a/mm/gup.c b/mm/gup.c
index caadd31..f3088d2 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -458,11 +458,14 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
pgd = pgd_offset_k(address);
else
pgd = pgd_offset_gate(mm, address);
- BUG_ON(pgd_none(*pgd));
+ if (pgd_none(*pgd))
+ return -EFAULT;
p4d = p4d_offset(pgd, address);
- BUG_ON(p4d_none(*p4d));
+ if (p4d_none(*p4d))
+ return -EFAULT;
pud = pud_offset(p4d, address);
- BUG_ON(pud_none(*pud));
+ if (pud_none(*pud))
+ return -EFAULT;
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return -EFAULT;
@@ -1367,7 +1370,8 @@ static inline pte_t gup_get_pte(pte_t *ptep)
}
#endif
-static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
+static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start,
+ struct page **pages)
{
while ((*nr) - nr_start) {
struct page *page = pages[--(*nr)];
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a21b2ca..8dbf67f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -33,6 +33,7 @@
#include <linux/page_idle.h>
#include <linux/shmem_fs.h>
#include <linux/oom.h>
+#include <linux/page_owner.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
@@ -2478,6 +2479,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
}
ClearPageCompound(head);
+
+ split_page_owner(head, HPAGE_PMD_ORDER);
+
/* See comment in __split_huge_page_tail() */
if (PageAnon(head)) {
/* Additional pin to radix tree of swap cache */
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index a606745..1589165 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -126,7 +126,7 @@
/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
__GFP_NORETRY | __GFP_NOMEMALLOC | \
- __GFP_NOWARN | __GFP_NOFAIL)
+ __GFP_NOWARN)
/* scanning area inside a memory block */
struct kmemleak_scan_area {
@@ -588,7 +588,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
if (in_irq()) {
object->pid = 0;
strncpy(object->comm, "hardirq", sizeof(object->comm));
- } else if (in_softirq()) {
+ } else if (in_serving_softirq()) {
object->pid = 0;
strncpy(object->comm, "softirq", sizeof(object->comm));
} else {
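The kmemleak change hinges on a subtlety of preempt_count: in_softirq() is also nonzero inside plain local_bh_disable() sections, while in_serving_softirq() is set only while a softirq handler is actually running. The masks below mirror mainline's layout of that era to show the difference:

#include <stdio.h>

#define SOFTIRQ_SHIFT		8
#define SOFTIRQ_OFFSET		(1UL << SOFTIRQ_SHIFT)
#define SOFTIRQ_MASK		(0xffUL << SOFTIRQ_SHIFT)
#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)

static unsigned long in_softirq(unsigned long pc)
{
	return pc & SOFTIRQ_MASK;	/* handler *or* bh-disabled */
}

static unsigned long in_serving_softirq(unsigned long pc)
{
	return pc & SOFTIRQ_OFFSET;	/* handler only */
}

int main(void)
{
	unsigned long bh_disabled = SOFTIRQ_DISABLE_OFFSET;
	unsigned long serving = SOFTIRQ_OFFSET;

	printf("bh-disabled: in_softirq=%d in_serving=%d\n",
	       !!in_softirq(bh_disabled), !!in_serving_softirq(bh_disabled));
	printf("serving:     in_softirq=%d in_serving=%d\n",
	       !!in_softirq(serving), !!in_serving_softirq(serving));
	return 0;	/* prints 1 0, then 1 1 */
}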
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7e7cc0c..ecde75f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1037,26 +1037,45 @@ void mem_cgroup_iter_break(struct mem_cgroup *root,
css_put(&prev->css);
}
-static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+static void __invalidate_reclaim_iterators(struct mem_cgroup *from,
+ struct mem_cgroup *dead_memcg)
{
- struct mem_cgroup *memcg = dead_memcg;
struct mem_cgroup_reclaim_iter *iter;
struct mem_cgroup_per_node *mz;
int nid;
int i;
- for (; memcg; memcg = parent_mem_cgroup(memcg)) {
- for_each_node(nid) {
- mz = mem_cgroup_nodeinfo(memcg, nid);
- for (i = 0; i <= DEF_PRIORITY; i++) {
- iter = &mz->iter[i];
- cmpxchg(&iter->position,
- dead_memcg, NULL);
- }
+ for_each_node(nid) {
+ mz = mem_cgroup_nodeinfo(from, nid);
+ for (i = 0; i <= DEF_PRIORITY; i++) {
+ iter = &mz->iter[i];
+ cmpxchg(&iter->position,
+ dead_memcg, NULL);
}
}
}
+static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
+{
+ struct mem_cgroup *memcg = dead_memcg;
+ struct mem_cgroup *last;
+
+ do {
+ __invalidate_reclaim_iterators(memcg, dead_memcg);
+ last = memcg;
+ } while ((memcg = parent_mem_cgroup(memcg)));
+
+ /*
+	 * When cgroup1 non-hierarchy mode is used,
+ * parent_mem_cgroup() does not walk all the way up to the
+ * cgroup root (root_mem_cgroup). So we have to handle
+ * dead_memcg from cgroup root separately.
+ */
+ if (last != root_mem_cgroup)
+ __invalidate_reclaim_iterators(root_mem_cgroup,
+ dead_memcg);
+}
+
/**
* mem_cgroup_scan_tasks - iterate over tasks of a memory cgroup hierarchy
* @memcg: hierarchy root
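The hunk above resets cached reclaim-iterator positions with cmpxchg() so a slot is cleared only while it still points at the dying memcg; a slot that has already moved on is left untouched. A minimal userspace sketch of that compare-and-clear pattern, using C11 atomics in place of the kernel's cmpxchg() (all names here are illustrative, not from the patch):

#include <stdatomic.h>
#include <stdio.h>

struct obj { int id; };

/* Cached iterator position, updated concurrently by other threads. */
static _Atomic(struct obj *) cached_pos;

/* Clear the cache only if it still points at the dying object,
 * mirroring cmpxchg(&iter->position, dead_memcg, NULL). */
static void invalidate_if_points_at(struct obj *dying)
{
	struct obj *expected = dying;

	atomic_compare_exchange_strong(&cached_pos, &expected, NULL);
}

int main(void)
{
	struct obj a = { 1 }, b = { 2 };

	atomic_store(&cached_pos, &a);
	invalidate_if_points_at(&a);	/* slot cleared */
	printf("%p\n", (void *)atomic_load(&cached_pos));

	atomic_store(&cached_pos, &b);
	invalidate_if_points_at(&a);	/* slot left alone */
	printf("%p\n", (void *)atomic_load(&cached_pos));
	return 0;
}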
diff --git a/mm/memory.c b/mm/memory.c
index b009e8c..fc9c36b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4982,7 +4982,9 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
void *old_buf = buf;
int write = gup_flags & FOLL_WRITE;
- down_read(&mm->mmap_sem);
+ if (down_read_killable(&mm->mmap_sem))
+ return 0;
+
/* ignore errors, just check how much was successfully transferred */
while (len) {
int bytes, ret, offset;
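__access_remote_vm() reports the number of bytes transferred, so the switch to down_read_killable() maps a fatal-signal failure to "0 bytes copied" rather than a negative errno. A small sketch of that calling convention, with lock_killable() as a hypothetical stand-in for down_read_killable():

#include <stdio.h>

/* lock_killable() is a made-up stand-in for down_read_killable():
 * 0 means the lock was taken, non-zero means a fatal signal arrived. */
static int lock_killable(void)
{
	return 0;
}

static int access_remote(char *buf, int len)
{
	(void)buf;
	if (lock_killable())
		return 0;	/* caller just sees "nothing was copied" */
	/* ... copy up to len bytes under the lock, then unlock ... */
	return len;
}

int main(void)
{
	char buf[16];

	printf("copied %d bytes\n", access_remote(buf, sizeof(buf)));
	return 0;
}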
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d78b843..4b81d09 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -406,7 +406,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
},
};
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags);
struct queue_pages {
@@ -432,11 +432,14 @@ static inline bool queue_pages_required(struct page *page,
}
/*
- * queue_pages_pmd() has three possible return values:
- * 1 - pages are placed on the right node or queued successfully.
- * 0 - THP was split.
- * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
- * page was already on a node that does not follow the policy.
+ * queue_pages_pmd() has four possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ * specified.
+ * 2 - THP was split.
+ * -EIO - the page is a migration entry, or only MPOL_MF_STRICT was
+ *        specified and an existing page was already on a node that
+ *        does not follow the policy.
*/
static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
unsigned long end, struct mm_walk *walk)
@@ -454,23 +457,20 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
if (is_huge_zero_page(page)) {
spin_unlock(ptl);
__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
+ ret = 2;
goto out;
}
- if (!queue_pages_required(page, qp)) {
- ret = 1;
+ if (!queue_pages_required(page, qp))
goto unlock;
- }
- ret = 1;
flags = qp->flags;
/* go to thp migration */
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
- if (!vma_migratable(walk->vma)) {
- ret = -EIO;
+ if (!vma_migratable(walk->vma) ||
+ migrate_page_add(page, qp->pagelist, flags)) {
+ ret = 1;
goto unlock;
}
-
- migrate_page_add(page, qp->pagelist, flags);
} else
ret = -EIO;
unlock:
@@ -482,6 +482,13 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
/*
* Scan through pages checking if pages follow certain conditions,
* and move them to the pagelist if they do.
+ *
+ * queue_pages_pte_range() has three possible return values:
+ * 0 - pages are placed on the right node or queued successfully.
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ * specified.
+ * -EIO - only MPOL_MF_STRICT was specified and an existing page was already
+ * on a node that does not follow the policy.
*/
static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
@@ -491,17 +498,17 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
struct queue_pages *qp = walk->private;
unsigned long flags = qp->flags;
int ret;
+ bool has_unmovable = false;
pte_t *pte;
spinlock_t *ptl;
ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
- if (ret > 0)
- return 0;
- else if (ret < 0)
+ if (ret != 2)
return ret;
}
+ /* THP was split, fall through to pte walk */
if (pmd_trans_unstable(pmd))
return 0;
@@ -522,14 +529,28 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
if (!queue_pages_required(page, qp))
continue;
if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
- if (!vma_migratable(vma))
+ /* MPOL_MF_STRICT must be specified if we get here */
+ if (!vma_migratable(vma)) {
+ has_unmovable = true;
break;
- migrate_page_add(page, qp->pagelist, flags);
+ }
+
+ /*
+			/*
+			 * Do not abort immediately since there may be
+			 * pages temporarily off the LRU in the range. We
+			 * still need to migrate the other LRU pages.
+			 */
+ if (migrate_page_add(page, qp->pagelist, flags))
+ has_unmovable = true;
} else
break;
}
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
+
+ if (has_unmovable)
+ return 1;
+
return addr != end ? -EIO : 0;
}
@@ -644,7 +665,13 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
*
* If pages found in a given range are on a set of nodes (determined by
* @nodes and @flags,) it's isolated and queued to the pagelist which is
- * passed via @private.)
+ * passed via @private.
+ *
+ * queue_pages_range() has three possible return values:
+ * 1 - there is an unmovable page, and MPOL_MF_MOVE* & MPOL_MF_STRICT were
+ * specified.
+ * 0 - queue pages successfully or no misplaced page.
+ * -EIO - there is a misplaced page and only MPOL_MF_STRICT was specified.
*/
static int
queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
@@ -939,7 +966,7 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
/*
* page migration, thp tail pages can be passed.
*/
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags)
{
struct page *head = compound_head(page);
@@ -952,8 +979,19 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON + page_is_file_cache(head),
hpage_nr_pages(head));
+ } else if (flags & MPOL_MF_STRICT) {
+		/*
+		 * A non-movable page may reach here. There may also be
+		 * pages temporarily off the LRU or non-LRU movable pages.
+		 * Treat them as unmovable pages: they can't be isolated,
+		 * so they can't be moved at the moment, and -EIO is
+		 * returned for this case too.
+		 */
+ return -EIO;
}
}
+
+ return 0;
}
/* page allocation callback for NUMA node migration */
@@ -1156,9 +1194,10 @@ static struct page *new_page(struct page *page, unsigned long start)
}
#else
-static void migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_page_add(struct page *page, struct list_head *pagelist,
unsigned long flags)
{
+ return -EIO;
}
int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
@@ -1181,6 +1220,7 @@ static long do_mbind(unsigned long start, unsigned long len,
struct mempolicy *new;
unsigned long end;
int err;
+ int ret;
LIST_HEAD(pagelist);
if (flags & ~(unsigned long)MPOL_MF_VALID)
@@ -1242,10 +1282,15 @@ static long do_mbind(unsigned long start, unsigned long len,
if (err)
goto mpol_out;
- err = queue_pages_range(mm, start, end, nmask,
+ ret = queue_pages_range(mm, start, end, nmask,
flags | MPOL_MF_INVERT, &pagelist);
- if (!err)
- err = mbind_range(mm, start, end, new);
+
+ if (ret < 0) {
+ err = -EIO;
+ goto up_out;
+ }
+
+ err = mbind_range(mm, start, end, new);
if (!err) {
int nr_failed = 0;
@@ -1258,13 +1303,14 @@ static long do_mbind(unsigned long start, unsigned long len,
putback_movable_pages(&pagelist);
}
- if (nr_failed && (flags & MPOL_MF_STRICT))
+ if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT)))
err = -EIO;
} else
putback_movable_pages(&pagelist);
+up_out:
up_write(&mm->mmap_sem);
- mpol_out:
+mpol_out:
mpol_put(new);
return err;
}
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 82bb1a9..06dedb1 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -316,7 +316,7 @@ static int do_mmu_notifier_register(struct mmu_notifier *mn,
* thanks to mm_take_all_locks().
*/
spin_lock(&mm->mmu_notifier_mm->lock);
- hlist_add_head(&mn->hlist, &mm->mmu_notifier_mm->list);
+ hlist_add_head_rcu(&mn->hlist, &mm->mmu_notifier_mm->list);
spin_unlock(&mm->mmu_notifier_mm->lock);
mm_drop_all_locks(mm);
diff --git a/mm/nommu.c b/mm/nommu.c
index e4aac33..1d63ecf 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1779,7 +1779,8 @@ int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
struct vm_area_struct *vma;
int write = gup_flags & FOLL_WRITE;
- down_read(&mm->mmap_sem);
+ if (down_read_killable(&mm->mmap_sem))
+ return 0;
/* the access must start within one of the target process's mappings */
vma = find_vma(mm, addr);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 0af8992..4272af2 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -52,11 +52,13 @@
#define CREATE_TRACE_POINTS
#include <trace/events/oom.h>
+#define ULMK_MAGIC "lmkd"
+
int sysctl_panic_on_oom =
IS_ENABLED(CONFIG_DEBUG_PANIC_ON_OOM) ? 2 : 0;
int sysctl_oom_kill_allocating_task;
int sysctl_oom_dump_tasks = 1;
-int sysctl_reap_mem_on_sigkill;
+int sysctl_reap_mem_on_sigkill = 1;
/*
* Serializes oom killer invocations (out_of_memory()) from all contexts to
@@ -1221,6 +1223,10 @@ void pagefault_out_of_memory(void)
.order = 0,
};
+ if (IS_ENABLED(CONFIG_HAVE_LOW_MEMORY_KILLER) ||
+ IS_ENABLED(CONFIG_HAVE_USERSPACE_LOW_MEMORY_KILLER))
+ return;
+
if (mem_cgroup_oom_synchronize(true))
return;
@@ -1230,30 +1236,6 @@ void pagefault_out_of_memory(void)
mutex_unlock(&oom_lock);
}
-/* Call this function with task_lock being held as we're accessing ->mm */
-void dump_killed_info(struct task_struct *selected)
-{
- int selected_tasksize = get_mm_rss(selected->mm);
-
- pr_info_ratelimited("Killing '%s' (%d), adj %hd,\n"
- " to free %ldkB on behalf of '%s' (%d)\n"
- " Free CMA is %ldkB\n"
- " Total reserve is %ldkB\n"
- " Total free pages is %ldkB\n"
- " Total file cache is %ldkB\n",
- selected->comm, selected->pid,
- selected->signal->oom_score_adj,
- selected_tasksize * (long)(PAGE_SIZE / 1024),
- current->comm, current->pid,
- global_zone_page_state(NR_FREE_CMA_PAGES) *
- (long)(PAGE_SIZE / 1024),
- totalreserve_pages * (long)(PAGE_SIZE / 1024),
- global_zone_page_state(NR_FREE_PAGES) *
- (long)(PAGE_SIZE / 1024),
- global_node_page_state(NR_FILE_PAGES) *
- (long)(PAGE_SIZE / 1024));
-}
-
void add_to_oom_reaper(struct task_struct *p)
{
static DEFINE_RATELIMIT_STATE(reaper_rs, DEFAULT_RATELIMIT_INTERVAL,
@@ -1272,10 +1254,10 @@ void add_to_oom_reaper(struct task_struct *p)
wake_oom_reaper(p);
}
- dump_killed_info(p);
task_unlock(p);
- if (__ratelimit(&reaper_rs) && p->signal->oom_score_adj == 0) {
+ if (strcmp(current->comm, ULMK_MAGIC) && __ratelimit(&reaper_rs)
+ && p->signal->oom_score_adj == 0) {
show_mem(SHOW_MEM_FILTER_NODES, NULL);
show_mem_call_notifiers();
if (sysctl_oom_dump_tasks)
diff --git a/mm/rmap.c b/mm/rmap.c
index a77f9b2..94e2488 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1466,7 +1466,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
/*
* No need to invalidate here it will synchronize on
* against the special swap migration pte.
+ *
+ * The assignment to subpage above was computed from a
+ * swap PTE which results in an invalid pointer.
+ * Since only PAGE_SIZE pages can currently be
+ * migrated, just set it to page. This will need to be
+ * changed when hugepage migrations to device private
+ * memory are supported.
*/
+ subpage = page;
goto discard;
}
diff --git a/mm/swap.c b/mm/swap.c
index 55b88f1..c481066 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -740,15 +740,20 @@ void release_pages(struct page **pages, int nr)
if (is_huge_zero_page(page))
continue;
- /* Device public page can not be huge page */
- if (is_device_public_page(page)) {
+ if (is_zone_device_page(page)) {
if (locked_pgdat) {
spin_unlock_irqrestore(&locked_pgdat->lru_lock,
flags);
locked_pgdat = NULL;
}
- put_devmap_managed_page(page);
- continue;
+ /*
+ * ZONE_DEVICE pages that return 'false' from
+ * put_devmap_managed_page() do not require special
+ * processing, and instead, expect a call to
+ * put_page_testzero().
+ */
+ if (put_devmap_managed_page(page))
+ continue;
}
page = compound_head(page);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 8721360..d515d13 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2364,6 +2364,12 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
return NULL;
/*
+ * First make sure the mappings are removed from all page-tables
+ * before they are freed.
+ */
+ vmalloc_sync_all();
+
+ /*
* In this function, newly allocated vm_struct has VM_UNINITIALIZED
* flag. It means that vm_struct is not fully initialized.
* Now, it is fully initialized, so remove this flag here.
@@ -2908,6 +2914,9 @@ EXPORT_SYMBOL(remap_vmalloc_range);
/*
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
* have one.
+ *
+ * The purpose of this function is to make sure the vmalloc area
+ * mappings are identical in all page-tables in the system.
*/
void __weak vmalloc_sync_all(void)
{
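The stub relies on __weak linkage: the generic definition is weak and an architecture can provide a strong definition that the linker prefers. A toy illustration of that mechanism (GCC/Clang attribute; the demo function name is made up):

#include <stdio.h>

/* Weak generic definition; an arch that needs real syncing would supply
 * a strong vmalloc_sync_all_demo() and the linker would prefer it. */
void __attribute__((weak)) vmalloc_sync_all_demo(void)
{
	puts("generic weak stub: nothing to sync");
}

int main(void)
{
	vmalloc_sync_all_demo();
	return 0;
}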
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8be730a..3d12b19 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -694,7 +694,14 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
unsigned long ret, freed = 0;
struct shrinker *shrinker;
- if (!mem_cgroup_is_root(memcg))
+ /*
+ * The root memcg might be allocated even though memcg is disabled
+ * via "cgroup_disable=memory" boot parameter. This could make
+	 * mem_cgroup_is_root() return false, so only the memcg slab
+	 * shrink would run and the global shrink would be skipped. This
+	 * may result in premature OOM.
+ */
+ if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
if (!down_read_trylock(&shrinker_rwsem))
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 6d97f19..821de0d 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -53,6 +53,7 @@
#include <linux/zpool.h>
#include <linux/mount.h>
#include <linux/migrate.h>
+#include <linux/wait.h>
#include <linux/pagemap.h>
#include <linux/fs.h>
@@ -267,6 +268,10 @@ struct zs_pool {
#ifdef CONFIG_COMPACTION
struct inode *inode;
struct work_struct free_work;
+ /* A wait queue for when migration races with async_free_zspage() */
+ struct wait_queue_head migration_wait;
+ atomic_long_t isolated_pages;
+ bool destroying;
#endif
};
@@ -1882,6 +1887,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
zspage->isolated--;
}
+static void putback_zspage_deferred(struct zs_pool *pool,
+ struct size_class *class,
+ struct zspage *zspage)
+{
+ enum fullness_group fg;
+
+ fg = putback_zspage(class, zspage);
+ if (fg == ZS_EMPTY)
+ schedule_work(&pool->free_work);
+}
+
+static inline void zs_pool_dec_isolated(struct zs_pool *pool)
+{
+ VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
+ atomic_long_dec(&pool->isolated_pages);
+ /*
+ * There's no possibility of racing, since wait_for_isolated_drain()
+ * checks the isolated count under &class->lock after enqueuing
+ * on migration_wait.
+ */
+ if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
+ wake_up_all(&pool->migration_wait);
+}
+
static void replace_sub_page(struct size_class *class, struct zspage *zspage,
struct page *newpage, struct page *oldpage)
{
@@ -1951,6 +1981,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
*/
if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
get_zspage_mapping(zspage, &class_idx, &fullness);
+ atomic_long_inc(&pool->isolated_pages);
remove_zspage(class, zspage, fullness);
}
@@ -2050,8 +2081,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
* Page migration is done so let's putback isolated zspage to
* the list if @page is final isolated subpage in the zspage.
*/
- if (!is_zspage_isolated(zspage))
- putback_zspage(class, zspage);
+ if (!is_zspage_isolated(zspage)) {
+ /*
+ * We cannot race with zs_destroy_pool() here because we wait
+ * for isolation to hit zero before we start destroying.
+ * Also, we ensure that everyone can see pool->destroying before
+ * we start waiting.
+ */
+ putback_zspage_deferred(pool, class, zspage);
+ zs_pool_dec_isolated(pool);
+ }
reset_page(page);
put_page(page);
@@ -2097,13 +2136,12 @@ static void zs_page_putback(struct page *page)
spin_lock(&class->lock);
dec_zspage_isolation(zspage);
if (!is_zspage_isolated(zspage)) {
- fg = putback_zspage(class, zspage);
/*
* Due to page_lock, we cannot free zspage immediately
* so let's defer.
*/
- if (fg == ZS_EMPTY)
- schedule_work(&pool->free_work);
+ putback_zspage_deferred(pool, class, zspage);
+ zs_pool_dec_isolated(pool);
}
spin_unlock(&class->lock);
}
@@ -2127,8 +2165,36 @@ static int zs_register_migration(struct zs_pool *pool)
return 0;
}
+static bool pool_isolated_are_drained(struct zs_pool *pool)
+{
+ return atomic_long_read(&pool->isolated_pages) == 0;
+}
+
+/* Wait for in-flight page migration to drain before pool teardown */
+static void wait_for_isolated_drain(struct zs_pool *pool)
+{
+ /*
+ * We're in the process of destroying the pool, so there are no
+ * active allocations. zs_page_isolate() fails for completely free
+ * zspages, so we need only wait for the zs_pool's isolated
+ * count to hit zero.
+ */
+ wait_event(pool->migration_wait,
+ pool_isolated_are_drained(pool));
+}
+
static void zs_unregister_migration(struct zs_pool *pool)
{
+ pool->destroying = true;
+ /*
+ * We need a memory barrier here to ensure global visibility of
+ * pool->destroying. Thus pool->isolated pages will either be 0 in which
+	 * pool->destroying. Thus pool->isolated_pages will either be 0, in which
+ * ensure that we wake up once isolation hits 0.
+ */
+ smp_mb();
+ wait_for_isolated_drain(pool); /* This can block */
flush_work(&pool->free_work);
iput(pool->inode);
}
@@ -2366,6 +2432,8 @@ struct zs_pool *zs_create_pool(const char *name)
if (!pool->name)
goto err;
+ init_waitqueue_head(&pool->migration_wait);
+
if (create_cache(pool))
goto err;
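The teardown handshake above publishes pool->destroying, issues smp_mb(), and then sleeps until the isolated count drains, while the migration side decrements the count and wakes the waiter if it sees destroying set. A compressed userspace model of that ordering, with C11 fences standing in for smp_mb() and a busy-wait standing in for wait_event()/wake_up_all() (illustrative only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_long isolated_pages;
static atomic_bool destroying;

static void *migration_done(void *arg)
{
	(void)arg;
	atomic_fetch_sub(&isolated_pages, 1);
	atomic_thread_fence(memory_order_seq_cst);	/* pairs with smp_mb() */
	if (atomic_load(&isolated_pages) == 0 && atomic_load(&destroying)) {
		/* kernel: wake_up_all(&pool->migration_wait) */
	}
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_store(&isolated_pages, 1);
	atomic_store(&destroying, 1);
	atomic_thread_fence(memory_order_seq_cst);	/* kernel: smp_mb() */

	pthread_create(&t, NULL, migration_done, NULL);
	while (atomic_load(&isolated_pages) != 0)	/* kernel: wait_event() */
		;
	pthread_join(t, NULL);
	puts("drained; safe to destroy the pool");
	return 0;
}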
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index fb54d32..6a362da 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1621,6 +1621,9 @@ br_multicast_leave_group(struct net_bridge *br,
if (!br_port_group_equal(p, port, src))
continue;
+ if (p->flags & MDB_PG_FLAGS_PERMANENT)
+ break;
+
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 7df2690..5f3950f 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -677,6 +677,11 @@ void br_vlan_flush(struct net_bridge *br)
ASSERT_RTNL();
+ /* delete auto-added default pvid local fdb before flushing vlans
+ * otherwise it will be leaked on bridge device init failure
+ */
+ br_fdb_delete_by_port(br, NULL, 0, 1);
+
vg = br_vlan_group(br);
__vlan_flush(vg);
RCU_INIT_POINTER(br->vlgrp, NULL);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 0bb4d71..62ffc98 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1779,20 +1779,28 @@ static int compat_calc_entry(const struct ebt_entry *e,
return 0;
}
+static int ebt_compat_init_offsets(unsigned int number)
+{
+ if (number > INT_MAX)
+ return -EINVAL;
+
+ /* also count the base chain policies */
+ number += NF_BR_NUMHOOKS;
+
+ return xt_compat_init_offsets(NFPROTO_BRIDGE, number);
+}
static int compat_table_info(const struct ebt_table_info *info,
struct compat_ebt_replace *newinfo)
{
unsigned int size = info->entries_size;
const void *entries = info->entries;
+ int ret;
newinfo->entries_size = size;
- if (info->nentries) {
- int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
- info->nentries);
- if (ret)
- return ret;
- }
+ ret = ebt_compat_init_offsets(info->nentries);
+ if (ret)
+ return ret;
return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
entries, newinfo);
@@ -2241,11 +2249,9 @@ static int compat_do_replace(struct net *net, void __user *user,
xt_compat_lock(NFPROTO_BRIDGE);
- if (tmp.nentries) {
- ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
- if (ret < 0)
- goto out_unlock;
- }
+ ret = ebt_compat_init_offsets(tmp.nentries);
+ if (ret < 0)
+ goto out_unlock;
ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
if (ret < 0)
@@ -2268,8 +2274,10 @@ static int compat_do_replace(struct net *net, void __user *user,
state.buf_kern_len = size64;
ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
- if (WARN_ON(ret < 0))
+ if (WARN_ON(ret < 0)) {
+ vfree(entries_tmp);
goto out_unlock;
+ }
vfree(entries_tmp);
tmp.entries_size = size64;
diff --git a/net/can/gw.c b/net/can/gw.c
index 53859346..bd21614 100644
--- a/net/can/gw.c
+++ b/net/can/gw.c
@@ -1046,32 +1046,50 @@ static __init int cgw_module_init(void)
pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
max_hops);
- register_pernet_subsys(&cangw_pernet_ops);
+ ret = register_pernet_subsys(&cangw_pernet_ops);
+ if (ret)
+ return ret;
+
+ ret = -ENOMEM;
cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
0, 0, NULL);
-
if (!cgw_cache)
- return -ENOMEM;
+ goto out_cache_create;
/* set notifier */
notifier.notifier_call = cgw_notifier;
- register_netdevice_notifier(¬ifier);
+ ret = register_netdevice_notifier(¬ifier);
+ if (ret)
+ goto out_register_notifier;
ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
NULL, cgw_dump_jobs, 0);
- if (ret) {
- unregister_netdevice_notifier(¬ifier);
- kmem_cache_destroy(cgw_cache);
- return -ENOBUFS;
- }
+ if (ret)
+ goto out_rtnl_register1;
- /* Only the first call to rtnl_register_module can fail */
- rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
- cgw_create_job, NULL, 0);
- rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
- cgw_remove_job, NULL, 0);
+ ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
+ cgw_create_job, NULL, 0);
+ if (ret)
+ goto out_rtnl_register2;
+ ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
+ cgw_remove_job, NULL, 0);
+ if (ret)
+ goto out_rtnl_register3;
return 0;
+
+out_rtnl_register3:
+ rtnl_unregister(PF_CAN, RTM_NEWROUTE);
+out_rtnl_register2:
+ rtnl_unregister(PF_CAN, RTM_GETROUTE);
+out_rtnl_register1:
+ unregister_netdevice_notifier(¬ifier);
+out_register_notifier:
+ kmem_cache_destroy(cgw_cache);
+out_cache_create:
+ unregister_pernet_subsys(&cangw_pernet_ops);
+
+ return ret;
}
static __exit void cgw_module_exit(void)
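The cgw_module_init() rework above converts previously ignored registration failures into the usual goto-unwind ladder: each failure jumps to a label that undoes exactly the steps that already succeeded, in reverse order. A minimal sketch of the pattern with made-up step/undo helpers:

#include <stdio.h>

static int step_a(int fail) { return fail == 1 ? -1 : 0; }
static int step_b(int fail) { return fail == 2 ? -1 : 0; }
static void undo_a(void) { puts("undo step a"); }

static int module_init_demo(int fail)
{
	int ret;

	ret = step_a(fail);
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = step_b(fail);
	if (ret)
		goto out_a;		/* undo only what succeeded */

	return 0;

out_a:
	undo_a();
	return ret;
}

int main(void)
{
	printf("fail at step b -> %d\n", module_init_demo(2));
	return 0;
}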
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 60934bd..76c41a8 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1423,7 +1423,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
struct ceph_osds up, acting;
bool force_resend = false;
bool unpaused = false;
- bool legacy_change;
+ bool legacy_change = false;
bool split = false;
bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
bool recovery_deletes = ceph_osdmap_flag(osdc,
@@ -1511,15 +1511,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
t->osd = acting.primary;
}
- if (unpaused || legacy_change || force_resend ||
- (split && con && CEPH_HAVE_FEATURE(con->peer_features,
- RESEND_ON_SPLIT)))
+ if (unpaused || legacy_change || force_resend || split)
ct_res = CALC_TARGET_NEED_RESEND;
else
ct_res = CALC_TARGET_NO_ACTION;
out:
- dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
+ dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
+ legacy_change, force_resend, split, ct_res, t->osd);
return ct_res;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index c5887ed..e369e1b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -9540,6 +9540,8 @@ static void __net_exit default_device_exit(struct net *net)
/* Push remaining network devices to init_net */
snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
+ if (__dev_get_by_name(&init_net, fb_name))
+ snprintf(fb_name, IFNAMSIZ, "dev%%d");
err = dev_change_net_namespace(dev, &init_net, fb_name);
if (err) {
pr_emerg("%s: failed to move %s to init_net: %d\n",
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index 142b294..b0b9413 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -127,6 +127,9 @@ static void dsa_switch_mdb_add_bitmap(struct dsa_switch *ds,
{
int port;
+ if (!ds->ops->port_mdb_add)
+ return;
+
for_each_set_bit(port, bitmap, ds->num_ports)
ds->ops->port_mdb_add(ds, port, mdb);
}
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index c248e0d..67ef9d8 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -89,9 +89,12 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
__ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);
err = ip_local_out(net, sk, skb);
- if (unlikely(net_xmit_eval(err)))
- pkt_len = 0;
- iptunnel_xmit_stats(dev, pkt_len);
+
+ if (dev) {
+ if (unlikely(net_xmit_eval(err)))
+ pkt_len = 0;
+ iptunnel_xmit_stats(dev, pkt_len);
+ }
}
EXPORT_SYMBOL_GPL(iptunnel_xmit);
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index c891235..4368282 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -281,6 +281,9 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb,
const struct iphdr *tiph = &tunnel->parms.iph;
u8 ipproto;
+ if (!pskb_inet_may_pull(skb))
+ goto tx_error;
+
switch (skb->protocol) {
case htons(ETH_P_IP):
ipproto = IPPROTO_IPIP;
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index 12843c9..74b19a5 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -96,6 +96,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
flow.flowi4_mark = info->flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
flow.flowi4_tos = RT_TOS(iph->tos);
flow.flowi4_scope = RT_SCOPE_UNIVERSE;
+ flow.flowi4_oif = l3mdev_master_ifindex_rcu(xt_in(par));
return rpfilter_lookup_reverse(xt_net(par), &flow, xt_in(par), info->flags) ^ invert;
}
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 01ecd51..a53ef07 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -680,12 +680,13 @@ static int prepare_ip6gre_xmit_ipv6(struct sk_buff *skb,
struct flowi6 *fl6, __u8 *dsfield,
int *encap_limit)
{
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct ipv6hdr *ipv6h;
struct ip6_tnl *t = netdev_priv(dev);
__u16 offset;
offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+ ipv6h = ipv6_hdr(skb);
if (offset > 0) {
struct ipv6_tlv_tnl_enc_lim *tel;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index ade1390..d0ad85b 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1283,12 +1283,11 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
}
fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+ dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
- dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
-
skb_set_inner_ipproto(skb, IPPROTO_IPIP);
err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1372,12 +1371,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
}
fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
+ dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
return -1;
- dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
-
skb_set_inner_ipproto(skb, IPPROTO_IPV6);
err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 673a4a9..45d8d08 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -197,6 +197,7 @@ static inline int ndisc_is_useropt(const struct net_device *dev,
{
return opt->nd_opt_type == ND_OPT_RDNSS ||
opt->nd_opt_type == ND_OPT_DNSSL ||
+ opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL ||
ndisc_ops_is_useropt(dev, opt->nd_opt_type);
}
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index c3c6b09..0f3407f 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -58,7 +58,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
if (rpfilter_addr_linklocal(&iph->saddr)) {
lookup_flags |= RT6_LOOKUP_F_IFACE;
fl6.flowi6_oif = dev->ifindex;
- } else if ((flags & XT_RPFILTER_LOOSE) == 0)
+ /* Set flowi6_oif for vrf devices to lookup route in l3mdev domain. */
+ } else if (netif_is_l3_master(dev) || netif_is_l3_slave(dev) ||
+ (flags & XT_RPFILTER_LOOSE) == 0)
fl6.flowi6_oif = dev->ifindex;
rt = (void *)ip6_route_lookup(net, &fl6, skb, lookup_flags);
@@ -73,7 +75,9 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
goto out;
}
- if (rt->rt6i_idev->dev == dev || (flags & XT_RPFILTER_LOOSE))
+ if (rt->rt6i_idev->dev == dev ||
+ l3mdev_master_ifindex_rcu(rt->rt6i_idev->dev) == dev->ifindex ||
+ (flags & XT_RPFILTER_LOOSE))
ret = true;
out:
ip6_rt_put(rt);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 04d9946..c095678 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1686,6 +1686,9 @@ static const struct proto_ops pppol2tp_ops = {
.recvmsg = pppol2tp_recvmsg,
.mmap = sock_no_mmap,
.ioctl = pppox_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = pppox_compat_ioctl,
+#endif
};
static const struct pppox_proto pppol2tp_proto = {
diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c
index bb886e7..f783d13 100644
--- a/net/mac80211/driver-ops.c
+++ b/net/mac80211/driver-ops.c
@@ -169,11 +169,16 @@ int drv_conf_tx(struct ieee80211_local *local,
if (!check_sdata_in_driver(sdata))
return -EIO;
- if (WARN_ONCE(params->cw_min == 0 ||
- params->cw_min > params->cw_max,
- "%s: invalid CW_min/CW_max: %d/%d\n",
- sdata->name, params->cw_min, params->cw_max))
+ if (params->cw_min == 0 || params->cw_min > params->cw_max) {
+ /*
+ * If we can't configure hardware anyway, don't warn. We may
+ * never have initialized the CW parameters.
+ */
+ WARN_ONCE(local->ops->conf_tx,
+ "%s: invalid CW_min/CW_max: %d/%d\n",
+ sdata->name, params->cw_min, params->cw_max);
return -EINVAL;
+ }
trace_drv_conf_tx(local, sdata, ac, params);
if (local->ops->conf_tx)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 1aaa73fa..dbd9a31 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -160,10 +160,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
memcpy(&sta_ht_cap, &sband->ht_cap, sizeof(sta_ht_cap));
ieee80211_apply_htcap_overrides(sdata, &sta_ht_cap);
+ memset(chandef, 0, sizeof(struct cfg80211_chan_def));
chandef->chan = channel;
chandef->width = NL80211_CHAN_WIDTH_20_NOHT;
chandef->center_freq1 = channel->center_freq;
- chandef->center_freq2 = 0;
if (!ht_oper || !sta_ht_cap.ht_supported) {
ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT;
@@ -1967,6 +1967,16 @@ ieee80211_sta_wmm_params(struct ieee80211_local *local,
ieee80211_regulatory_limit_wmm_params(sdata, ¶ms[ac], ac);
}
+ /* WMM specification requires all 4 ACIs. */
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ if (params[ac].cw_min == 0) {
+ sdata_info(sdata,
+ "AP has invalid WMM params (missing AC %d), using defaults\n",
+ ac);
+ return false;
+ }
+ }
+
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
mlme_dbg(sdata,
"WMM AC=%d acm=%d aifs=%d cWmin=%d cWmax=%d txop=%d uapsd=%d, downgraded=%d\n",
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 534a604..9afd3fc 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -264,6 +264,8 @@ static int ieee80211_tx_radiotap_len(struct ieee80211_tx_info *info)
/* IEEE80211_RADIOTAP_RATE rate */
if (info->status.rates[0].idx >= 0 &&
!(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
+ RATE_INFO_FLAGS_DMG |
+ RATE_INFO_FLAGS_EDMG |
IEEE80211_TX_RC_VHT_MCS)))
len += 2;
@@ -315,6 +317,8 @@ ieee80211_add_tx_radiotap_header(struct ieee80211_local *local,
/* IEEE80211_RADIOTAP_RATE */
if (info->status.rates[0].idx >= 0 &&
!(info->status.rates[0].flags & (IEEE80211_TX_RC_MCS |
+ RATE_INFO_FLAGS_DMG |
+ RATE_INFO_FLAGS_EDMG |
IEEE80211_TX_RC_VHT_MCS))) {
u16 rate;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 13ade57..4f01321 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -230,7 +230,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
e.id = ip_to_id(map, ip);
- if (opt->flags & IPSET_DIM_ONE_SRC)
+ if (opt->flags & IPSET_DIM_TWO_SRC)
ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
else
ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 1577f2f..e2538c57 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -1157,7 +1157,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
return -ENOENT;
write_lock_bh(&ip_set_ref_lock);
- if (set->ref != 0) {
+ if (set->ref != 0 || set->ref_netlink != 0) {
ret = -IPSET_ERR_REFERENCED;
goto out;
}
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index fd87de3..16ec822 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -95,15 +95,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
- /* MAC can be src only */
- if (!(opt->flags & IPSET_DIM_TWO_SRC))
- return 0;
-
if (skb_mac_header(skb) < skb->head ||
(skb_mac_header(skb) + ETH_HLEN) > skb->data)
return -EINVAL;
- if (opt->flags & IPSET_DIM_ONE_SRC)
+ if (opt->flags & IPSET_DIM_TWO_SRC)
ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
else
ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 27eff89..c6073d1 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -431,13 +431,12 @@ EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
* table location, we assume id gets exposed to userspace.
*
* Following nf_conn items do not change throughout lifetime
- * of the nf_conn after it has been committed to main hash table:
+ * of the nf_conn:
*
* 1. nf_conn address
- * 2. nf_conn->ext address
- * 3. nf_conn->master address (normally NULL)
- * 4. tuple
- * 5. the associated net namespace
+ * 2. nf_conn->master address (normally NULL)
+ * 3. the associated net namespace
+ * 4. the original direction tuple
*/
u32 nf_ct_get_id(const struct nf_conn *ct)
{
@@ -447,9 +446,10 @@ u32 nf_ct_get_id(const struct nf_conn *ct)
net_get_random_once(&ct_id_seed, sizeof(ct_id_seed));
a = (unsigned long)ct;
- b = (unsigned long)ct->master ^ net_hash_mix(nf_ct_net(ct));
- c = (unsigned long)ct->ext;
- d = (unsigned long)siphash(&ct->tuplehash, sizeof(ct->tuplehash),
+ b = (unsigned long)ct->master;
+ c = (unsigned long)nf_ct_net(ct);
+ d = (unsigned long)siphash(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ sizeof(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple),
&ct_id_seed);
#ifdef CONFIG_64BIT
return siphash_4u64((u64)a, (u64)b, (u64)c, (u64)d, &ct_id_seed);
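The conntrack id is now derived only from fields that never change over the object's lifetime, mixed through a keyed hash so raw kernel addresses are not exposed to userspace. A rough sketch of that idea, with mix64() as an illustrative stand-in for siphash and a made-up struct:

#include <stdint.h>
#include <stdio.h>

static uint64_t mix64(uint64_t x, uint64_t seed)
{
	x ^= seed;
	x ^= x >> 33; x *= 0xff51afd7ed558ccdULL;
	x ^= x >> 33; x *= 0xc4ceb9fe1a85ec53ULL;
	return x ^ (x >> 33);
}

struct conn { const void *master; uint64_t orig_tuple_hash; };

static uint64_t get_id(const struct conn *ct, uint64_t seed)
{
	uint64_t id = mix64((uintptr_t)ct, seed);	/* stable: address */

	id = mix64(id ^ (uintptr_t)ct->master, seed);	/* stable: master */
	return mix64(id ^ ct->orig_tuple_hash, seed);	/* stable: orig tuple */
}

int main(void)
{
	struct conn c = { NULL, 0x1234 };

	printf("id=%llx\n", (unsigned long long)get_id(&c, 0xdeadbeefULL));
	return 0;
}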
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 842f3f8..7011ab2 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -480,6 +480,7 @@ static bool tcp_in_window(const struct nf_conn *ct,
struct ip_ct_tcp_state *receiver = &state->seen[!dir];
const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
__u32 seq, ack, sack, end, win, swin;
+ u16 win_raw;
s32 receiver_offset;
bool res, in_recv_win;
@@ -488,7 +489,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
*/
seq = ntohl(tcph->seq);
ack = sack = ntohl(tcph->ack_seq);
- win = ntohs(tcph->window);
+ win_raw = ntohs(tcph->window);
+ win = win_raw;
end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
@@ -663,14 +665,14 @@ static bool tcp_in_window(const struct nf_conn *ct,
&& state->last_seq == seq
&& state->last_ack == ack
&& state->last_end == end
- && state->last_win == win)
+ && state->last_win == win_raw)
state->retrans++;
else {
state->last_dir = dir;
state->last_seq = seq;
state->last_ack = ack;
state->last_end = end;
- state->last_win = win;
+ state->last_win = win_raw;
state->retrans = 0;
}
}
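The bug fixed here: state->last_win is a 16-bit field, but the old code stored the window after scaling, so the stored value was truncated and a genuine retransmission no longer compared equal. A two-line demonstration of the truncation:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t win_raw = 0x8000;		/* window as seen on the wire */
	uint32_t win = (uint32_t)win_raw << 2;	/* after window scaling: 0x20000 */
	uint16_t last_win = (uint16_t)win;	/* old code: truncates to 0 */

	/* a retransmission carries the same scaled window, but the
	 * truncated stored value no longer matches it */
	printf("scaled=0x%x stored=0x%x match=%d\n",
	       win, last_win, (uint32_t)last_win == win);
	return 0;
}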
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 9169134..7f2c191 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -575,7 +575,7 @@ static int nfnetlink_bind(struct net *net, int group)
ss = nfnetlink_get_subsys(type << 8);
rcu_read_unlock();
if (!ss)
- request_module("nfnetlink-subsys-%d", type);
+ request_module_nowait("nfnetlink-subsys-%d", type);
return 0;
}
#endif
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index c2d2371..b8f23f7 100644
--- a/net/netfilter/nft_hash.c
+++ b/net/netfilter/nft_hash.c
@@ -196,7 +196,7 @@ static int nft_symhash_init(const struct nft_ctx *ctx,
priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]);
priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS]));
- if (priv->modulus <= 1)
+ if (priv->modulus < 1)
return -ERANGE;
if (priv->offset + priv->modulus - 1 < priv->offset)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 33e982b..7e25a6a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2616,6 +2616,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
mutex_lock(&po->pg_vec_lock);
+	/* The packet_sendmsg() check on tx_ring.pg_vec was lockless,
+	 * so we must re-check it under the protection of pg_vec_lock.
+ */
+ if (unlikely(!po->tx_ring.pg_vec)) {
+ err = -EBUSY;
+ goto out;
+ }
if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
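tpacket_snd() now applies the classic "re-check under the lock" pattern: a pointer observed locklessly may be torn down before the lock is taken, so it must be validated again once the lock is held. A userspace sketch with illustrative stand-ins:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *ring;	/* may be torn down concurrently, like tx_ring.pg_vec */

static int send_on_ring(void)
{
	if (!ring)			/* cheap lockless early-out */
		return -1;

	pthread_mutex_lock(&lock);
	if (!ring) {			/* confirm again under the lock */
		pthread_mutex_unlock(&lock);
		return -1;		/* kernel: err = -EBUSY */
	}
	/* ... the ring is now guaranteed to stay valid here ... */
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("send -> %d\n", send_on_ring());
	return 0;
}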
diff --git a/net/qrtr/fifo.c b/net/qrtr/fifo.c
index 0a494a6..4a1dd07 100644
--- a/net/qrtr/fifo.c
+++ b/net/qrtr/fifo.c
@@ -341,7 +341,7 @@ static int qrtr_fifo_xprt_probe(struct platform_device *pdev)
qrtr_fifo_config_init(xprtp);
xprtp->ep.xmit = xprt_write;
- ret = qrtr_endpoint_register(&xprtp->ep, QRTR_EP_NID_AUTO);
+ ret = qrtr_endpoint_register(&xprtp->ep, QRTR_EP_NID_AUTO, false);
if (ret)
return ret;
diff --git a/net/qrtr/mhi.c b/net/qrtr/mhi.c
index b864b6b..788177e 100644
--- a/net/qrtr/mhi.c
+++ b/net/qrtr/mhi.c
@@ -145,6 +145,7 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
{
struct qrtr_mhi_dev *qdev;
u32 net_id;
+ bool rt;
int rc;
qdev = devm_kzalloc(&mhi_dev->dev, sizeof(*qdev), GFP_KERNEL);
@@ -160,10 +161,12 @@ static int qcom_mhi_qrtr_probe(struct mhi_device *mhi_dev,
if (rc < 0)
net_id = QRTR_EP_NET_ID_AUTO;
+ rt = of_property_read_bool(mhi_dev->dev.of_node, "qcom,low-latency");
+
INIT_LIST_HEAD(&qdev->ul_pkts);
spin_lock_init(&qdev->ul_lock);
- rc = qrtr_endpoint_register(&qdev->ep, net_id);
+ rc = qrtr_endpoint_register(&qdev->ep, net_id, rt);
if (rc)
return rc;
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index d994a90..2c9d7de 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -20,8 +20,10 @@
#include <linux/rwsem.h>
#include <linux/ipc_logging.h>
#include <linux/uidgid.h>
+#include <linux/pm_wakeup.h>
#include <net/sock.h>
+#include <uapi/linux/sched/types.h>
#include "qrtr.h"
@@ -149,6 +151,7 @@ static DEFINE_MUTEX(qrtr_port_lock);
* @kworker: worker thread for recv work
* @task: task to run the worker thread
* @read_data: scheduled work for recv work
+ * @ws: wakeup source to avoid system suspend
* @ilc: ipc logging context reference
*/
struct qrtr_node {
@@ -170,6 +173,8 @@ struct qrtr_node {
struct task_struct *task;
struct kthread_work read_data;
+ struct wakeup_source *ws;
+
void *ilc;
};
@@ -346,6 +351,7 @@ static void __qrtr_node_release(struct kref *kref)
}
mutex_unlock(&node->qrtr_tx_lock);
+ wakeup_source_unregister(node->ws);
kthread_flush_worker(&node->kworker);
kthread_stop(node->task);
@@ -610,10 +616,16 @@ static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
node->nid = nid;
up_write(&qrtr_node_lock);
+ snprintf(name, sizeof(name), "qrtr_%d", nid);
if (!node->ilc) {
- snprintf(name, sizeof(name), "qrtr_%d", nid);
node->ilc = ipc_log_context_create(QRTR_LOG_PAGE_CNT, name, 0);
}
+	/* Create a wakeup source only for NID 0, 3 or 7. Sensor
+	 * service sample streams from other nodes cause APPS suspend
+	 * problems and power drain issues.
+	 */
+ if (!node->ws && (nid == 0 || nid == 3 || nid == 7))
+ node->ws = wakeup_source_register(name);
}
/**
@@ -744,6 +756,8 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
cb->type != QRTR_TYPE_RESUME_TX)
goto err;
+ pm_wakeup_ws_event(node->ws, 0, true);
+
if (frag) {
skb->data_len = size;
skb->len = size;
@@ -921,13 +935,16 @@ static void qrtr_node_rx_work(struct kthread_work *work)
* qrtr_endpoint_register() - register a new endpoint
* @ep: endpoint to register
* @nid: desired node id; may be QRTR_EP_NID_AUTO for auto-assignment
+ * @rt: flag marking the endpoint as real-time (low latency)
* Return: 0 on success; negative error code on failure
*
* The specified endpoint must have the xmit function pointer set on call.
*/
-int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id)
+int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id,
+ bool rt)
{
struct qrtr_node *node;
+ struct sched_param param = {.sched_priority = 1};
if (!ep || !ep->xmit)
return -EINVAL;
@@ -950,6 +967,8 @@ int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id)
kfree(node);
return -ENOMEM;
}
+ if (rt)
+ sched_setscheduler(node->task, SCHED_FIFO, ¶m);
mutex_init(&node->qrtr_tx_lock);
INIT_RADIX_TREE(&node->qrtr_tx_flow, GFP_KERNEL);
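When the 'rt' flag is set, the receive worker is promoted to SCHED_FIFO priority 1 so low-latency endpoints are serviced ahead of normal tasks. A userspace analogue using pthreads (it needs appropriate privileges to actually take effect):

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void *worker(void *arg)
{
	(void)arg;
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct sched_param param = { .sched_priority = 1 };
	int rc;

	pthread_create(&t, NULL, worker, NULL);
	rc = pthread_setschedparam(t, SCHED_FIFO, &param);
	if (rc)
		fprintf(stderr, "setschedparam: %d (often EPERM unprivileged)\n", rc);
	pthread_join(t, NULL);
	return 0;
}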
diff --git a/net/qrtr/qrtr.h b/net/qrtr/qrtr.h
index f9aede4..6a2cccb 100644
--- a/net/qrtr/qrtr.h
+++ b/net/qrtr/qrtr.h
@@ -26,7 +26,8 @@ struct qrtr_endpoint {
struct qrtr_node *node;
};
-int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id);
+int qrtr_endpoint_register(struct qrtr_endpoint *ep, unsigned int net_id,
+ bool rt);
void qrtr_endpoint_unregister(struct qrtr_endpoint *ep);
diff --git a/net/qrtr/smd.c b/net/qrtr/smd.c
index 0c78f15..fff9720 100644
--- a/net/qrtr/smd.c
+++ b/net/qrtr/smd.c
@@ -60,6 +60,7 @@ static int qcom_smd_qrtr_probe(struct rpmsg_device *rpdev)
{
struct qrtr_smd_dev *qdev;
u32 net_id;
+ bool rt;
int rc;
qdev = devm_kzalloc(&rpdev->dev, sizeof(*qdev), GFP_KERNEL);
@@ -74,7 +75,9 @@ static int qcom_smd_qrtr_probe(struct rpmsg_device *rpdev)
if (rc < 0)
net_id = QRTR_EP_NET_ID_AUTO;
- rc = qrtr_endpoint_register(&qdev->ep, net_id);
+ rt = of_property_read_bool(rpdev->dev.of_node, "qcom,low-latency");
+
+ rc = qrtr_endpoint_register(&qdev->ep, net_id, rt);
if (rc)
return rc;
diff --git a/net/qrtr/usb.c b/net/qrtr/usb.c
index fd71df9..3daecca 100644
--- a/net/qrtr/usb.c
+++ b/net/qrtr/usb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
#include <linux/kthread.h>
#include <linux/module.h>
@@ -213,7 +213,7 @@ static int qcom_usb_qrtr_probe(struct usb_interface *interface,
init_usb_anchor(&qdev->submitted);
- rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
+ rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO, false);
if (rc)
return rc;
@@ -263,7 +263,7 @@ static int qcom_usb_qrtr_reset_resume(struct usb_interface *intf)
int rc = 0;
qrtr_endpoint_unregister(&qdev->ep);
- rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO);
+ rc = qrtr_endpoint_register(&qdev->ep, QRTR_EP_NID_AUTO, false);
if (rc)
return rc;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index d76e5e5..7319d3c 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -195,7 +195,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
service_in_use:
write_unlock(&local->services_lock);
- rxrpc_put_local(local);
+ rxrpc_unuse_local(local);
ret = -EADDRINUSE;
error_unlock:
release_sock(&rx->sk);
@@ -908,7 +908,7 @@ static int rxrpc_release_sock(struct sock *sk)
rxrpc_queue_work(&rxnet->service_conn_reaper);
rxrpc_queue_work(&rxnet->client_conn_reaper);
- rxrpc_put_local(rx->local);
+ rxrpc_unuse_local(rx->local);
rx->local = NULL;
key_put(rx->key);
rx->key = NULL;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 7444d8b..8d72e94 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -258,7 +258,8 @@ struct rxrpc_security {
*/
struct rxrpc_local {
struct rcu_head rcu;
- atomic_t usage;
+ atomic_t active_users; /* Number of users of the local endpoint */
+ atomic_t usage; /* Number of references to the structure */
struct rxrpc_net *rxnet; /* The network ns in which this resides */
struct list_head link;
struct socket *socket; /* my UDP socket */
@@ -998,6 +999,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
void rxrpc_put_local(struct rxrpc_local *);
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
+void rxrpc_unuse_local(struct rxrpc_local *);
void rxrpc_queue_local(struct rxrpc_local *);
void rxrpc_destroy_all_locals(struct rxrpc_net *);
@@ -1057,6 +1060,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
void rxrpc_put_peer(struct rxrpc_peer *);
+void rxrpc_put_peer_locked(struct rxrpc_peer *);
/*
* proc.c
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index d591f54..7965600 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1106,8 +1106,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
{
_enter("%p,%p", local, skb);
- skb_queue_tail(&local->event_queue, skb);
- rxrpc_queue_local(local);
+ if (rxrpc_get_local_maybe(local)) {
+ skb_queue_tail(&local->event_queue, skb);
+ rxrpc_queue_local(local);
+ } else {
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ }
}
/*
@@ -1117,8 +1121,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
CHECK_SLAB_OKAY(&local->usage);
- skb_queue_tail(&local->reject_queue, skb);
- rxrpc_queue_local(local);
+ if (rxrpc_get_local_maybe(local)) {
+ skb_queue_tail(&local->reject_queue, skb);
+ rxrpc_queue_local(local);
+ } else {
+ rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
+ }
}
/*
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
index 10317db..c752ad4 100644
--- a/net/rxrpc/local_object.c
+++ b/net/rxrpc/local_object.c
@@ -83,6 +83,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
if (local) {
atomic_set(&local->usage, 1);
+ atomic_set(&local->active_users, 1);
local->rxnet = rxnet;
INIT_LIST_HEAD(&local->link);
INIT_WORK(&local->processor, rxrpc_local_processor);
@@ -96,7 +97,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
local->debug_id = atomic_inc_return(&rxrpc_debug_id);
memcpy(&local->srx, srx, sizeof(*srx));
local->srx.srx_service = 0;
- trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
+ trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
}
_leave(" = %p", local);
@@ -270,11 +271,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
* bind the transport socket may still fail if we're attempting
* to use a local address that the dying object is still using.
*/
- if (!rxrpc_get_local_maybe(local)) {
- cursor = cursor->next;
- list_del_init(&local->link);
+ if (!rxrpc_use_local(local))
break;
- }
age = "old";
goto found;
@@ -288,7 +286,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
if (ret < 0)
goto sock_error;
- list_add_tail(&local->link, cursor);
+ if (cursor != &rxnet->local_endpoints)
+ list_replace_init(cursor, &local->link);
+ else
+ list_add_tail(&local->link, cursor);
age = "new";
found:
@@ -324,7 +325,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
int n;
n = atomic_inc_return(&local->usage);
- trace_rxrpc_local(local, rxrpc_local_got, n, here);
+ trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
return local;
}
@@ -338,7 +339,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
if (local) {
int n = atomic_fetch_add_unless(&local->usage, 1, 0);
if (n > 0)
- trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
+ trace_rxrpc_local(local->debug_id, rxrpc_local_got,
+ n + 1, here);
else
local = NULL;
}
@@ -346,24 +348,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
}
/*
- * Queue a local endpoint.
+ * Queue a local endpoint and pass the caller's reference to the work item.
*/
void rxrpc_queue_local(struct rxrpc_local *local)
{
const void *here = __builtin_return_address(0);
+ unsigned int debug_id = local->debug_id;
+ int n = atomic_read(&local->usage);
if (rxrpc_queue_work(&local->processor))
- trace_rxrpc_local(local, rxrpc_local_queued,
- atomic_read(&local->usage), here);
-}
-
-/*
- * A local endpoint reached its end of life.
- */
-static void __rxrpc_put_local(struct rxrpc_local *local)
-{
- _enter("%d", local->debug_id);
- rxrpc_queue_work(&local->processor);
+ trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
+ else
+ rxrpc_put_local(local);
}
/*
@@ -376,10 +372,47 @@ void rxrpc_put_local(struct rxrpc_local *local)
if (local) {
n = atomic_dec_return(&local->usage);
- trace_rxrpc_local(local, rxrpc_local_put, n, here);
+ trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
if (n == 0)
- __rxrpc_put_local(local);
+ call_rcu(&local->rcu, rxrpc_local_rcu);
+ }
+}
+
+/*
+ * Start using a local endpoint.
+ */
+struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
+{
+ unsigned int au;
+
+ local = rxrpc_get_local_maybe(local);
+ if (!local)
+ return NULL;
+
+ au = atomic_fetch_add_unless(&local->active_users, 1, 0);
+ if (au == 0) {
+ rxrpc_put_local(local);
+ return NULL;
+ }
+
+ return local;
+}
+
+/*
+ * Cease using a local endpoint. Once the number of active users reaches 0, we
+ * start the closure of the transport in the work processor.
+ */
+void rxrpc_unuse_local(struct rxrpc_local *local)
+{
+ unsigned int au;
+
+ if (local) {
+ au = atomic_dec_return(&local->active_users);
+ if (au == 0)
+ rxrpc_queue_local(local);
+ else
+ rxrpc_put_local(local);
}
}
@@ -397,16 +430,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
_enter("%d", local->debug_id);
- /* We can get a race between an incoming call packet queueing the
- * processor again and the work processor starting the destruction
- * process which will shut down the UDP socket.
- */
- if (local->dead) {
- _leave(" [already dead]");
- return;
- }
- local->dead = true;
-
mutex_lock(&rxnet->local_mutex);
list_del_init(&local->link);
mutex_unlock(&rxnet->local_mutex);
@@ -426,13 +449,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
*/
rxrpc_purge_queue(&local->reject_queue);
rxrpc_purge_queue(&local->event_queue);
-
- _debug("rcu local %d", local->debug_id);
- call_rcu(&local->rcu, rxrpc_local_rcu);
}
/*
- * Process events on an endpoint
+ * Process events on an endpoint. The work item carries a ref which
+ * we must release.
*/
static void rxrpc_local_processor(struct work_struct *work)
{
@@ -440,13 +461,15 @@ static void rxrpc_local_processor(struct work_struct *work)
container_of(work, struct rxrpc_local, processor);
bool again;
- trace_rxrpc_local(local, rxrpc_local_processing,
+ trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
atomic_read(&local->usage), NULL);
do {
again = false;
- if (atomic_read(&local->usage) == 0)
- return rxrpc_local_destroyer(local);
+ if (atomic_read(&local->active_users) == 0) {
+ rxrpc_local_destroyer(local);
+ break;
+ }
if (!skb_queue_empty(&local->reject_queue)) {
rxrpc_reject_packets(local);
@@ -458,6 +481,8 @@ static void rxrpc_local_processor(struct work_struct *work)
again = true;
}
} while (again);
+
+ rxrpc_put_local(local);
}
/*
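rxrpc_local now carries two counters: 'usage' keeps the memory alive while 'active_users' keeps the transport open, and taking a new active use must fail once the count has already hit zero, which is what atomic_fetch_add_unless(..., 1, 0) provides. A userspace sketch of the get-unless-zero pairing (illustrative stand-ins only):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int usage = 1;		/* keeps the memory alive */
static atomic_int active_users = 1;	/* keeps the transport open */

/* Take a reference only if the count is still non-zero, mirroring
 * atomic_fetch_add_unless(&v, 1, 0). */
static int add_unless_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0)
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return 1;
	return 0;	/* already dead; don't resurrect */
}

static int use_local(void)
{
	if (!add_unless_zero(&usage))
		return 0;
	if (!add_unless_zero(&active_users)) {
		atomic_fetch_sub(&usage, 1);	/* kernel: rxrpc_put_local() */
		return 0;
	}
	return 1;
}

int main(void)
{
	printf("use ok=%d\n", use_local());
	return 0;
}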
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index bd2fa3b..dc7fdaf 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -375,7 +375,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
spin_lock_bh(&rxnet->peer_hash_lock);
list_add_tail(&peer->keepalive_link,
&rxnet->peer_keepalive[slot & mask]);
- rxrpc_put_peer(peer);
+ rxrpc_put_peer_locked(peer);
}
spin_unlock_bh(&rxnet->peer_hash_lock);
diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
index 5691b7d..71547e8 100644
--- a/net/rxrpc/peer_object.c
+++ b/net/rxrpc/peer_object.c
@@ -441,6 +441,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
}
/*
+ * Drop a ref on a peer record where the caller already holds the
+ * peer_hash_lock.
+ */
+void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
+{
+ const void *here = __builtin_return_address(0);
+ int n;
+
+ n = atomic_dec_return(&peer->usage);
+ trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
+ if (n == 0) {
+ hash_del_rcu(&peer->hash_link);
+ list_del_init(&peer->keepalive_link);
+ kfree_rcu(peer, rcu);
+ }
+}
+
+/*
* Make sure all peer records have been discarded.
*/
void rxrpc_destroy_all_peers(struct rxrpc_net *rxnet)
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index be01f9c..5d6ab4f 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -230,6 +230,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
rxrpc_set_call_completion(call,
RXRPC_CALL_LOCAL_ERROR,
0, ret);
+ rxrpc_notify_socket(call);
goto out;
}
_debug("need instant resend %d", ret);
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 0c68bc9..20fae5c 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -287,6 +287,7 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
struct tcf_bpf *prog;
bool is_bpf, is_ebpf;
int ret, res = 0;
+ u32 index;
if (!nla)
return -EINVAL;
@@ -299,13 +300,13 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
return -EINVAL;
parm = nla_data(tb[TCA_ACT_BPF_PARMS]);
-
- ret = tcf_idr_check_alloc(tn, &parm->index, act, bind);
+ index = parm->index;
+ ret = tcf_idr_check_alloc(tn, &index, act, bind);
if (!ret) {
- ret = tcf_idr_create(tn, parm->index, est, act,
+ ret = tcf_idr_create(tn, index, est, act,
&act_bpf_ops, bind, true);
if (ret < 0) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 6f0f273..6054367 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -104,6 +104,7 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
struct tcf_connmark_info *ci;
struct tc_connmark *parm;
int ret = 0;
+ u32 index;
if (!nla)
return -EINVAL;
@@ -117,13 +118,13 @@ static int tcf_connmark_init(struct net *net, struct nlattr *nla,
return -EINVAL;
parm = nla_data(tb[TCA_CONNMARK_PARMS]);
-
- ret = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ ret = tcf_idr_check_alloc(tn, &index, a, bind);
if (!ret) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_connmark_ops, bind, false);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index b8a67ae..4043719 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -55,6 +55,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
struct tc_csum *parm;
struct tcf_csum *p;
int ret = 0, err;
+ u32 index;
if (nla == NULL)
return -EINVAL;
@@ -66,13 +67,13 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
if (tb[TCA_CSUM_PARMS] == NULL)
return -EINVAL;
parm = nla_data(tb[TCA_CSUM_PARMS]);
-
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_csum_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index cd1d9bd..72d3347 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -64,6 +64,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
struct tc_gact *parm;
struct tcf_gact *gact;
int ret = 0;
+ u32 index;
int err;
#ifdef CONFIG_GACT_PROB
struct tc_gact_p *p_parm = NULL;
@@ -79,6 +80,7 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
if (tb[TCA_GACT_PARMS] == NULL)
return -EINVAL;
parm = nla_data(tb[TCA_GACT_PARMS]);
+ index = parm->index;
#ifndef CONFIG_GACT_PROB
if (tb[TCA_GACT_PROB] != NULL)
@@ -91,12 +93,12 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
}
#endif
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_gact_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 06a3d48..24047e0 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -482,8 +482,14 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
u8 *saddr = NULL;
bool exists = false;
int ret = 0;
+ u32 index;
int err;
+ if (!nla) {
+ NL_SET_ERR_MSG_MOD(extack, "IFE requires attributes to be passed");
+ return -EINVAL;
+ }
+
err = nla_parse_nested(tb, TCA_IFE_MAX, nla, ife_policy, NULL);
if (err < 0)
return err;
@@ -504,7 +510,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
if (!p)
return -ENOMEM;
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0) {
kfree(p);
return err;
@@ -516,10 +523,10 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
}
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a, &act_ife_ops,
+ ret = tcf_idr_create(tn, index, est, a, &act_ife_ops,
bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
kfree(p);
return ret;
}
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index f767e78..548614b 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -104,6 +104,7 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
struct net_device *dev;
bool exists = false;
int ret, err;
+ u32 index;
if (!nla) {
NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
@@ -117,8 +118,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
return -EINVAL;
}
parm = nla_data(tb[TCA_MIRRED_PARMS]);
-
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -135,21 +136,21 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
return -EINVAL;
}
if (!exists) {
if (!parm->ifindex) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
return -EINVAL;
}
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_mirred_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 4313aa1..6198289 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -45,6 +45,7 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
struct tc_nat *parm;
int ret = 0, err;
struct tcf_nat *p;
+ u32 index;
if (nla == NULL)
return -EINVAL;
@@ -56,13 +57,13 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
if (tb[TCA_NAT_PARMS] == NULL)
return -EINVAL;
parm = nla_data(tb[TCA_NAT_PARMS]);
-
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_nat_ops, bind, false);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index ca535a8..82d258b 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -149,6 +149,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
struct tcf_pedit *p;
int ret = 0, err;
int ksize;
+ u32 index;
if (!nla) {
NL_SET_ERR_MSG_MOD(extack, "Pedit requires attributes to be passed");
@@ -178,18 +179,19 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
if (IS_ERR(keys_ex))
return PTR_ERR(keys_ex);
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (!err) {
if (!parm->nkeys) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
ret = -EINVAL;
goto out_free;
}
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_pedit_ops, bind, false);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
goto out_free;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 5d8bfa8..997c34d 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -85,6 +85,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
struct qdisc_rate_table *R_tab = NULL, *P_tab = NULL;
struct tc_action_net *tn = net_generic(net, police_net_id);
bool exists = false;
+ u32 index;
int size;
if (nla == NULL)
@@ -101,7 +102,8 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
return -EINVAL;
parm = nla_data(tb[TCA_POLICE_TBF]);
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -109,10 +111,10 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
return 0;
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, NULL, a,
+ ret = tcf_idr_create(tn, index, NULL, a,
&act_police_ops, bind, false);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index c7f5d63..ac37654 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -43,7 +43,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
struct tc_action_net *tn = net_generic(net, sample_net_id);
struct nlattr *tb[TCA_SAMPLE_MAX + 1];
struct psample_group *psample_group;
- u32 psample_group_num, rate;
+ u32 psample_group_num, rate, index;
struct tc_sample *parm;
struct tcf_sample *s;
bool exists = false;
@@ -59,8 +59,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
return -EINVAL;
parm = nla_data(tb[TCA_SAMPLE_PARMS]);
-
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -68,10 +68,10 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
return 0;
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_sample_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
ret = ACT_P_CREATED;
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 52400d4..658efae 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -88,6 +88,7 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
struct tcf_defact *d;
bool exists = false;
int ret = 0, err;
+ u32 index;
if (nla == NULL)
return -EINVAL;
@@ -100,7 +101,8 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
return -EINVAL;
parm = nla_data(tb[TCA_DEF_PARMS]);
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -111,15 +113,15 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return -EINVAL;
}
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_simp_ops, bind, false);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 86d90fc..7709710 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -107,6 +107,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
u16 *queue_mapping = NULL, *ptype = NULL;
bool exists = false;
int ret = 0, err;
+ u32 index;
if (nla == NULL)
return -EINVAL;
@@ -153,8 +154,8 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
}
parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
-
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -165,15 +166,15 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return -EINVAL;
}
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_skbedit_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 588077f..3038493 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -88,12 +88,12 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
struct nlattr *tb[TCA_SKBMOD_MAX + 1];
struct tcf_skbmod_params *p, *p_old;
struct tc_skbmod *parm;
+ u32 lflags = 0, index;
struct tcf_skbmod *d;
bool exists = false;
u8 *daddr = NULL;
u8 *saddr = NULL;
u16 eth_type = 0;
- u32 lflags = 0;
int ret = 0, err;
if (!nla)
@@ -122,10 +122,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
}
parm = nla_data(tb[TCA_SKBMOD_PARMS]);
+ index = parm->index;
if (parm->flags & SKBMOD_F_SWAPMAC)
lflags = SKBMOD_F_SWAPMAC;
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -136,15 +137,15 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return -EINVAL;
}
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_skbmod_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 72d9c43..66bfe57 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -224,6 +224,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
__be16 flags;
u8 tos, ttl;
int ret = 0;
+ u32 index;
int err;
if (!nla) {
@@ -244,7 +245,8 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
}
parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -338,7 +340,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
}
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_tunnel_key_ops, bind, true);
if (ret) {
NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
@@ -384,7 +386,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
diff --git a/net/sched/act_vlan.c b/net/sched/act_vlan.c
index 033d273..da993edd 100644
--- a/net/sched/act_vlan.c
+++ b/net/sched/act_vlan.c
@@ -118,6 +118,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
u8 push_prio = 0;
bool exists = false;
int ret = 0, err;
+ u32 index;
if (!nla)
return -EINVAL;
@@ -129,7 +130,8 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
if (!tb[TCA_VLAN_PARMS])
return -EINVAL;
parm = nla_data(tb[TCA_VLAN_PARMS]);
- err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
+ index = parm->index;
+ err = tcf_idr_check_alloc(tn, &index, a, bind);
if (err < 0)
return err;
exists = err;
@@ -145,7 +147,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return -EINVAL;
}
push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
@@ -153,7 +155,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return -ERANGE;
}
@@ -167,7 +169,7 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return -EPROTONOSUPPORT;
}
} else {
@@ -181,16 +183,16 @@ static int tcf_vlan_init(struct net *net, struct nlattr *nla,
if (exists)
tcf_idr_release(*a, bind);
else
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return -EINVAL;
}
action = parm->v_action;
if (!exists) {
- ret = tcf_idr_create(tn, parm->index, est, a,
+ ret = tcf_idr_create(tn, index, est, a,
&act_vlan_ops, bind, true);
if (ret) {
- tcf_idr_cleanup(tn, parm->index);
+ tcf_idr_cleanup(tn, index);
return ret;
}
@@ -296,6 +298,14 @@ static int tcf_vlan_search(struct net *net, struct tc_action **a, u32 index,
return tcf_idr_search(tn, a, index);
}
+static size_t tcf_vlan_get_fill_size(const struct tc_action *act)
+{
+ return nla_total_size(sizeof(struct tc_vlan))
+ + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_ID */
+ + nla_total_size(sizeof(u16)) /* TCA_VLAN_PUSH_VLAN_PROTOCOL */
+ + nla_total_size(sizeof(u8)); /* TCA_VLAN_PUSH_VLAN_PRIORITY */
+}
+
static struct tc_action_ops act_vlan_ops = {
.kind = "vlan",
.type = TCA_ACT_VLAN,
@@ -305,6 +315,7 @@ static struct tc_action_ops act_vlan_ops = {
.init = tcf_vlan_init,
.cleanup = tcf_vlan_cleanup,
.walk = tcf_vlan_walker,
+ .get_fill_size = tcf_vlan_get_fill_size,
.lookup = tcf_vlan_search,
.size = sizeof(struct tcf_vlan),
};
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 17cd81f..77fae0b 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -71,10 +71,10 @@ static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
struct Qdisc *sch = ctx;
struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
- if (skb)
+ if (skb) {
sch->qstats.backlog -= qdisc_pkt_len(skb);
-
- prefetch(&skb->end); /* we'll need skb_shinfo() */
+ prefetch(&skb->end); /* we'll need skb_shinfo() */
+ }
return skb;
}
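
The codel hunk above is a NULL-dereference fix: the prefetch of &skb->end used to run even when the queue was empty. A hedged stand-alone sketch of the corrected shape; prefetch() here is a no-op stand-in for the kernel helper:

    #include <stdio.h>

    struct skb { int len; char *end; };

    static void prefetch(const void *p) { (void)p; }  /* stand-in */

    static struct skb *dequeue(struct skb *head, int *backlog)
    {
        struct skb *skb = head;  /* may be NULL on an empty queue */

        if (skb) {
            *backlog -= skb->len;
            prefetch(&skb->end); /* safe: only taken when skb != NULL */
        }
        return skb;
    }

    int main(void)
    {
        int backlog = 0;
        /* an empty queue no longer computes &skb->end from a NULL pointer */
        printf("%p\n", (void *)dequeue(NULL, &backlog));
        return 0;
    }
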
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 3131b41..28adac3 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -561,7 +561,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
*/
if (net->sctp.pf_enable &&
(transport->state == SCTP_ACTIVE) &&
- (asoc->pf_retrans < transport->pathmaxrxt) &&
+ (transport->error_count < transport->pathmaxrxt) &&
(transport->error_count > asoc->pf_retrans)) {
sctp_assoc_control_transport(asoc, transport,
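
The sctp change above is easiest to verify with numbers. The old test compared two tunables (asoc->pf_retrans < transport->pathmaxrxt), which is constant for a given association; the corrected test keeps the transport out of the PF state once error_count has reached pathmaxrxt. A sketch of the predicate with illustrative values:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative values; the kernel reads these from the association */
    static bool enter_pf(int error_count, int pf_retrans, int pathmaxrxt)
    {
        return error_count < pathmaxrxt && /* fixed: was pf_retrans < pathmaxrxt */
               error_count > pf_retrans;
    }

    int main(void)
    {
        /* error_count 5 with pathmaxrxt 5: path exhausted, no PF state */
        printf("%d\n", enter_pf(5, 4, 5));  /* 0 */
        printf("%d\n", enter_pf(4, 3, 5));  /* 1: still probing */
        return 0;
    }
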
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 0da5793..87061a4b 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -416,6 +416,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
nstr_list[i] = htons(str_list[i]);
if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+ kfree(nstr_list);
retval = -EAGAIN;
goto out;
}
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 9bbab6b..26dcd02 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -1680,14 +1680,18 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
}
break;
case TCP_NODELAY:
- if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
+ if (sk->sk_state != SMC_INIT &&
+ sk->sk_state != SMC_LISTEN &&
+ sk->sk_state != SMC_CLOSED) {
if (val && !smc->use_fallback)
mod_delayed_work(system_wq, &smc->conn.tx_work,
0);
}
break;
case TCP_CORK:
- if (sk->sk_state != SMC_INIT && sk->sk_state != SMC_LISTEN) {
+ if (sk->sk_state != SMC_INIT &&
+ sk->sk_state != SMC_LISTEN &&
+ sk->sk_state != SMC_CLOSED) {
if (!val && !smc->use_fallback)
mod_delayed_work(system_wq, &smc->conn.tx_work,
0);
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index b88d48d..0f1eaed 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -75,6 +75,7 @@ void tipc_set_node_addr(struct net *net, u32 addr)
tipc_set_node_id(net, node_id);
}
tn->trial_addr = addr;
+ tn->addr_trial_end = jiffies;
pr_info("32-bit node address hash set to %x\n", addr);
}
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 85ebb67..318c541 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -55,6 +55,7 @@ struct tipc_nl_compat_msg {
int rep_type;
int rep_size;
int req_type;
+ int req_size;
struct net *net;
struct sk_buff *rep;
struct tlv_desc *req;
@@ -257,7 +258,8 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
int err;
struct sk_buff *arg;
- if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+ if (msg->req_type && (!msg->req_size ||
+ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
return -EINVAL;
msg->rep = tipc_tlv_alloc(msg->rep_size);
@@ -354,7 +356,8 @@ static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
{
int err;
- if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
+ if (msg->req_type && (!msg->req_size ||
+ !TLV_CHECK_TYPE(msg->req, msg->req_type)))
return -EINVAL;
err = __tipc_nl_compat_doit(cmd, msg);
@@ -1276,8 +1279,8 @@ static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
goto send;
}
- len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
- if (!len || !TLV_OK(msg.req, len)) {
+ msg.req_size = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
+ if (msg.req_size && !TLV_OK(msg.req, msg.req_size)) {
msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
err = -EOPNOTSUPP;
goto send;
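
The tipc compat fix above records the attribute length once in msg.req_size and rejects a typed request whose payload is empty before TLV_CHECK_TYPE() ever reads it. Reduced sketch, with tlv_check_type() as a trivial stand-in:

    #include <stdbool.h>
    #include <stdio.h>

    struct msg { int req_type; int req_size; int tlv_type; };

    static bool tlv_check_type(const struct msg *m)
    {
        return m->tlv_type == m->req_type;
    }

    static int validate(const struct msg *m)
    {
        /* a typed request with no payload can no longer reach the TLV
         * parser: req_size == 0 now fails before the type check runs */
        if (m->req_type && (!m->req_size || !tlv_check_type(m)))
            return -1;
        return 0;
    }

    int main(void)
    {
        struct msg empty = { .req_type = 1, .req_size = 0, .tlv_type = 1 };
        printf("%d\n", validate(&empty));  /* -1: rejected */
        return 0;
    }
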
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index ab27a28..2e30bf1 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -281,7 +281,8 @@ EXPORT_SYMBOL_GPL(vsock_insert_connected);
void vsock_remove_bound(struct vsock_sock *vsk)
{
spin_lock_bh(&vsock_table_lock);
- __vsock_remove_bound(vsk);
+ if (__vsock_in_bound_table(vsk))
+ __vsock_remove_bound(vsk);
spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);
@@ -289,7 +290,8 @@ EXPORT_SYMBOL_GPL(vsock_remove_bound);
void vsock_remove_connected(struct vsock_sock *vsk)
{
spin_lock_bh(&vsock_table_lock);
- __vsock_remove_connected(vsk);
+ if (__vsock_in_connected_table(vsk))
+ __vsock_remove_connected(vsk);
spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);
@@ -325,35 +327,10 @@ struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
-static bool vsock_in_bound_table(struct vsock_sock *vsk)
-{
- bool ret;
-
- spin_lock_bh(&vsock_table_lock);
- ret = __vsock_in_bound_table(vsk);
- spin_unlock_bh(&vsock_table_lock);
-
- return ret;
-}
-
-static bool vsock_in_connected_table(struct vsock_sock *vsk)
-{
- bool ret;
-
- spin_lock_bh(&vsock_table_lock);
- ret = __vsock_in_connected_table(vsk);
- spin_unlock_bh(&vsock_table_lock);
-
- return ret;
-}
-
void vsock_remove_sock(struct vsock_sock *vsk)
{
- if (vsock_in_bound_table(vsk))
- vsock_remove_bound(vsk);
-
- if (vsock_in_connected_table(vsk))
- vsock_remove_connected(vsk);
+ vsock_remove_bound(vsk);
+ vsock_remove_connected(vsk);
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);
@@ -484,8 +461,7 @@ static void vsock_pending_work(struct work_struct *work)
* incoming packets can't find this socket, and to reduce the reference
* count.
*/
- if (vsock_in_connected_table(vsk))
- vsock_remove_connected(vsk);
+ vsock_remove_connected(vsk);
sk->sk_state = TCP_CLOSE;
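
The af_vsock change folds the membership test and the removal into one critical section, which also makes vsock_remove_sock() safe to call on sockets that were never in a table. A user-space sketch of the idempotent form, using a pthread mutex in place of vsock_table_lock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    struct sock { bool bound; };

    static void remove_bound(struct sock *s)
    {
        pthread_mutex_lock(&table_lock);
        if (s->bound)          /* check and remove under one lock */
            s->bound = false;
        pthread_mutex_unlock(&table_lock);
    }

    int main(void)
    {
        struct sock s = { .bound = true };

        remove_bound(&s);
        remove_bound(&s);      /* second call is now a harmless no-op */
        printf("bound=%d\n", s.bound);
        return 0;
    }
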
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index a827547..9c7da81 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -35,6 +35,9 @@
/* The MTU is 16KB per the host side's design */
#define HVS_MTU_SIZE (1024 * 16)
+/* How long to wait for graceful shutdown of a connection */
+#define HVS_CLOSE_TIMEOUT (8 * HZ)
+
struct vmpipe_proto_header {
u32 pkt_type;
u32 data_size;
@@ -217,18 +220,6 @@ static void hvs_set_channel_pending_send_size(struct vmbus_channel *chan)
set_channel_pending_send_size(chan,
HVS_PKT_LEN(HVS_SEND_BUF_SIZE));
- /* See hvs_stream_has_space(): we must make sure the host has seen
- * the new pending send size, before we can re-check the writable
- * bytes.
- */
- virt_mb();
-}
-
-static void hvs_clear_channel_pending_send_size(struct vmbus_channel *chan)
-{
- set_channel_pending_send_size(chan, 0);
-
- /* Ditto */
virt_mb();
}
@@ -298,26 +289,36 @@ static void hvs_channel_cb(void *ctx)
if (hvs_channel_readable(chan))
sk->sk_data_ready(sk);
- /* See hvs_stream_has_space(): when we reach here, the writable bytes
- * may be already less than HVS_PKT_LEN(HVS_SEND_BUF_SIZE).
- */
if (hv_get_bytes_to_write(&chan->outbound) > 0)
sk->sk_write_space(sk);
}
+static void hvs_do_close_lock_held(struct vsock_sock *vsk,
+ bool cancel_timeout)
+{
+ struct sock *sk = sk_vsock(vsk);
+
+ sock_set_flag(sk, SOCK_DONE);
+ vsk->peer_shutdown = SHUTDOWN_MASK;
+ if (vsock_stream_has_data(vsk) <= 0)
+ sk->sk_state = TCP_CLOSING;
+ sk->sk_state_change(sk);
+ if (vsk->close_work_scheduled &&
+ (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) {
+ vsk->close_work_scheduled = false;
+ vsock_remove_sock(vsk);
+
+ /* Release the reference taken while scheduling the timeout */
+ sock_put(sk);
+ }
+}
+
static void hvs_close_connection(struct vmbus_channel *chan)
{
struct sock *sk = get_per_channel_state(chan);
- struct vsock_sock *vsk = vsock_sk(sk);
lock_sock(sk);
-
- sk->sk_state = TCP_CLOSE;
- sock_set_flag(sk, SOCK_DONE);
- vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;
-
- sk->sk_state_change(sk);
-
+ hvs_do_close_lock_held(vsock_sk(sk), true);
release_sock(sk);
}
@@ -328,8 +329,9 @@ static void hvs_open_connection(struct vmbus_channel *chan)
struct sockaddr_vm addr;
struct sock *sk, *new = NULL;
- struct vsock_sock *vnew;
- struct hvsock *hvs, *hvs_new;
+ struct vsock_sock *vnew = NULL;
+ struct hvsock *hvs = NULL;
+ struct hvsock *hvs_new = NULL;
int ret;
if_type = &chan->offermsg.offer.if_type;
@@ -388,6 +390,13 @@ static void hvs_open_connection(struct vmbus_channel *chan)
set_per_channel_state(chan, conn_from_host ? new : sk);
vmbus_set_chn_rescind_callback(chan, hvs_close_connection);
+ /* Set the pending send size to max packet size to always get
+ * notifications from the host when there is enough writable space.
+ * The host is optimized to send notifications only when the pending
+ * size boundary is crossed, and not always.
+ */
+ hvs_set_channel_pending_send_size(chan);
+
if (conn_from_host) {
new->sk_state = TCP_ESTABLISHED;
sk->sk_ack_backlog++;
@@ -452,50 +461,80 @@ static int hvs_connect(struct vsock_sock *vsk)
return vmbus_send_tl_connect_request(&h->vm_srv_id, &h->host_srv_id);
}
+static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode)
+{
+ struct vmpipe_proto_header hdr;
+
+ if (hvs->fin_sent || !hvs->chan)
+ return;
+
+ /* It can't fail: see hvs_channel_writable_bytes(). */
+ (void)hvs_send_data(hvs->chan, (struct hvs_send_buf *)&hdr, 0);
+ hvs->fin_sent = true;
+}
+
static int hvs_shutdown(struct vsock_sock *vsk, int mode)
{
struct sock *sk = sk_vsock(vsk);
- struct vmpipe_proto_header hdr;
- struct hvs_send_buf *send_buf;
- struct hvsock *hvs;
if (!(mode & SEND_SHUTDOWN))
return 0;
lock_sock(sk);
-
- hvs = vsk->trans;
- if (hvs->fin_sent)
- goto out;
-
- send_buf = (struct hvs_send_buf *)&hdr;
-
- /* It can't fail: see hvs_channel_writable_bytes(). */
- (void)hvs_send_data(hvs->chan, send_buf, 0);
-
- hvs->fin_sent = true;
-out:
+ hvs_shutdown_lock_held(vsk->trans, mode);
release_sock(sk);
return 0;
}
+static void hvs_close_timeout(struct work_struct *work)
+{
+ struct vsock_sock *vsk =
+ container_of(work, struct vsock_sock, close_work.work);
+ struct sock *sk = sk_vsock(vsk);
+
+ sock_hold(sk);
+ lock_sock(sk);
+ if (!sock_flag(sk, SOCK_DONE))
+ hvs_do_close_lock_held(vsk, false);
+
+ vsk->close_work_scheduled = false;
+ release_sock(sk);
+ sock_put(sk);
+}
+
+/* Returns true if it is safe to remove the socket; false otherwise */
+static bool hvs_close_lock_held(struct vsock_sock *vsk)
+{
+ struct sock *sk = sk_vsock(vsk);
+
+ if (!(sk->sk_state == TCP_ESTABLISHED ||
+ sk->sk_state == TCP_CLOSING))
+ return true;
+
+ if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK)
+ hvs_shutdown_lock_held(vsk->trans, SHUTDOWN_MASK);
+
+ if (sock_flag(sk, SOCK_DONE))
+ return true;
+
+ /* This reference will be dropped by the delayed close routine */
+ sock_hold(sk);
+ INIT_DELAYED_WORK(&vsk->close_work, hvs_close_timeout);
+ vsk->close_work_scheduled = true;
+ schedule_delayed_work(&vsk->close_work, HVS_CLOSE_TIMEOUT);
+ return false;
+}
+
static void hvs_release(struct vsock_sock *vsk)
{
struct sock *sk = sk_vsock(vsk);
- struct hvsock *hvs = vsk->trans;
- struct vmbus_channel *chan;
+ bool remove_sock;
lock_sock(sk);
-
- sk->sk_state = TCP_CLOSING;
- vsock_remove_sock(vsk);
-
+ remove_sock = hvs_close_lock_held(vsk);
release_sock(sk);
-
- chan = hvs->chan;
- if (chan)
- hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN);
-
+ if (remove_sock)
+ vsock_remove_sock(vsk);
}
static void hvs_destruct(struct vsock_sock *vsk)
@@ -651,23 +690,8 @@ static s64 hvs_stream_has_data(struct vsock_sock *vsk)
static s64 hvs_stream_has_space(struct vsock_sock *vsk)
{
struct hvsock *hvs = vsk->trans;
- struct vmbus_channel *chan = hvs->chan;
- s64 ret;
- ret = hvs_channel_writable_bytes(chan);
- if (ret > 0) {
- hvs_clear_channel_pending_send_size(chan);
- } else {
- /* See hvs_channel_cb() */
- hvs_set_channel_pending_send_size(chan);
-
- /* Re-check the writable bytes to avoid race */
- ret = hvs_channel_writable_bytes(chan);
- if (ret > 0)
- hvs_clear_channel_pending_send_size(chan);
- }
-
- return ret;
+ return hvs_channel_writable_bytes(hvs->chan);
}
static u64 hvs_stream_rcvhiwat(struct vsock_sock *vsk)
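
The hyperv_transport rework above defers teardown when the peer has not yet acknowledged shutdown: release sends FIN, and unless SOCK_DONE is already set it takes a reference and arms an HVS_CLOSE_TIMEOUT (8 s) delayed work instead of removing the socket immediately. The decision logic, reduced to a stand-alone sketch with plain booleans standing in for the socket state:

    #include <stdbool.h>
    #include <stdio.h>

    struct conn {
        bool established;
        bool fin_sent;
        bool done;         /* peer acknowledged shutdown */
        bool timer_armed;
    };

    /* Returns true if the caller may remove the socket right away,
     * false if a delayed-close timer now owns the teardown. */
    static bool close_lock_held(struct conn *c)
    {
        if (!c->established)
            return true;           /* never connected: remove now */
        if (!c->fin_sent)
            c->fin_sent = true;    /* graceful shutdown to the peer */
        if (c->done)
            return true;           /* peer already acknowledged */
        c->timer_armed = true;     /* HVS_CLOSE_TIMEOUT fallback */
        return false;
    }

    int main(void)
    {
        struct conn c = { .established = true };

        printf("remove now: %d, timer: %d\n",
               close_lock_held(&c), c.timer_armed);  /* 0, 1 */
        return 0;
    }
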
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 2db713d..5d5333a 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -13,6 +13,11 @@
#include "core.h"
#include "rdev-ops.h"
+static bool cfg80211_valid_60g_freq(u32 freq)
+{
+ return freq >= 58320 && freq <= 70200;
+}
+
void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
struct ieee80211_channel *chan,
enum nl80211_channel_type chan_type)
@@ -22,6 +27,8 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
chandef->chan = chan;
chandef->center_freq2 = 0;
+ chandef->edmg.bw_config = 0;
+ chandef->edmg.channels = 0;
switch (chan_type) {
case NL80211_CHAN_NO_HT:
@@ -46,6 +53,91 @@ void cfg80211_chandef_create(struct cfg80211_chan_def *chandef,
}
EXPORT_SYMBOL(cfg80211_chandef_create);
+static bool cfg80211_edmg_chandef_valid(const struct cfg80211_chan_def *chandef)
+{
+ int max_contiguous = 0;
+ int num_of_enabled = 0;
+ int contiguous = 0;
+ int i;
+
+ if (!chandef->edmg.channels || !chandef->edmg.bw_config)
+ return false;
+
+ if (!cfg80211_valid_60g_freq(chandef->chan->center_freq))
+ return false;
+
+ for (i = 0; i < 6; i++) {
+ if (chandef->edmg.channels & BIT(i)) {
+ contiguous++;
+ num_of_enabled++;
+ } else {
+ contiguous = 0;
+ }
+
+ max_contiguous = max(contiguous, max_contiguous);
+ }
+ /* basic verification of edmg configuration according to
+ * IEEE P802.11ay/D4.0 section 9.4.2.251
+ */
+ /* check bw_config against contiguous edmg channels */
+ switch (chandef->edmg.bw_config) {
+ case IEEE80211_EDMG_BW_CONFIG_4:
+ case IEEE80211_EDMG_BW_CONFIG_8:
+ case IEEE80211_EDMG_BW_CONFIG_12:
+ if (max_contiguous < 1)
+ return false;
+ break;
+ case IEEE80211_EDMG_BW_CONFIG_5:
+ case IEEE80211_EDMG_BW_CONFIG_9:
+ case IEEE80211_EDMG_BW_CONFIG_13:
+ if (max_contiguous < 2)
+ return false;
+ break;
+ case IEEE80211_EDMG_BW_CONFIG_6:
+ case IEEE80211_EDMG_BW_CONFIG_10:
+ case IEEE80211_EDMG_BW_CONFIG_14:
+ if (max_contiguous < 3)
+ return false;
+ break;
+ case IEEE80211_EDMG_BW_CONFIG_7:
+ case IEEE80211_EDMG_BW_CONFIG_11:
+ case IEEE80211_EDMG_BW_CONFIG_15:
+ if (max_contiguous < 4)
+ return false;
+ break;
+
+ default:
+ return false;
+ }
+
+ /* check bw_config against aggregated (non contiguous) edmg channels */
+ switch (chandef->edmg.bw_config) {
+ case IEEE80211_EDMG_BW_CONFIG_4:
+ case IEEE80211_EDMG_BW_CONFIG_5:
+ case IEEE80211_EDMG_BW_CONFIG_6:
+ case IEEE80211_EDMG_BW_CONFIG_7:
+ break;
+ case IEEE80211_EDMG_BW_CONFIG_8:
+ case IEEE80211_EDMG_BW_CONFIG_9:
+ case IEEE80211_EDMG_BW_CONFIG_10:
+ case IEEE80211_EDMG_BW_CONFIG_11:
+ if (num_of_enabled < 2)
+ return false;
+ break;
+ case IEEE80211_EDMG_BW_CONFIG_12:
+ case IEEE80211_EDMG_BW_CONFIG_13:
+ case IEEE80211_EDMG_BW_CONFIG_14:
+ case IEEE80211_EDMG_BW_CONFIG_15:
+ if (num_of_enabled < 4 || max_contiguous < 2)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
{
u32 control_freq;
@@ -111,6 +203,10 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef)
return false;
}
+ if (cfg80211_chandef_is_edmg(chandef) &&
+ !cfg80211_edmg_chandef_valid(chandef))
+ return false;
+
return true;
}
EXPORT_SYMBOL(cfg80211_chandef_valid);
@@ -720,12 +816,66 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
return true;
}
+/* check if the operating channels are valid and supported */
+static bool cfg80211_edmg_usable(struct wiphy *wiphy, u8 edmg_channels,
+ enum ieee80211_edmg_bw_config edmg_bw_config,
+ int primary_channel,
+ struct ieee80211_edmg *edmg_cap)
+{
+ struct ieee80211_channel *chan;
+ int i, freq;
+ int channels_counter = 0;
+
+ if (!edmg_channels && !edmg_bw_config)
+ return true;
+
+ if ((!edmg_channels && edmg_bw_config) ||
+ (edmg_channels && !edmg_bw_config))
+ return false;
+
+ if (!(edmg_channels & BIT(primary_channel - 1)))
+ return false;
+
+ /* 60GHz channels 1..6 */
+ for (i = 0; i < 6; i++) {
+ if (!(edmg_channels & BIT(i)))
+ continue;
+
+ if (!(edmg_cap->channels & BIT(i)))
+ return false;
+
+ channels_counter++;
+
+ freq = ieee80211_channel_to_frequency(i + 1,
+ NL80211_BAND_60GHZ);
+ chan = ieee80211_get_channel(wiphy, freq);
+ if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
+ return false;
+ }
+
+ /* IEEE802.11 allows max 4 channels */
+ if (channels_counter > 4)
+ return false;
+
+ /* check bw_config is a subset of what driver supports
+ * (see IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13)
+ */
+ if ((edmg_bw_config % 4) > (edmg_cap->bw_config % 4))
+ return false;
+
+ if (edmg_bw_config > edmg_cap->bw_config)
+ return false;
+
+ return true;
+}
+
bool cfg80211_chandef_usable(struct wiphy *wiphy,
const struct cfg80211_chan_def *chandef,
u32 prohibited_flags)
{
struct ieee80211_sta_ht_cap *ht_cap;
struct ieee80211_sta_vht_cap *vht_cap;
+ struct ieee80211_edmg *edmg_cap;
u32 width, control_freq, cap;
if (WARN_ON(!cfg80211_chandef_valid(chandef)))
@@ -733,6 +883,15 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
ht_cap = &wiphy->bands[chandef->chan->band]->ht_cap;
vht_cap = &wiphy->bands[chandef->chan->band]->vht_cap;
+ edmg_cap = &wiphy->bands[chandef->chan->band]->edmg_cap;
+
+ if (edmg_cap->channels &&
+ !cfg80211_edmg_usable(wiphy,
+ chandef->edmg.channels,
+ chandef->edmg.bw_config,
+ chandef->chan->hw_value,
+ edmg_cap))
+ return false;
control_freq = chandef->chan->center_freq;
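
cfg80211_edmg_chandef_valid() above derives two quantities from the 6-bit channel mask (bit i = 60 GHz channel i+1): the number of enabled channels and the longest contiguous run, and checks both against the bw_config class. The counting loop in isolation, as a stand-alone sketch:

    #include <stdio.h>

    /* Longest run of set bits and population count over channels 1..6,
     * mirroring the loop in cfg80211_edmg_chandef_valid(). */
    static void edmg_counts(unsigned channels, int *num, int *max_contig)
    {
        int contiguous = 0, i;

        *num = 0;
        *max_contig = 0;
        for (i = 0; i < 6; i++) {
            if (channels & (1u << i)) {
                contiguous++;
                (*num)++;
            } else {
                contiguous = 0;
            }
            if (contiguous > *max_contig)
                *max_contig = contiguous;
        }
    }

    int main(void)
    {
        int num, run;

        edmg_counts(0x1b, &num, &run);  /* channels 1,2,4,5 */
        printf("enabled=%d longest=%d\n", num, run);  /* 4, 2 */
        return 0;
    }
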
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 646d50c..0323ed8 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -209,6 +209,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 },
[NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 },
+ [NL80211_ATTR_WIPHY_EDMG_CHANNELS] = { .type = NLA_U8 },
+ [NL80211_ATTR_WIPHY_EDMG_BW_CONFIG] = { .type = NLA_U8 },
[NL80211_ATTR_CHANNEL_WIDTH] = { .type = NLA_U32 },
[NL80211_ATTR_CENTER_FREQ1] = { .type = NLA_U32 },
[NL80211_ATTR_CENTER_FREQ2] = { .type = NLA_U32 },
@@ -1410,6 +1412,15 @@ static int nl80211_send_band_rateinfo(struct sk_buff *msg,
nla_nest_end(msg, nl_iftype_data);
}
+ /* add EDMG info */
+ if (sband->edmg_cap.channels &&
+ (nla_put_u8(msg, NL80211_BAND_ATTR_EDMG_CHANNELS,
+ sband->edmg_cap.channels) ||
+ nla_put_u8(msg, NL80211_BAND_ATTR_EDMG_BW_CONFIG,
+ sband->edmg_cap.bw_config)))
+
+ return -ENOBUFS;
+
/* add bitrates */
nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES);
if (!nl_rates)
@@ -2348,6 +2359,18 @@ static int nl80211_parse_chandef(struct cfg80211_registered_device *rdev,
info->attrs[NL80211_ATTR_CENTER_FREQ2]);
}
+ if (info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]) {
+ chandef->edmg.channels =
+ nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]);
+
+ if (info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG])
+ chandef->edmg.bw_config =
+ nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG]);
+ } else {
+ chandef->edmg.bw_config = 0;
+ chandef->edmg.channels = 0;
+ }
+
if (!cfg80211_chandef_valid(chandef))
return -EINVAL;
@@ -9407,6 +9430,15 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
return -EINVAL;
}
+ if (info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]) {
+ connect.edmg.channels =
+ nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_CHANNELS]);
+
+ if (info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG])
+ connect.edmg.bw_config =
+ nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_EDMG_BW_CONFIG]);
+ }
+
if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) {
connkeys = nl80211_parse_connkeys(rdev, info, NULL);
if (IS_ERR(connkeys))
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 83a1781..96f5c8e 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1009,7 +1009,7 @@ static u32 cfg80211_calculate_bitrate_ht(struct rate_info *rate)
return (bitrate + 50000) / 100000;
}
-static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate)
+static u32 cfg80211_calculate_bitrate_dmg(struct rate_info *rate)
{
static const u32 __mcs2bitrate[] = {
/* control PHY */
@@ -1056,6 +1056,40 @@ static u32 cfg80211_calculate_bitrate_60g(struct rate_info *rate)
return __mcs2bitrate[rate->mcs];
}
+static u32 cfg80211_calculate_bitrate_edmg(struct rate_info *rate)
+{
+ static const u32 __mcs2bitrate[] = {
+ /* control PHY */
+ [0] = 275,
+ /* SC PHY */
+ [1] = 3850,
+ [2] = 7700,
+ [3] = 9625,
+ [4] = 11550,
+ [5] = 12512, /* 1251.25 mbps */
+ [6] = 13475,
+ [7] = 15400,
+ [8] = 19250,
+ [9] = 23100,
+ [10] = 25025,
+ [11] = 26950,
+ [12] = 30800,
+ [13] = 38500,
+ [14] = 46200,
+ [15] = 50050,
+ [16] = 53900,
+ [17] = 57750,
+ [18] = 69300,
+ [19] = 75075,
+ [20] = 80850,
+ };
+
+ if (WARN_ON_ONCE(rate->mcs >= ARRAY_SIZE(__mcs2bitrate)))
+ return 0;
+
+ return __mcs2bitrate[rate->mcs] * rate->n_bonded_ch;
+}
+
static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate)
{
static const u32 base[4][10] = {
@@ -1226,8 +1260,10 @@ u32 cfg80211_calculate_bitrate(struct rate_info *rate)
{
if (rate->flags & RATE_INFO_FLAGS_MCS)
return cfg80211_calculate_bitrate_ht(rate);
- if (rate->flags & RATE_INFO_FLAGS_60G)
- return cfg80211_calculate_bitrate_60g(rate);
+ if (rate->flags & RATE_INFO_FLAGS_DMG)
+ return cfg80211_calculate_bitrate_dmg(rate);
+ if (rate->flags & RATE_INFO_FLAGS_EDMG)
+ return cfg80211_calculate_bitrate_edmg(rate);
if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
return cfg80211_calculate_bitrate_vht(rate);
if (rate->flags & RATE_INFO_FLAGS_HE_MCS)
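
cfg80211_calculate_bitrate_edmg() above returns the table entry (in units of 100 kbps) scaled by rate->n_bonded_ch, so MCS 12 over two bonded channels gives 30800 * 2 = 61600, i.e. 6.16 Gbps. A sketch of the lookup with a subset of the table:

    #include <stdio.h>

    /* Subset of the EDMG SC PHY table, in units of 100 kbps */
    static const unsigned mcs2bitrate[] = {
        [0] = 275, [1] = 3850, [12] = 30800,
    };

    static unsigned edmg_bitrate(unsigned mcs, unsigned n_bonded_ch)
    {
        if (mcs >= sizeof(mcs2bitrate) / sizeof(mcs2bitrate[0]))
            return 0;  /* mirrors the WARN_ON_ONCE guard */
        return mcs2bitrate[mcs] * n_bonded_ch;
    }

    int main(void)
    {
        /* MCS 12, 2.16+2.16 GHz channel bonding */
        printf("%u.%u Mbps\n", edmg_bitrate(12, 2) / 10,
               edmg_bitrate(12, 2) % 10);  /* 6160.0 Mbps */
        return 0;
    }
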
diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include
index dad5583..3b2861f 100644
--- a/scripts/Kconfig.include
+++ b/scripts/Kconfig.include
@@ -20,7 +20,7 @@
# $(cc-option,<flag>)
# Return y if the compiler supports <flag>, n otherwise
-cc-option = $(success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null)
+cc-option = $(success,$(CC) -Werror $(CLANG_FLAGS) $(1) -E -x c /dev/null -o /dev/null)
# $(ld-option,<flag>)
# Return y if the linker supports <flag>, n otherwise
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index 1771a31..15dd58a 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -75,7 +75,7 @@
$(if $(CONFIG_MODULE_SRCVERSION_ALL),-a,) \
$(if $(KBUILD_EXTMOD),-i,-o) $(kernelsymfile) \
$(if $(KBUILD_EXTMOD),-I $(modulesymfile)) \
- $(if $(KBUILD_EXTRA_SYMBOLS), $(patsubst %, -e %,$(KBUILD_EXTRA_SYMBOLS))) \
+ $(if $(KBUILD_EXTMOD),$(addprefix -e ,$(KBUILD_EXTRA_SYMBOLS))) \
$(if $(KBUILD_EXTMOD),-o $(modulesymfile)) \
$(if $(CONFIG_DEBUG_SECTION_MISMATCH),,-S) \
$(if $(CONFIG_SECTION_MISMATCH_WARN_ONLY),,-E) \
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 6ac3685..a5fe929 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3118,7 +3118,7 @@
$compat2 =~ s/\,[a-zA-Z0-9]*\-/\,<\.\*>\-/;
my $compat3 = $compat;
$compat3 =~ s/\,([a-z]*)[0-9]*\-/\,$1<\.\*>\-/;
- `grep -Erq "$compat|$compat2|$compat3" $dt_path`;
+ `grep -ERq "$compat|$compat2|$compat3" $dt_path`;
if ( $? >> 8 ) {
WARN("UNDOCUMENTED_DT_STRING",
"DT compatible string \"$compat\" appears un-documented -- check $dt_path\n" . $herecurr);
diff --git a/scripts/genksyms/keywords.c b/scripts/genksyms/keywords.c
index 9f40bcd..f6956aa 100644
--- a/scripts/genksyms/keywords.c
+++ b/scripts/genksyms/keywords.c
@@ -24,6 +24,10 @@
{ "__volatile__", VOLATILE_KEYW },
{ "__builtin_va_list", VA_LIST_KEYW },
+ { "__int128", BUILTIN_INT_KEYW },
+ { "__int128_t", BUILTIN_INT_KEYW },
+ { "__uint128_t", BUILTIN_INT_KEYW },
+
// According to rth, c99 defines "_Bool", __restrict", __restrict__", "restrict". KAO
{ "_Bool", BOOL_KEYW },
{ "_restrict", RESTRICT_KEYW },
diff --git a/scripts/genksyms/parse.y b/scripts/genksyms/parse.y
index 00a6d7e..1ebcf52 100644
--- a/scripts/genksyms/parse.y
+++ b/scripts/genksyms/parse.y
@@ -76,6 +76,7 @@
%token ATTRIBUTE_KEYW
%token AUTO_KEYW
%token BOOL_KEYW
+%token BUILTIN_INT_KEYW
%token CHAR_KEYW
%token CONST_KEYW
%token DOUBLE_KEYW
@@ -263,6 +264,7 @@
| VOID_KEYW
| BOOL_KEYW
| VA_LIST_KEYW
+ | BUILTIN_INT_KEYW
| TYPE { (*$1)->tag = SYM_TYPEDEF; $$ = $1; }
;
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index 0c9c54b..31ed7f3 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -152,6 +152,9 @@ static int read_symbol(FILE *in, struct sym_entry *s)
/* exclude debugging symbols */
else if (stype == 'N' || stype == 'n')
return -1;
+ /* exclude s390 kasan local symbols */
+ else if (!strncmp(sym, ".LASANPC", 8))
+ return -1;
/* include the type field in the symbol name, so that it gets
* compressed together */
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index fd99ae9..0dde19c 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -784,6 +784,7 @@ int conf_write(const char *name)
const char *str;
char dirname[PATH_MAX+1], tmpname[PATH_MAX+22], newname[PATH_MAX+8];
char *env;
+ int i;
dirname[0] = 0;
if (name && name[0]) {
@@ -860,6 +861,9 @@ int conf_write(const char *name)
}
fclose(out);
+ for_all_symbols(i, sym)
+ sym->flags &= ~SYMBOL_WRITTEN;
+
if (*tmpname) {
strcat(dirname, basename);
strcat(dirname, ".old");
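
The confdata fix above clears SYMBOL_WRITTEN after conf_write() finishes; the flag de-duplicates symbols within one write, but if it survives the call, a second write from the same process would skip every symbol and emit an empty file. Minimal model of the flag lifecycle:

    #include <stdio.h>

    #define SYMBOL_WRITTEN 0x1

    struct symbol { const char *name; int flags; };

    static void write_config(struct symbol *syms, int n)
    {
        for (int i = 0; i < n; i++) {
            if (syms[i].flags & SYMBOL_WRITTEN)
                continue;  /* de-dup within one write */
            printf("CONFIG_%s=y\n", syms[i].name);
            syms[i].flags |= SYMBOL_WRITTEN;
        }
        for (int i = 0; i < n; i++)  /* the fix: reset for the next write */
            syms[i].flags &= ~SYMBOL_WRITTEN;
    }

    int main(void)
    {
        struct symbol syms[] = { { "FOO", 0 }, { "BAR", 0 } };

        write_config(syms, 2);
        write_config(syms, 2);  /* second pass emits everything again */
        return 0;
    }
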
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 09b9fa7..63fa0cb 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -413,3 +413,14 @@
exit 1
fi
fi
+
+# Starting with Android Q, the DTBs are part of dtb.img and are no
+# longer part of the kernel image. The RTIC DTS relies on the kernel
+# environment and cannot be built outside of the kernel. Generate the
+# RTIC DTS after a successful kernel build if MPGen is enabled. The DTB
+# will be generated with dtb.img in kernel_definitions.mk.
+if [ ! -z ${RTIC_MPGEN+x} ]; then
+ ${RTIC_MPGEN} --objcopy="${OBJCOPY}" --objdump="${OBJDUMP}" \
+ --binpath="" --vmlinux="vmlinux" --config=${KCONFIG_CONFIG} \
+ --cc="${CC} ${KBUILD_AFLAGS}" --dts=rtic_mp.dts
+fi
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index 2e77937..ccfbfde 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -326,7 +326,8 @@ static uint_t *sift_rel_mcount(uint_t *mlocp,
if (!mcountsym)
mcountsym = get_mcountsym(sym0, relp, str0);
- if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
+ if (mcountsym && mcountsym == Elf_r_sym(relp) &&
+ !is_fake_mcount(relp)) {
uint_t const addend =
_w(_w(relp->r_offset) - recval + mcount_adjust);
mrelp->r_offset = _w(offbase
diff --git a/scripts/sphinx-pre-install b/scripts/sphinx-pre-install
index 0674597..3524dbc 100755
--- a/scripts/sphinx-pre-install
+++ b/scripts/sphinx-pre-install
@@ -301,7 +301,7 @@
#
# Checks valid for RHEL/CentOS version 7.x.
#
- if (! $system_release =~ /Fedora/) {
+ if (!($system_release =~ /Fedora/)) {
$map{"virtualenv"} = "python-virtualenv";
}
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index d31a52e..91d259c 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -275,6 +275,8 @@ static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
return v;
}
+static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap);
+
/*
* Initialize a policy database structure.
*/
@@ -322,8 +324,10 @@ static int policydb_init(struct policydb *p)
out:
hashtab_destroy(p->filename_trans);
hashtab_destroy(p->range_tr);
- for (i = 0; i < SYM_NUM; i++)
+ for (i = 0; i < SYM_NUM; i++) {
+ hashtab_map(p->symtab[i].table, destroy_f[i], NULL);
hashtab_destroy(p->symtab[i].table);
+ }
return rc;
}
diff --git a/sound/ac97/bus.c b/sound/ac97/bus.c
index 9cbf692..ca50ff4 100644
--- a/sound/ac97/bus.c
+++ b/sound/ac97/bus.c
@@ -125,17 +125,12 @@ static int ac97_codec_add(struct ac97_controller *ac97_ctrl, int idx,
vendor_id);
ret = device_add(&codec->dev);
- if (ret)
- goto err_free_codec;
+ if (ret) {
+ put_device(&codec->dev);
+ return ret;
+ }
return 0;
-err_free_codec:
- of_node_put(codec->dev.of_node);
- put_device(&codec->dev);
- kfree(codec);
- ac97_ctrl->codecs[idx] = NULL;
-
- return ret;
}
unsigned int snd_ac97_bus_scan_one(struct ac97_controller *adrv,
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 009e469..8e547e2 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -578,10 +578,7 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
stream->metadata_set = false;
stream->next_track = false;
- if (stream->direction == SND_COMPRESS_PLAYBACK)
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
- else
- stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
} else {
return -EPERM;
}
@@ -698,8 +695,17 @@ static int snd_compr_start(struct snd_compr_stream *stream)
{
int retval;
- if (stream->runtime->state != SNDRV_PCM_STATE_PREPARED)
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_SETUP:
+ if (stream->direction != SND_COMPRESS_CAPTURE)
+ return -EPERM;
+ break;
+ case SNDRV_PCM_STATE_PREPARED:
+ break;
+ default:
return -EPERM;
+ }
+
retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_START);
if (!retval)
stream->runtime->state = SNDRV_PCM_STATE_RUNNING;
@@ -710,9 +716,15 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
{
int retval;
- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
- stream->runtime->state == SNDRV_PCM_STATE_SETUP)
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_PREPARED:
return -EPERM;
+ default:
+ break;
+ }
+
retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_STOP);
if (!retval) {
stream->runtime->state = SNDRV_PCM_STATE_SETUP;
@@ -771,10 +783,18 @@ static int snd_compr_drain(struct snd_compr_stream *stream)
int retval;
mutex_lock(&stream->device->lock);
- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
- stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
- retval = -EPERM;
- goto ret;
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_PREPARED:
+ case SNDRV_PCM_STATE_PAUSED:
+ mutex_unlock(&stream->device->lock);
+ return -EPERM;
+ case SNDRV_PCM_STATE_XRUN:
+ mutex_unlock(&stream->device->lock);
+ return -EPIPE;
+ default:
+ break;
}
mutex_unlock(&stream->device->lock);
retval = stream->ops->trigger(stream, SND_COMPR_TRIGGER_DRAIN);
@@ -798,6 +818,10 @@ static int snd_compr_next_track(struct snd_compr_stream *stream)
if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
return -EPERM;
+ /* next track doesn't have any meaning for capture streams */
+ if (stream->direction == SND_COMPRESS_CAPTURE)
+ return -EPERM;
+
/* you can signal next track if this is intended to be a gapless stream
* and current track metadata is set
*/
@@ -817,12 +841,25 @@ static int snd_compr_partial_drain(struct snd_compr_stream *stream)
int retval;
mutex_lock(&stream->device->lock);
- if (stream->runtime->state == SNDRV_PCM_STATE_PREPARED ||
- stream->runtime->state == SNDRV_PCM_STATE_SETUP) {
+ switch (stream->runtime->state) {
+ case SNDRV_PCM_STATE_OPEN:
+ case SNDRV_PCM_STATE_SETUP:
+ case SNDRV_PCM_STATE_PREPARED:
+ case SNDRV_PCM_STATE_PAUSED:
mutex_unlock(&stream->device->lock);
return -EPERM;
+ case SNDRV_PCM_STATE_XRUN:
+ mutex_unlock(&stream->device->lock);
+ return -EPIPE;
+ default:
+ break;
}
mutex_unlock(&stream->device->lock);
+
+ /* partial drain doesn't have any meaning for capture streams */
+ if (stream->direction == SND_COMPRESS_CAPTURE)
+ return -EPERM;
+
/* stream can be drained only when next track has been signalled */
if (stream->next_track == false)
return -EPERM;
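
The compress_offload hunks above replace single-state equality tests with explicit switches, so that capture streams, which sit in SETUP and never reach PREPARED, and error states get distinct answers (drain now returns -EPIPE from XRUN instead of -EPERM). The start check, reduced to a stand-alone sketch:

    #include <errno.h>
    #include <stdio.h>

    enum state { ST_OPEN, ST_SETUP, ST_PREPARED, ST_RUNNING };
    enum dir { PLAYBACK, CAPTURE };

    static int can_start(enum state s, enum dir d)
    {
        switch (s) {
        case ST_SETUP:
            /* capture never reaches PREPARED: allow start from SETUP */
            return d == CAPTURE ? 0 : -EPERM;
        case ST_PREPARED:
            return 0;
        default:
            return -EPERM;
        }
    }

    int main(void)
    {
        printf("capture/SETUP: %d\n", can_start(ST_SETUP, CAPTURE));   /* 0 */
        printf("playback/SETUP: %d\n", can_start(ST_SETUP, PLAYBACK)); /* -EPERM */
        return 0;
    }
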
diff --git a/sound/firewire/packets-buffer.c b/sound/firewire/packets-buffer.c
index 1ebf00c..715cd99 100644
--- a/sound/firewire/packets-buffer.c
+++ b/sound/firewire/packets-buffer.c
@@ -37,7 +37,7 @@ int iso_packets_buffer_init(struct iso_packets_buffer *b, struct fw_unit *unit,
packets_per_page = PAGE_SIZE / packet_size;
if (WARN_ON(!packets_per_page)) {
err = -EINVAL;
- goto error;
+ goto err_packets;
}
pages = DIV_ROUND_UP(count, packets_per_page);
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 27eb027..3847fe8 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -143,10 +143,12 @@ int snd_hdac_i915_init(struct hdac_bus *bus)
if (!acomp)
return -ENODEV;
if (!acomp->ops) {
- request_module("i915");
- /* 60s timeout */
- wait_for_completion_timeout(&bind_complete,
- msecs_to_jiffies(60 * 1000));
+ if (!IS_ENABLED(CONFIG_MODULES) ||
+ !request_module("i915")) {
+ /* 60s timeout */
+ wait_for_completion_timeout(&bind_complete,
+ msecs_to_jiffies(60 * 1000));
+ }
}
if (!acomp->ops) {
dev_info(bus->dev, "couldn't bind with audio component\n");
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index a12e594..a41c1be 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -609,11 +609,9 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
}
runtime->private_data = azx_dev;
- if (chip->gts_present)
- azx_pcm_hw.info = azx_pcm_hw.info |
- SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
-
runtime->hw = azx_pcm_hw;
+ if (chip->gts_present)
+ runtime->hw.info |= SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME;
runtime->hw.channels_min = hinfo->channels_min;
runtime->hw.channels_max = hinfo->channels_max;
runtime->hw.formats = hinfo->formats;
@@ -626,6 +624,13 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
20,
178000000);
+ /* for some reason, the playback stream stalls on PulseAudio with
+ * tsched=1 when a capture stream triggers. Until we figure out the
+ * real cause, disable tsched mode by setting the PCM info flag.
+ */
+ if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND)
+ runtime->hw.info |= SNDRV_PCM_INFO_BATCH;
+
if (chip->align_buffer_size)
/* constrain buffer sizes to be multiple of 128
bytes. This is more efficient in terms of memory
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index 53c3cd2..8a9dd47 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -40,7 +40,7 @@
/* 14 unused */
#define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */
#define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */
-/* 17 unused */
+#define AZX_DCAPS_AMD_WORKAROUND (1 << 17) /* AMD-specific workaround */
#define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */
#define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */
#define AZX_DCAPS_OLD_SSYNC (1 << 20) /* Old SSYNC reg for ICH */
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 579984e..bb2bd33 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -6033,6 +6033,24 @@ void snd_hda_gen_free(struct hda_codec *codec)
}
EXPORT_SYMBOL_GPL(snd_hda_gen_free);
+/**
+ * snd_hda_gen_reboot_notify - Make codec enter D3 before rebooting
+ * @codec: the HDA codec
+ *
+ * This can be put as patch_ops reboot_notify function.
+ */
+void snd_hda_gen_reboot_notify(struct hda_codec *codec)
+{
+ /* Make the codec enter D3 to avoid spurious noises from the internal
+ * speaker during (and after) reboot
+ */
+ snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
+ snd_hda_codec_write(codec, codec->core.afg, 0,
+ AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+ msleep(10);
+}
+EXPORT_SYMBOL_GPL(snd_hda_gen_reboot_notify);
+
#ifdef CONFIG_PM
/**
* snd_hda_gen_check_power_status - check the loopback power save state
@@ -6060,6 +6078,7 @@ static const struct hda_codec_ops generic_patch_ops = {
.init = snd_hda_gen_init,
.free = snd_hda_gen_free,
.unsol_event = snd_hda_jack_unsol_event,
+ .reboot_notify = snd_hda_gen_reboot_notify,
#ifdef CONFIG_PM
.check_power_status = snd_hda_gen_check_power_status,
#endif
@@ -6082,7 +6101,7 @@ static int snd_hda_parse_generic_codec(struct hda_codec *codec)
err = snd_hda_parse_pin_defcfg(codec, &spec->autocfg, NULL, 0);
if (err < 0)
- return err;
+ goto error;
err = snd_hda_gen_parse_auto_config(codec, &spec->autocfg);
if (err < 0)
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 1012366..ce9c293 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -336,6 +336,7 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
struct auto_pin_cfg *cfg);
int snd_hda_gen_build_controls(struct hda_codec *codec);
int snd_hda_gen_build_pcms(struct hda_codec *codec);
+void snd_hda_gen_reboot_notify(struct hda_codec *codec);
/* standard jack event callbacks */
void snd_hda_gen_hp_automute(struct hda_codec *codec,
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 308ce76..7a3e34b 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -78,6 +78,7 @@ enum {
POS_FIX_VIACOMBO,
POS_FIX_COMBO,
POS_FIX_SKL,
+ POS_FIX_FIFO,
};
/* Defines for ATI HD Audio support in SB450 south bridge */
@@ -149,7 +150,7 @@ module_param_array(model, charp, NULL, 0444);
MODULE_PARM_DESC(model, "Use the given board model.");
module_param_array(position_fix, int, NULL, 0444);
MODULE_PARM_DESC(position_fix, "DMA pointer read method."
- "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+).");
+ "(-1 = system default, 0 = auto, 1 = LPIB, 2 = POSBUF, 3 = VIACOMBO, 4 = COMBO, 5 = SKL+, 6 = FIFO).");
module_param_array(bdl_pos_adj, int, NULL, 0644);
MODULE_PARM_DESC(bdl_pos_adj, "BDL position adjustment offset.");
module_param_array(probe_mask, int, NULL, 0444);
@@ -350,6 +351,11 @@ enum {
#define AZX_DCAPS_PRESET_ATI_HDMI_NS \
(AZX_DCAPS_PRESET_ATI_HDMI | AZX_DCAPS_SNOOP_OFF)
+/* quirks for AMD SB */
+#define AZX_DCAPS_PRESET_AMD_SB \
+ (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_AMD_WORKAROUND |\
+ AZX_DCAPS_SNOOP_TYPE(ATI) | AZX_DCAPS_PM_RUNTIME)
+
/* quirks for Nvidia */
#define AZX_DCAPS_PRESET_NVIDIA \
(AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
@@ -920,6 +926,49 @@ static unsigned int azx_via_get_position(struct azx *chip,
return bound_pos + mod_dma_pos;
}
+#define AMD_FIFO_SIZE 32
+
+/* get the current DMA position with FIFO size correction */
+static unsigned int azx_get_pos_fifo(struct azx *chip, struct azx_dev *azx_dev)
+{
+ struct snd_pcm_substream *substream = azx_dev->core.substream;
+ struct snd_pcm_runtime *runtime = substream->runtime;
+ unsigned int pos, delay;
+
+ pos = snd_hdac_stream_get_pos_lpib(azx_stream(azx_dev));
+ if (!runtime)
+ return pos;
+
+ runtime->delay = AMD_FIFO_SIZE;
+ delay = frames_to_bytes(runtime, AMD_FIFO_SIZE);
+ if (azx_dev->insufficient) {
+ if (pos < delay) {
+ delay = pos;
+ runtime->delay = bytes_to_frames(runtime, pos);
+ } else {
+ azx_dev->insufficient = 0;
+ }
+ }
+
+ /* correct the DMA position for capture stream */
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ if (pos < delay)
+ pos += azx_dev->core.bufsize;
+ pos -= delay;
+ }
+
+ return pos;
+}
+
+static int azx_get_delay_from_fifo(struct azx *chip, struct azx_dev *azx_dev,
+ unsigned int pos)
+{
+ struct snd_pcm_substream *substream = azx_dev->core.substream;
+
+ /* just read back the value calculated above */
+ return substream->runtime->delay;
+}
+
static unsigned int azx_skl_get_dpib_pos(struct azx *chip,
struct azx_dev *azx_dev)
{
@@ -1528,6 +1577,7 @@ static int check_position_fix(struct azx *chip, int fix)
case POS_FIX_VIACOMBO:
case POS_FIX_COMBO:
case POS_FIX_SKL:
+ case POS_FIX_FIFO:
return fix;
}
@@ -1544,6 +1594,10 @@ static int check_position_fix(struct azx *chip, int fix)
dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n");
return POS_FIX_VIACOMBO;
}
+ if (chip->driver_caps & AZX_DCAPS_AMD_WORKAROUND) {
+ dev_dbg(chip->card->dev, "Using FIFO position fix\n");
+ return POS_FIX_FIFO;
+ }
if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) {
dev_dbg(chip->card->dev, "Using LPIB position fix\n");
return POS_FIX_LPIB;
@@ -1564,6 +1618,7 @@ static void assign_position_fix(struct azx *chip, int fix)
[POS_FIX_VIACOMBO] = azx_via_get_position,
[POS_FIX_COMBO] = azx_get_pos_lpib,
[POS_FIX_SKL] = azx_get_pos_skl,
+ [POS_FIX_FIFO] = azx_get_pos_fifo,
};
chip->get_position[0] = chip->get_position[1] = callbacks[fix];
@@ -1578,6 +1633,9 @@ static void assign_position_fix(struct azx *chip, int fix)
azx_get_delay_from_lpib;
}
+ if (fix == POS_FIX_FIFO)
+ chip->get_delay[0] = chip->get_delay[1] =
+ azx_get_delay_from_fifo;
}
/*
@@ -2594,6 +2652,12 @@ static const struct pci_device_id azx_ids[] = {
/* AMD Hudson */
{ PCI_DEVICE(0x1022, 0x780d),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ /* AMD, X370 & co */
+ { PCI_DEVICE(0x1022, 0x1457),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
+ /* AMD, X570 & co */
+ { PCI_DEVICE(0x1022, 0x1487),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
/* AMD Stoney */
{ PCI_DEVICE(0x1022, 0x157a),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
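
azx_get_pos_fifo() above backs the LPIB position off by a fixed 32-frame FIFO; on capture the subtraction has to wrap inside the ring buffer. The arithmetic as a hedged sketch (FIFO_BYTES assumes 4 bytes per frame here; the driver computes it with frames_to_bytes() on the live runtime):

    #include <stdio.h>

    #define FIFO_BYTES 128  /* 32 frames, assuming 4 bytes per frame */

    /* Capture-side correction from azx_get_pos_fifo(): back the hardware
     * pointer off by the FIFO depth, wrapping inside the ring buffer. */
    static unsigned capture_pos(unsigned lpib_pos, unsigned bufsize)
    {
        unsigned pos = lpib_pos;

        if (pos < FIFO_BYTES)
            pos += bufsize;  /* wrap backwards through the ring */
        return pos - FIFO_BYTES;
    }

    int main(void)
    {
        printf("%u\n", capture_pos(64, 4096));   /* 4032: wrapped */
        printf("%u\n", capture_pos(512, 4096));  /* 384 */
        return 0;
    }
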
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 3cbd211..6f17b25 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -176,23 +176,10 @@ static void cx_auto_reboot_notify(struct hda_codec *codec)
{
struct conexant_spec *spec = codec->spec;
- switch (codec->core.vendor_id) {
- case 0x14f12008: /* CX8200 */
- case 0x14f150f2: /* CX20722 */
- case 0x14f150f4: /* CX20724 */
- break;
- default:
- return;
- }
-
/* Turn the problematic codec into D3 to avoid spurious noises
from the internal speaker during (and after) reboot */
cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, false);
-
- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
- snd_hda_codec_write(codec, codec->core.afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
- msleep(10);
+ snd_hda_gen_reboot_notify(codec);
}
static void cx_auto_free(struct hda_codec *codec)
@@ -1096,6 +1083,7 @@ static int patch_conexant_auto(struct hda_codec *codec)
*/
static const struct hda_device_id snd_hda_id_conexant[] = {
+ HDA_CODEC_ENTRY(0x14f11f86, "CX8070", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f12008, "CX8200", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15045, "CX20549 (Venice)", patch_conexant_auto),
HDA_CODEC_ENTRY(0x14f15047, "CX20551 (Waikiki)", patch_conexant_auto),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index dc19896..9b5caf0 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -868,15 +868,6 @@ static void alc_reboot_notify(struct hda_codec *codec)
alc_shutup(codec);
}
-/* power down codec to D3 at reboot/shutdown; set as reboot_notify ops */
-static void alc_d3_at_reboot(struct hda_codec *codec)
-{
- snd_hda_codec_set_power_to_all(codec, codec->core.afg, AC_PWRST_D3);
- snd_hda_codec_write(codec, codec->core.afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
- msleep(10);
-}
-
#define alc_free snd_hda_gen_free
#ifdef CONFIG_PM
@@ -5111,7 +5102,7 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
struct alc_spec *spec = codec->spec;
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
+ spec->reboot_notify = snd_hda_gen_reboot_notify; /* reduce noise */
spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
codec->power_save_node = 0; /* avoid click noises */
snd_hda_apply_pincfgs(codec, pincfgs);
@@ -6851,6 +6842,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
+ SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 160b276..6a8c279 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1150,6 +1150,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
return ret;
}
+static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
+ struct snd_pcm_hw_rule *rule)
+{
+ struct davinci_mcasp_ruledata *rd = rule->private;
+ struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
+ struct snd_mask nfmt;
+ int i, slot_width;
+
+ snd_mask_none(&nfmt);
+ slot_width = rd->mcasp->slot_width;
+
+ for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
+ if (snd_mask_test(fmt, i)) {
+ if (snd_pcm_format_width(i) <= slot_width) {
+ snd_mask_set(&nfmt, i);
+ }
+ }
+ }
+
+ return snd_mask_refine(fmt, &nfmt);
+}
+
static const unsigned int davinci_mcasp_dai_rates[] = {
8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
88200, 96000, 176400, 192000,
@@ -1257,7 +1279,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
struct davinci_mcasp_ruledata *ruledata =
&mcasp->ruledata[substream->stream];
u32 max_channels = 0;
- int i, dir;
+ int i, dir, ret;
int tdm_slots = mcasp->tdm_slots;
/* Do not allow more than one stream per direction */
@@ -1286,6 +1308,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
max_channels++;
}
ruledata->serializers = max_channels;
+ ruledata->mcasp = mcasp;
max_channels *= tdm_slots;
/*
* If the already active stream has less channels than the calculated
@@ -1311,20 +1334,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
0, SNDRV_PCM_HW_PARAM_CHANNELS,
&mcasp->chconstr[substream->stream]);
- if (mcasp->slot_width)
- snd_pcm_hw_constraint_minmax(substream->runtime,
- SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
- 8, mcasp->slot_width);
+ if (mcasp->slot_width) {
+ /* Only allow formats requiring <= slot_width bits on the bus */
+ ret = snd_pcm_hw_rule_add(substream->runtime, 0,
+ SNDRV_PCM_HW_PARAM_FORMAT,
+ davinci_mcasp_hw_rule_slot_width,
+ ruledata,
+ SNDRV_PCM_HW_PARAM_FORMAT, -1);
+ if (ret)
+ return ret;
+ }
/*
* If we rely on implicit BCLK divider setting we should
* set constraints based on what we can provide.
*/
if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
- int ret;
-
- ruledata->mcasp = mcasp;
-
ret = snd_pcm_hw_rule_add(substream->runtime, 0,
SNDRV_PCM_HW_PARAM_RATE,
davinci_mcasp_hw_rule_rate,
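The new FORMAT rule admits only sample formats whose width fits the configured slot width. The same filtering logic in a self-contained form, with plain integers standing in for the ALSA mask types (the table below is invented for the example):

    #include <stdio.h>

    struct fmt { const char *name; int width; };    /* hypothetical table */

    int main(void)
    {
            const struct fmt fmts[] = {
                    { "S16_LE", 16 }, { "S24_LE", 24 }, { "S32_LE", 32 },
            };
            int slot_width = 24;    /* e.g. from the McASP DT property */

            /* Keep a format only if it needs no more bits than a slot. */
            for (unsigned int i = 0; i < sizeof(fmts) / sizeof(fmts[0]); i++)
                    if (fmts[i].width <= slot_width)
                            printf("%s allowed\n", fmts[i].name);
            return 0;
    }

With slot_width = 24 this keeps S16_LE and S24_LE and drops S32_LE, which is exactly what the hw rule does to the runtime's format mask.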
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 60d43d5..11399f8 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -329,7 +329,6 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
val |= I2S_CHN_4;
break;
case 2:
- case 1:
val |= I2S_CHN_2;
break;
default:
@@ -462,7 +461,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
},
.capture = {
.stream_name = "Capture",
- .channels_min = 1,
+ .channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = (SNDRV_PCM_FMTBIT_S8 |
@@ -662,7 +661,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
}
if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) {
- if (val >= 1 && val <= 8)
+ if (val >= 2 && val <= 8)
soc_dai->capture.channels_max = val;
}
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 727a1b4..eb99496 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1530,8 +1530,11 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
}
}
- if (dai_link->dai_fmt)
- snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
+ if (dai_link->dai_fmt) {
+ ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
+ if (ret)
+ return ret;
+ }
ret = soc_post_component_init(rtd, dai_link->name);
if (ret)
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 91cc574..b9ad15f 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1147,8 +1147,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
list_add_tail(&widget->work_list, list);
if (custom_stop_condition && custom_stop_condition(widget, dir)) {
- widget->endpoints[dir] = 1;
- return widget->endpoints[dir];
+ list = NULL;
+ custom_stop_condition = NULL;
}
if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
@@ -1185,8 +1185,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
*
* Optionally, can be supplied with a function acting as a stopping condition.
* This function takes the dapm widget currently being examined and the walk
- * direction as an arguments, it should return true if the walk should be
- * stopped and false otherwise.
+ * direction as arguments; it should return true if widgets from that point
+ * in the graph onwards should not be added to the widget list.
*/
static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
struct list_head *list,
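Under the new semantics the callback prunes the walk instead of forcing the current widget to be treated as an endpoint. A hedged sketch of a stop condition a caller might pass (the function name is invented; the real in-tree user lives in soc-pcm.c):

    /* Stop the DAPM graph walk at DAI widgets: anything beyond them
     * belongs to another DAI and must not land in this widget list.
     */
    static bool stop_at_dai(struct snd_soc_dapm_widget *w,
                            enum snd_soc_dapm_direction dir)
    {
            return w->id == snd_soc_dapm_dai_in ||
                   w->id == snd_soc_dapm_dai_out;
    }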
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index fa6e582..dc0c00a 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2669,8 +2669,7 @@ int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND) &&
- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED))
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
continue;
dev_dbg(be->dev, "ASoC: prepare BE %s\n",
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 40ad000..dd64c4b 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -280,7 +280,8 @@ static int sound_insert_unit(struct sound_unit **list, const struct file_operati
goto retry;
}
spin_unlock(&sound_loader_lock);
- return -EBUSY;
+ r = -EBUSY;
+ goto fail;
}
}
diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
index e1fbb9c..a197fc3 100644
--- a/sound/usb/hiface/pcm.c
+++ b/sound/usb/hiface/pcm.c
@@ -604,14 +604,13 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
ret = hiface_pcm_init_urb(&rt->out_urbs[i], chip, OUT_EP,
hiface_pcm_out_urb_handler);
if (ret < 0)
- return ret;
+ goto error;
}
ret = snd_pcm_new(chip->card, "USB-SPDIF Audio", 0, 1, 0, &pcm);
if (ret < 0) {
- kfree(rt);
dev_err(&chip->dev->dev, "Cannot create pcm instance\n");
- return ret;
+ goto error;
}
pcm->private_data = rt;
@@ -624,4 +623,10 @@ int hiface_pcm_init(struct hiface_chip *chip, u8 extra_freq)
chip->pcm = rt;
return 0;
+
+error:
+ for (i = 0; i < PCM_N_URBS; i++)
+ kfree(rt->out_urbs[i].buffer);
+ kfree(rt);
+ return ret;
}
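Both failure paths now converge on one unwind label that releases the URB buffers and the runtime, instead of leaking the buffers on the first path and the runtime on the second. The idiom in isolation, as a standalone sketch:

    #include <stdlib.h>

    #define N_BUFS 8

    struct rt { void *bufs[N_BUFS]; };

    /* Allocate every buffer, or undo all partial work and fail:
     * the single-exit unwind pattern used above.
     */
    struct rt *rt_alloc(size_t sz)
    {
            struct rt *rt = calloc(1, sizeof(*rt));
            int i;

            if (!rt)
                    return NULL;
            for (i = 0; i < N_BUFS; i++) {
                    rt->bufs[i] = malloc(sz);
                    if (!rt->bufs[i])
                            goto error;
            }
            return rt;

    error:
            for (i = 0; i < N_BUFS; i++)
                    free(rt->bufs[i]);      /* free(NULL) is a no-op */
            free(rt);
            return NULL;
    }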
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index 5f3c872..da627b0 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -417,7 +417,7 @@ static const struct line6_properties podhd_properties_table[] = {
.name = "POD HD500",
.capabilities = LINE6_CAP_PCM
| LINE6_CAP_HWMON,
- .altsetting = 1,
+ .altsetting = 0,
.ep_ctrl_r = 0x81,
.ep_ctrl_w = 0x01,
.ep_audio_r = 0x86,
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index e63a7d3..799d153 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -83,6 +83,7 @@ struct mixer_build {
unsigned char *buffer;
unsigned int buflen;
DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);
+ DECLARE_BITMAP(termbitmap, MAX_ID_ELEMS);
struct usb_audio_term oterm;
const struct usbmix_name_map *map;
const struct usbmix_selector_map *selector_map;
@@ -759,6 +760,8 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
return -EINVAL;
if (!desc->bNrInPins)
return -EINVAL;
+ if (desc->bLength < sizeof(*desc) + desc->bNrInPins)
+ return -EINVAL;
switch (state->mixer->protocol) {
case UAC_VERSION_1:
@@ -788,16 +791,25 @@ static int uac_mixer_unit_get_channels(struct mixer_build *state,
* parse the source unit recursively until it reaches to a terminal
* or a branched unit.
*/
-static int check_input_term(struct mixer_build *state, int id,
+static int __check_input_term(struct mixer_build *state, int id,
struct usb_audio_term *term)
{
int protocol = state->mixer->protocol;
int err;
void *p1;
+ unsigned char *hdr;
memset(term, 0, sizeof(*term));
- while ((p1 = find_audio_control_unit(state, id)) != NULL) {
- unsigned char *hdr = p1;
+ for (;;) {
+ /* a loop in the terminal chain? */
+ if (test_and_set_bit(id, state->termbitmap))
+ return -EINVAL;
+
+ p1 = find_audio_control_unit(state, id);
+ if (!p1)
+ break;
+
+ hdr = p1;
term->id = id;
if (protocol == UAC_VERSION_1 || protocol == UAC_VERSION_2) {
@@ -815,7 +827,7 @@ static int check_input_term(struct mixer_build *state, int id,
/* call recursively to verify that the
* referenced clock entity is valid */
- err = check_input_term(state, d->bCSourceID, term);
+ err = __check_input_term(state, d->bCSourceID, term);
if (err < 0)
return err;
@@ -849,7 +861,7 @@ static int check_input_term(struct mixer_build *state, int id,
case UAC2_CLOCK_SELECTOR: {
struct uac_selector_unit_descriptor *d = p1;
/* call recursively to retrieve the channel info */
- err = check_input_term(state, d->baSourceID[0], term);
+ err = __check_input_term(state, d->baSourceID[0], term);
if (err < 0)
return err;
term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -912,7 +924,7 @@ static int check_input_term(struct mixer_build *state, int id,
/* call recursively to verify that the
* referenced clock entity is valid */
- err = check_input_term(state, d->bCSourceID, term);
+ err = __check_input_term(state, d->bCSourceID, term);
if (err < 0)
return err;
@@ -963,7 +975,7 @@ static int check_input_term(struct mixer_build *state, int id,
case UAC3_CLOCK_SELECTOR: {
struct uac_selector_unit_descriptor *d = p1;
/* call recursively to retrieve the channel info */
- err = check_input_term(state, d->baSourceID[0], term);
+ err = __check_input_term(state, d->baSourceID[0], term);
if (err < 0)
return err;
term->type = UAC3_SELECTOR_UNIT << 16; /* virtual type */
@@ -979,7 +991,7 @@ static int check_input_term(struct mixer_build *state, int id,
return -EINVAL;
/* call recursively to retrieve the channel info */
- err = check_input_term(state, d->baSourceID[0], term);
+ err = __check_input_term(state, d->baSourceID[0], term);
if (err < 0)
return err;
@@ -997,6 +1009,15 @@ static int check_input_term(struct mixer_build *state, int id,
return -ENODEV;
}
+
+static int check_input_term(struct mixer_build *state, int id,
+ struct usb_audio_term *term)
+{
+ memset(term, 0, sizeof(*term));
+ memset(state->termbitmap, 0, sizeof(state->termbitmap));
+ return __check_input_term(state, id, term);
+}
+
/*
* Feature Unit
*/
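The termbitmap turns the unit-chain walk into a visited-set traversal: each unit id may be seen at most once per top-level check_input_term() call, so descriptors crafted to loop back on themselves now fail with -EINVAL instead of recursing forever. The idea in a standalone form, with an array of next links standing in for the descriptor graph:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ID 256

    /* Follow a unit chain; refuse to revisit an id, which would mean
     * the (possibly malicious) descriptors contain a cycle.
     */
    static bool chain_terminates(const int *next, int id)
    {
            bool seen[MAX_ID] = { false };

            while (id >= 0 && id < MAX_ID) {
                    if (seen[id])
                            return false;   /* loop detected */
                    seen[id] = true;
                    id = next[id];          /* -1 marks a terminal */
            }
            return id == -1;
    }

    int main(void)
    {
            int next[MAX_ID] = { [0] = 1, [1] = 2, [2] = -1 };

            printf("%d\n", chain_terminates(next, 0));      /* 1: terminates */
            next[2] = 0;                                    /* introduce a cycle */
            printf("%d\n", chain_terminates(next, 0));      /* 0: loop caught */
            return 0;
    }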
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index f6ce6d5..fa2cc4a 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -1058,6 +1058,7 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd) {
+ kfree(fp->chmap);
kfree(fp->rate_table);
kfree(fp);
return NULL;
diff --git a/tools/iio/iio_utils.c b/tools/iio/iio_utils.c
index 7a6d61c..55272fe 100644
--- a/tools/iio/iio_utils.c
+++ b/tools/iio/iio_utils.c
@@ -159,9 +159,9 @@ int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
*be = (endianchar == 'b');
*bytes = padint / 8;
if (*bits_used == 64)
- *mask = ~0;
+ *mask = ~(0ULL);
else
- *mask = (1ULL << *bits_used) - 1;
+ *mask = (1ULL << *bits_used) - 1ULL;
*is_signed = (signchar == 's');
if (fclose(sysfsfp)) {
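The 64-bit special case exists because shifting a 64-bit value by 64 is undefined behaviour in C (the shift count must be strictly less than the promoted type's width), so `(1ULL << 64) - 1` cannot be relied on to produce an all-ones mask. The safe helper, distilled:

    #include <stdint.h>

    /* All-ones mask over the low 'bits' bits, 1 <= bits <= 64.
     * The bits == 64 case is handled explicitly because a shift
     * by 64 is undefined for a 64-bit operand.
     */
    static uint64_t low_bits_mask(unsigned int bits)
    {
            if (bits >= 64)
                    return ~(uint64_t)0;
            return ((uint64_t)1 << bits) - 1;
    }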
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index abed594..b8f3cca 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -305,7 +305,7 @@ static int read_symbols(struct elf *elf)
if (sym->type != STT_FUNC)
continue;
sym->pfunc = sym->cfunc = sym;
- coldstr = strstr(sym->name, ".cold.");
+ coldstr = strstr(sym->name, ".cold");
if (!coldstr)
continue;
diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
index a19690a..c8c86a0 100644
--- a/tools/perf/arch/s390/util/machine.c
+++ b/tools/perf/arch/s390/util/machine.c
@@ -6,8 +6,9 @@
#include "machine.h"
#include "api/fs/fs.h"
#include "debug.h"
+#include "symbol.h"
-int arch__fix_module_text_start(u64 *start, const char *name)
+int arch__fix_module_text_start(u64 *start, u64 *size, const char *name)
{
u64 m_start = *start;
char path[PATH_MAX];
@@ -17,7 +18,35 @@ int arch__fix_module_text_start(u64 *start, const char *name)
if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
pr_debug2("Using module %s start:%#lx\n", path, m_start);
*start = m_start;
+ } else {
+ /* Successful read of the module's text segment start address.
+ * Calculate the difference between the module start address
+ * in memory and the module text segment start address.
+ * For example, the module load address is 0x3ff8011b000
+ * (from /proc/modules) and the module text segment start
+ * address is 0x3ff8011b870 (from the file above).
+ *
+ * Adjust the module size and subtract the GOT table
+ * size located at the beginning of the module.
+ */
+ *size -= (*start - m_start);
}
return 0;
}
+
+/* On s390 the kernel text segment starts at a very low memory address,
+ * for example 0x10000, while modules are located at very high memory
+ * addresses, for example 0x3ff xxxx xxxx. The gap between the end of the
+ * kernel text segment and the beginning of the first module's text
+ * segment is very large. Therefore do not fill this gap and do not
+ * assign it to the kernel dso map.
+ */
+void arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
+{
+ if (strchr(p->name, '[') == NULL && strchr(c->name, '['))
+ /* Last kernel symbol mapped to end of page */
+ p->end = roundup(p->end, page_size);
+ else
+ p->end = c->start;
+ pr_debug4("%s sym:%s end:%#lx\n", __func__, p->name, p->end);
+}
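Plugging the example addresses from the comment into the adjustment: the text segment starts 0x870 bytes into the module, so the usable size shrinks by exactly that prefix. A quick host-side check (the module size is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t m_start = 0x3ff8011b000ULL;    /* from /proc/modules */
            uint64_t start   = 0x3ff8011b870ULL;    /* from the sysfs file */
            uint64_t size    = 0x10000;             /* invented module size */

            size -= start - m_start;                /* drop the GOT prefix */
            printf("adjusted size: %#llx\n", (unsigned long long)size);
            return 0;                               /* prints 0xf790 */
    }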
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index fa56fde..91c0a44 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -378,8 +378,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
/* Allocate and initialize all memory on CPU#0: */
if (init_cpu0) {
- orig_mask = bind_to_node(0);
- bind_to_memnode(0);
+ int node = numa_node_of_cpu(0);
+
+ orig_mask = bind_to_node(node);
+ bind_to_memnode(node);
}
bytes = bytes0 + HPSIZE;
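The old code bound to node 0 on the assumption that CPU 0 lives there, which need not hold (on some machines node 0 is not even present). libnuma can report the right node; a minimal standalone sketch of the lookup (link with -lnuma):

    #include <numa.h>
    #include <stdio.h>

    int main(void)
    {
            if (numa_available() < 0)
                    return 1;       /* no NUMA support on this system */

            /* Which NUMA node does CPU 0 actually belong to? */
            printf("cpu0 is on node %d\n", numa_node_of_cpu(0));
            return 0;
    }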
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index f42f228..1379551 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -174,7 +174,7 @@ static int set_tracing_cpumask(struct cpu_map *cpumap)
int last_cpu;
last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
- mask_size = (last_cpu + 3) / 4 + 1;
+ mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
cpumask = malloc(mask_size);
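The sizing follows the cpumask string format: one hex digit covers four CPUs, ids run 0..last_cpu, a ',' appears after every 32 CPUs, and one byte is the NUL. The arithmetic, checked in isolation:

    #include <stdio.h>

    /* Bytes for a NUL-terminated cpumask string over CPUs 0..last_cpu. */
    static unsigned int mask_str_size(unsigned int last_cpu)
    {
            unsigned int digits = last_cpu / 4 + 1; /* == ceil((last_cpu + 1) / 4) */
            unsigned int commas = last_cpu / 32;    /* ',' every 32 CPUs */

            return digits + commas + 1;             /* + NUL terminator */
    }

    int main(void)
    {
            printf("%u %u %u\n",
                   mask_str_size(0),        /* "1"          -> 2  */
                   mask_str_size(31),       /* "ffffffff"   -> 9  */
                   mask_str_size(32));      /* "1,ffffffff" -> 11 */
            return 0;
    }

The old `(last_cpu + 3) / 4 + 1` under-allocated by one byte whenever last_cpu was a multiple of four (e.g. it yields 1 for last_cpu = 0, leaving no room for the NUL).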
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 99de916..0bdb34f 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -711,6 +711,16 @@ __cmd_probe(int argc, const char **argv)
ret = perf_add_probe_events(params.events, params.nevents);
if (ret < 0) {
+
+ /*
+ * When perf_add_probe_events() fails it calls
+ * cleanup_perf_probe_events(pevs, npevs), i.e.
+ * cleanup_perf_probe_events(params.events, params.nevents), which
+ * will call clear_perf_probe_event(), so set nevents to zero
+ * to keep cleanup_params() from calling clear_perf_probe_event()
+ * again on the same pevs.
+ */
+ params.nevents = 0;
pr_err_with_code(" Error: Failed to add events.", ret);
return ret;
}
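The fix is an ownership hand-off: perf_add_probe_events() already cleans the events up on failure, so zeroing params.nevents keeps the later cleanup_params() pass from freeing the same entries a second time. The shape of the bug and the fix, in miniature (all names below are invented for the sketch):

    #include <stdlib.h>
    #include <string.h>

    struct params { char **events; int nevents; };

    /* Frees the entries but cannot reset the caller's count. */
    static void clear_events(char **events, int nevents)
    {
            for (int i = 0; i < nevents; i++)
                    free(events[i]);
    }

    /* Cleans up after itself on failure, like perf_add_probe_events(). */
    static int add_events(struct params *p)
    {
            clear_events(p->events, p->nevents);
            return -1;
    }

    int main(void)
    {
            struct params p = { calloc(2, sizeof(char *)), 2 };

            p.events[0] = strdup("ev0");
            p.events[1] = strdup("ev1");

            if (add_events(&p) < 0)
                    p.nevents = 0;  /* hand-off: entries already freed */

            clear_events(p.events, p.nevents);      /* safe no-op now */
            free(p.events);
            return 0;
    }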
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 4072015..7899625 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -2497,8 +2497,8 @@ static int add_default_attributes(void)
fprintf(stderr,
"Cannot set up top down events %s: %d\n",
str, err);
- free(str);
parse_events_print_error(&errinfo, str);
+ free(str);
return -1;
}
} else {
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 33eefc3..d073325 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -99,7 +99,7 @@ static void perf_top__resize(struct perf_top *top)
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
- struct perf_evsel *evsel = hists_to_evsel(he->hists);
+ struct perf_evsel *evsel;
struct symbol *sym;
struct annotation *notes;
struct map *map;
@@ -108,6 +108,8 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
if (!he || !he->ms.sym)
return -1;
+ evsel = hists_to_evsel(he->hists);
+
sym = he->ms.sym;
map = he->ms.map;
@@ -224,7 +226,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
static void perf_top__show_details(struct perf_top *top)
{
struct hist_entry *he = top->sym_filter_entry;
- struct perf_evsel *evsel = hists_to_evsel(he->hists);
+ struct perf_evsel *evsel;
struct annotation *notes;
struct symbol *symbol;
int more;
@@ -232,6 +234,8 @@ static void perf_top__show_details(struct perf_top *top)
if (!he)
return;
+ evsel = hists_to_evsel(he->hists);
+
symbol = he->ms.sym;
notes = symbol__annotation(symbol);
diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c
index 50df168..b02c961 100644
--- a/tools/perf/builtin-version.c
+++ b/tools/perf/builtin-version.c
@@ -19,6 +19,7 @@ static struct version version;
static struct option version_options[] = {
OPT_BOOLEAN(0, "build-options", &version.build_options,
"display the build options"),
+ OPT_END(),
};
static const char * const version_usage[] = {
diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
index 68c92bb..6b36b71 100644
--- a/tools/perf/pmu-events/jevents.c
+++ b/tools/perf/pmu-events/jevents.c
@@ -450,6 +450,7 @@ static struct fixed {
{ "inst_retired.any_p", "event=0xc0" },
{ "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
{ "cpu_clk_unhalted.thread", "event=0x3c" },
+ { "cpu_clk_unhalted.core", "event=0x3c" },
{ "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
{ NULL, NULL},
};
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index b1af249..7a9b123 100644
--- a/tools/perf/tests/mmap-thread-lookup.c
+++ b/tools/perf/tests/mmap-thread-lookup.c
@@ -52,7 +52,7 @@ static void *thread_fn(void *arg)
{
struct thread_data *td = arg;
ssize_t ret;
- int go;
+ int go = 0;
if (thread_init(td))
return NULL;
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index a96f62c..692d2fa 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -633,7 +633,11 @@ int hist_browser__run(struct hist_browser *browser, const char *help,
switch (key) {
case K_TIMER: {
u64 nr_entries;
- hbt->timer(hbt->arg);
+
+ WARN_ON_ONCE(!hbt);
+
+ if (hbt)
+ hbt->timer(hbt->arg);
if (hist_browser__has_filter(browser) ||
symbol_conf.report_hierarchy)
@@ -2707,7 +2711,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
{
struct hists *hists = evsel__hists(evsel);
struct hist_browser *browser = perf_evsel_browser__new(evsel, hbt, env, annotation_opts);
- struct branch_info *bi;
+ struct branch_info *bi = NULL;
#define MAX_OPTIONS 16
char *options[MAX_OPTIONS];
struct popup_action actions[MAX_OPTIONS];
@@ -2973,7 +2977,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
goto skip_annotation;
if (sort__mode == SORT_MODE__BRANCH) {
- bi = browser->he_selection->branch_info;
+
+ if (browser->he_selection)
+ bi = browser->he_selection->branch_info;
if (bi == NULL)
goto skip_annotation;
@@ -3144,7 +3150,8 @@ static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
switch (key) {
case K_TIMER:
- hbt->timer(hbt->arg);
+ if (hbt)
+ hbt->timer(hbt->arg);
if (!menu->lost_events_warned &&
menu->lost_events &&
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index c357051..daea1fd 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1079,16 +1079,14 @@ static int disasm_line__parse(char *line, const char **namep, char **rawp)
*namep = strdup(name);
if (*namep == NULL)
- goto out_free_name;
+ goto out;
(*rawp)[0] = tmp;
*rawp = ltrim(*rawp);
return 0;
-out_free_name:
- free((void *)namep);
- *namep = NULL;
+out:
return -1;
}
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 383674f..f93846e 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -701,7 +701,10 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
unsigned char *bitmap;
int last_cpu = cpu_map__cpu(map, map->nr - 1);
- bitmap = zalloc((last_cpu + 7) / 8);
+ if (buf == NULL)
+ return 0;
+
+ bitmap = zalloc(last_cpu / 8 + 1);
if (bitmap == NULL) {
buf[0] = '\0';
return 0;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 7f2e3b1..54c34c1 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -3285,6 +3285,13 @@ int perf_session__read_header(struct perf_session *session)
data->file.path);
}
+ if (f_header.attr_size == 0) {
+ pr_err("ERROR: The %s file's attr size field is 0 which is unexpected.\n"
+ "Was the 'perf record' command properly terminated?\n",
+ data->file.path);
+ return -EINVAL;
+ }
+
nr_attrs = f_header.attrs.size / f_header.attr_size;
lseek(fd, f_header.attrs.offset, SEEK_SET);
@@ -3365,7 +3372,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool,
size += sizeof(struct perf_event_header);
size += ids * sizeof(u64);
- ev = malloc(size);
+ ev = zalloc(size);
if (ev == NULL)
return -ENOMEM;
@@ -3472,7 +3479,7 @@ int perf_event__process_feature(struct perf_tool *tool,
return 0;
ff.buf = (void *)fe->data;
- ff.size = event->header.size - sizeof(event->header);
+ ff.size = event->header.size - sizeof(*fe);
ff.ph = &session->header;
if (feat_ops[feat].process(&ff, NULL))
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 076718a..003b70d 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1295,6 +1295,7 @@ static int machine__set_modules_path(struct machine *machine)
return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}
int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
+ u64 *size __maybe_unused,
const char *name __maybe_unused)
{
return 0;
@@ -1306,7 +1307,7 @@ static int machine__create_module(void *arg, const char *name, u64 start,
struct machine *machine = arg;
struct map *map;
- if (arch__fix_module_text_start(&start, name) < 0)
+ if (arch__fix_module_text_start(&start, &size, name) < 0)
return -1;
map = machine__findnew_module_map(machine, start, name);
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index ebde3ea..6f37678 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -219,7 +219,7 @@ struct symbol *machine__find_kernel_symbol_by_name(struct machine *machine,
struct map *machine__findnew_module_map(struct machine *machine, u64 start,
const char *filename);
-int arch__fix_module_text_start(u64 *start, const char *name);
+int arch__fix_module_text_start(u64 *start, u64 *size, const char *name);
int machine__load_kallsyms(struct machine *machine, const char *filename);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 1108609..f016d1b 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1141,6 +1141,9 @@ static void dump_read(struct perf_evsel *evsel, union perf_event *event)
evsel ? perf_evsel__name(evsel) : "FAIL",
event->read.value);
+ if (!evsel)
+ return;
+
read_format = evsel->attr.read_format;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 0715f97..91404ba 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -86,6 +86,11 @@ static int prefix_underscores_count(const char *str)
return tail - str;
}
+void __weak arch__symbols__fixup_end(struct symbol *p, struct symbol *c)
+{
+ p->end = c->start;
+}
+
const char * __weak arch__normalize_symbol_name(const char *name)
{
return name;
@@ -212,7 +217,7 @@ void symbols__fixup_end(struct rb_root *symbols)
curr = rb_entry(nd, struct symbol, rb_node);
if (prev->end == prev->start && prev->end != curr->start)
- prev->end = curr->start;
+ arch__symbols__fixup_end(prev, curr);
}
/* Last entry */
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index f25fae4..76ef2fa 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -349,6 +349,7 @@ const char *arch__normalize_symbol_name(const char *name);
#define SYMBOL_A 0
#define SYMBOL_B 1
+void arch__symbols__fixup_end(struct symbol *p, struct symbol *c);
int arch__compare_symbol_names(const char *namea, const char *nameb);
int arch__compare_symbol_names_n(const char *namea, const char *nameb,
unsigned int n);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 56007a7..2c146d0 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -192,14 +192,24 @@ struct comm *thread__comm(const struct thread *thread)
struct comm *thread__exec_comm(const struct thread *thread)
{
- struct comm *comm, *last = NULL;
+ struct comm *comm, *last = NULL, *second_last = NULL;
list_for_each_entry(comm, &thread->comm_list, list) {
if (comm->exec)
return comm;
+ second_last = last;
last = comm;
}
+ /*
+ * 'last' with no start time might be the parent's comm of a synthesized
+ * thread (created by processing a synthesized fork event). For a main
+ * thread, that is very probably wrong. Prefer a later comm to avoid
+ * that case.
+ */
+ if (second_last && !last->start && thread->pid_ == thread->tid)
+ return second_last;
+
return last;
}
diff --git a/tools/testing/selftests/bpf/sendmsg6_prog.c b/tools/testing/selftests/bpf/sendmsg6_prog.c
index 5aeaa28..a680628 100644
--- a/tools/testing/selftests/bpf/sendmsg6_prog.c
+++ b/tools/testing/selftests/bpf/sendmsg6_prog.c
@@ -41,8 +41,7 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
}
/* Rewrite destination. */
- if ((ctx->user_ip6[0] & 0xFFFF) == bpf_htons(0xFACE) &&
- ctx->user_ip6[0] >> 16 == bpf_htons(0xB00C)) {
+ if (ctx->user_ip6[0] == bpf_htonl(0xFACEB00C)) {
ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0);
ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1);
ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2);
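The split 16-bit comparisons only matched on little-endian hosts: reading the first four address bytes FA CE B0 0C as a u32 gives 0xFACEB00C on big-endian, where `x & 0xFFFF` is 0xB00C, not htons(0xFACE). Comparing the whole word against bpf_htonl() is endian-independent. A host-side check (libc byte-order helpers standing in for the bpf_* wrappers):

    #include <arpa/inet.h>
    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* First 32 bits of the IPv6 address as stored in memory. */
            uint32_t x = htonl(0xFACEB00C);

            /* Endian-clean test used by the fixed program. */
            assert(x == htonl(0xFACEB00C));

    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
            /* The old split test happens to hold here... */
            assert((x & 0xFFFF) == htons(0xFACE) &&
                   (x >> 16) == htons(0xB00C));
    #endif
            return 0;
    }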
diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
index 14c9fe2..075cb0c73 100644
--- a/tools/testing/selftests/cgroup/cgroup_util.c
+++ b/tools/testing/selftests/cgroup/cgroup_util.c
@@ -181,8 +181,7 @@ int cg_find_unified_root(char *root, size_t len)
strtok(NULL, delim);
strtok(NULL, delim);
- if (strcmp(fs, "cgroup") == 0 &&
- strcmp(type, "cgroup2") == 0) {
+ if (strcmp(type, "cgroup2") == 0) {
strncpy(root, mount, len);
return 0;
}
diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
new file mode 100644
index 0000000..63ed533
--- /dev/null
+++ b/tools/testing/selftests/kvm/config
@@ -0,0 +1,3 @@
+CONFIG_KVM=y
+CONFIG_KVM_INTEL=y
+CONFIG_KVM_AMD=y
diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh
index cca2baa..a8d8e8b 100755
--- a/tools/testing/selftests/net/forwarding/gre_multipath.sh
+++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh
@@ -93,18 +93,10 @@
ip route add vrf v$ol1 192.0.2.16/28 \
nexthop dev g1a \
nexthop dev g1b
-
- tc qdisc add dev $ul1 clsact
- tc filter add dev $ul1 egress pref 111 prot ipv4 \
- flower dst_ip 192.0.2.66 action pass
- tc filter add dev $ul1 egress pref 222 prot ipv4 \
- flower dst_ip 192.0.2.82 action pass
}
sw1_destroy()
{
- tc qdisc del dev $ul1 clsact
-
ip route del vrf v$ol1 192.0.2.16/28
ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
@@ -139,10 +131,18 @@
ip route add vrf v$ol2 192.0.2.0/28 \
nexthop dev g2a \
nexthop dev g2b
+
+ tc qdisc add dev $ul2 clsact
+ tc filter add dev $ul2 ingress pref 111 prot 802.1Q \
+ flower vlan_id 111 action pass
+ tc filter add dev $ul2 ingress pref 222 prot 802.1Q \
+ flower vlan_id 222 action pass
}
sw2_destroy()
{
+ tc qdisc del dev $ul2 clsact
+
ip route del vrf v$ol2 192.0.2.0/28
ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
@@ -187,12 +187,16 @@
sw1_create
sw2_create
h2_create
+
+ forwarding_enable
}
cleanup()
{
pre_cleanup
+ forwarding_restore
+
h2_destroy
sw2_destroy
sw1_destroy
@@ -211,15 +215,15 @@
nexthop dev g1a weight $weight1 \
nexthop dev g1b weight $weight2
- local t0_111=$(tc_rule_stats_get $ul1 111 egress)
- local t0_222=$(tc_rule_stats_get $ul1 222 egress)
+ local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
+ local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
ip vrf exec v$h1 \
$MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
-d 1msec -t udp "sp=1024,dp=0-32768"
- local t1_111=$(tc_rule_stats_get $ul1 111 egress)
- local t1_222=$(tc_rule_stats_get $ul1 222 egress)
+ local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
+ local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
local d111=$((t1_111 - t0_111))
local d222=$((t1_222 - t0_222))
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 02bac8a..d982650 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -338,6 +338,17 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
kvm_timer_schedule(vcpu);
+ /*
+ * If we're about to block (most likely because we've just hit a
+ * WFI), we need to sync back the state of the GIC CPU interface
+ * so that we have the latest PMR and group enables. This ensures
+ * that kvm_arch_vcpu_runnable has up-to-date data to decide
+ * whether we have pending interrupts.
+ */
+ preempt_disable();
+ kvm_vgic_vmcr_sync(vcpu);
+ preempt_enable();
+
kvm_vgic_v4_enable_doorbell(vcpu);
}
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 69b892a..57281c1 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -495,10 +495,17 @@ void vgic_v2_load(struct kvm_vcpu *vcpu)
kvm_vgic_global_state.vctrl_base + GICH_APR);
}
-void vgic_v2_put(struct kvm_vcpu *vcpu)
+void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu)
{
struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
cpu_if->vgic_vmcr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_VMCR);
+}
+
+void vgic_v2_put(struct kvm_vcpu *vcpu)
+{
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+
+ vgic_v2_vmcr_sync(vcpu);
cpu_if->vgic_apr = readl_relaxed(kvm_vgic_global_state.vctrl_base + GICH_APR);
}
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 3f2350a..5c55995 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -674,12 +674,17 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)
__vgic_v3_activate_traps(vcpu);
}
-void vgic_v3_put(struct kvm_vcpu *vcpu)
+void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
if (likely(cpu_if->vgic_sre))
cpu_if->vgic_vmcr = kvm_call_hyp(__vgic_v3_read_vmcr);
+}
+
+void vgic_v3_put(struct kvm_vcpu *vcpu)
+{
+ vgic_v3_vmcr_sync(vcpu);
kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index c5165e3..250cd72 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -902,6 +902,17 @@ void kvm_vgic_put(struct kvm_vcpu *vcpu)
vgic_v3_put(vcpu);
}
+void kvm_vgic_vmcr_sync(struct kvm_vcpu *vcpu)
+{
+ if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
+ return;
+
+ if (kvm_vgic_global_state.type == VGIC_V2)
+ vgic_v2_vmcr_sync(vcpu);
+ else
+ vgic_v3_vmcr_sync(vcpu);
+}
+
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h
index a9002471..d5e4542 100644
--- a/virt/kvm/arm/vgic/vgic.h
+++ b/virt/kvm/arm/vgic/vgic.h
@@ -204,6 +204,7 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
void vgic_v2_init_lrs(void);
void vgic_v2_load(struct kvm_vcpu *vcpu);
void vgic_v2_put(struct kvm_vcpu *vcpu);
+void vgic_v2_vmcr_sync(struct kvm_vcpu *vcpu);
void vgic_v2_save_state(struct kvm_vcpu *vcpu);
void vgic_v2_restore_state(struct kvm_vcpu *vcpu);
@@ -234,6 +235,7 @@ bool vgic_v3_check_base(struct kvm *kvm);
void vgic_v3_load(struct kvm_vcpu *vcpu);
void vgic_v3_put(struct kvm_vcpu *vcpu);
+void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu);
bool vgic_has_its(struct kvm *kvm);
int kvm_vgic_register_its_device(void);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2b36a51..4a584a5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -2317,6 +2317,29 @@ static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
#endif
}
+/*
+ * Unlike kvm_arch_vcpu_runnable, this function is called outside
+ * a vcpu_load/vcpu_put pair. However, for most architectures
+ * kvm_arch_vcpu_runnable does not require vcpu_load.
+ */
+bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
+{
+ return kvm_arch_vcpu_runnable(vcpu);
+}
+
+static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
+{
+ if (kvm_arch_dy_runnable(vcpu))
+ return true;
+
+#ifdef CONFIG_KVM_ASYNC_PF
+ if (!list_empty_careful(&vcpu->async_pf.done))
+ return true;
+#endif
+
+ return false;
+}
+
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
struct kvm *kvm = me->kvm;
@@ -2346,7 +2369,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
continue;
if (vcpu == me)
continue;
- if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
+ if (swait_active(&vcpu->wq) && !vcpu_dy_runnable(vcpu))
continue;
if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
continue;
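kvm_arch_dy_runnable() uses the same mechanism as the two perf hooks earlier in this series: a __weak default definition that an architecture can replace by providing a strong symbol of the same name. The mechanism in isolation (GCC/Clang extension):

    #include <stdio.h>

    /* Weak default: used unless another translation unit linked into
     * the program supplies a strong definition of arch_hook().
     */
    __attribute__((weak)) int arch_hook(void)
    {
            return 0;
    }

    int main(void)
    {
            printf("arch_hook() = %d\n", arch_hook());
            return 0;
    }

Linking in an object that defines a strong `int arch_hook(void)` silently replaces the weak body at link time, which is how s390 overrides arch__fix_module_text_start() and arch__symbols__fixup_end() above.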