Merge "msm:ipa: enable SMMU for MHI PRIME channels"
diff --git a/Documentation/devicetree/bindings/arm/msm/proxy-client.txt b/Documentation/devicetree/bindings/arm/msm/proxy-client.txt
new file mode 100644
index 0000000..29cfaf9
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/proxy-client.txt
@@ -0,0 +1,34 @@
+Bus Proxy Client Bindings
+
+Bus proxy client provides means to cast proxy bandwidth votes during bootup
+which is removed at the end of boot. This feature can be used in situations
+where a shared resource can be scaled between several possible performance
+levels and hardware requires that it be at a high level at the beginning of
+boot before the client has probed and voted for required bandwidth.
+
+Required properties:
+- compatible: Must be "qcom,bus-proxy-client".
+
+Optional properties:
+- qcom,msm-bus,name: String representing the client-name.
+- qcom,msm-bus,num-cases: Total number of usecases.
+- qcom,msm-bus,active-only: Boolean context flag for requests in active or
+	dual (active & sleep) context.
+- qcom,msm-bus,num-paths: Total number of master-slave pairs.
+- qcom,msm-bus,vectors-KBps: Arrays of unsigned integers representing:
+ master-id, slave-id, arbitrated bandwidth
+ in KBps, instantaneous bandwidth in KBps.
+
+Example:
+
+ qcom,proxy-client {
+ compatible = "qcom,bus-proxy-client";
+ qcom,msm-bus,name = "proxy_client";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>, <23 512 0 0>,
+ <22 512 0 6400000>, <23 512 0 6400000>,
+ <22 512 0 6400000>, <23 512 0 6400000>;
+ };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
index f58755c..58c4e29 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gpucc.txt
@@ -3,7 +3,8 @@
Required properties :
- compatible: shall contain one of the following:
- "qcom,gpucc-kona".
+ "qcom,gpucc-kona"
+ "qcom,gpucc-lito".
- reg: shall contain base register offset and size.
- reg-names: names of registers listed in the same order as in the reg property.
Must contain "cc_base".
diff --git a/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt b/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
index 4f91bba..3283ff0 100644
--- a/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,rpmh-clk.txt
@@ -9,6 +9,7 @@
- compatible : Shall contain one of the following:
"qcom,kona-rpmh-clk",
"qcom,sdm845-rpmh-clk"
+ "qcom,lito-rpmh-clk"
- #clock-cells : must contain 1
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index 1ba6974..625e5d8 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -59,6 +59,8 @@
a pipe reset via the IPA uC is required
- qcom,ipa-wdi2: Boolean context flag to indicate whether
using wdi-2.0 or not
+- qcom,ipa-wdi3-over-gsi: Boolean context flag to indicate whether
+ using wdi-3.0 or not
- qcom,bandwidth-vote-for-ipa: Boolean context flag to indicate whether
ipa clock voting is done by bandwidth
voting via msm-bus-scale driver or not
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt
index ac8fec3..e37bbb7 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen4.txt
@@ -435,6 +435,30 @@
resolution of monotonic SOC under CAPACITY_RAW property
during charging in the scale of 0-10000.
+- qcom,soc-scale-mode-en
+ Usage: optional
+ Value type: <boolean>
+ Definition: A boolean property that when specified will enable scaling
+ of the SOC linearly, based on the filtered battery voltage
+ after crossing below a Vbatt threshold.
+
+- qcom,soc-scale-vbatt-mv
+ Usage: optional
+ Value type: <u32>
+ Definition: Threshold voltage to decide when SOC should
+ be scaled based on filtered voltage when
+ qcom,soc-scale-mode-en is specified. If this
+ is not specified, then the default value is 3400.
+ Unit is in mV.
+
+- qcom,soc-scale-time-ms
+ Usage: optional
+ Value type: <u32>
+ Definition: Timer value for doing SOC calculation based on
+ filtered voltage when qcom,soc-scale-mode-en is
+ specified. If this is not specified, then the
+ default value is 10000. Unit is in ms.
+
==========================================================
Second Level Nodes - Peripherals managed by FG Gen4 driver
==========================================================
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
index e81b8e7..16f8750 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
@@ -280,6 +280,12 @@
Value type: bool
Definition: Boolean flag which when present disables USB-PD operation.
+- qcom,lpd-disable
+ Usage: optional
+ Value type: bool
+ Definition: Boolean flag which when present disables liquid presence
+ detection.
+
- qcom,hw-die-temp-mitigation
Usage: optional
Value type: bool
diff --git a/Documentation/devicetree/bindings/qbt_handler/qbt_handler.txt b/Documentation/devicetree/bindings/qbt_handler/qbt_handler.txt
new file mode 100644
index 0000000..168aa24
--- /dev/null
+++ b/Documentation/devicetree/bindings/qbt_handler/qbt_handler.txt
@@ -0,0 +1,35 @@
+Qualcomm Technologies, Inc. QBT_HANDLER Specific Bindings
+
+QBT is a fingerprint sensor ASIC capable of performing fingerprint image scans
+and detecting finger presence on the sensor using programmable firmware.
+
+=======================
+Required Node Structure
+=======================
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: "qcom,qbt-handler".
+
+- qcom,ipc-gpio
+ Usage: required
+ Value type: <phandle>
+ Definition: phandle for GPIO to be used for IPC.
+
+- qcom,finger-detect-gpio
+ Usage: required
+ Value type: <phandle>
+ Definition: phandle for GPIO to be used for finger detect.
+
+=======
+Example
+=======
+
+qcom,qbt_handler {
+ compatible = "qcom,qbt-handler";
+ qcom,ipc-gpio = <&tlmm 23 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&key_home_default>;
+ qcom,finger-detect-gpio = <&pm8150_gpios 1 0>;
+};
diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt
index bb5f74f..db50c09 100644
--- a/Documentation/devicetree/bindings/sound/wcd_codec.txt
+++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt
@@ -8,6 +8,7 @@
soundwire core registers.
- clock-names : clock names defined for WSA macro
- clocks : clock handles defined for WSA macro
+ - qcom,default-clk-id: Default clk ID used for WSA macro
- qcom,wsa-swr-gpios: phandle for SWR data and clock GPIOs of WSA macro
- qcom,wsa-bcl-pmic-params: u8 array of PMIC ID, SID and PPID in same order
required to be configured to receive interrupts
@@ -24,6 +25,7 @@
<&clock_audio_wsa_2 0>;
qcom,wsa-swr-gpios = &wsa_swr_gpios;
qcom,wsa-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
+ qcom,default-clk-id = <TX_CORE_CLK>;
swr_0: wsa_swr_master {
compatible = "qcom,swr-mstr";
wsa881x_1: wsa881x@20170212 {
@@ -43,6 +45,7 @@
soundwire core registers.
- clock-names : clock names defined for VA macro
- clocks : clock handles defined for VA macro
+ - qcom,default-clk-id: Default clk ID used for VA macro
- va-vdd-micb-supply: phandle of mic bias supply's regulator device tree node
- qcom,va-vdd-micb-voltage: mic bias supply's voltage level min and max in mV
- qcom,va-vdd-micb-current: mic bias supply's max current in mA
@@ -61,6 +64,7 @@
reg = <0x0C490000 0x0>;
clock-names = "va_core_clk";
clocks = <&clock_audio_va 0>;
+ qcom,default-clk-id = <TX_CORE_CLK>;
va-vdd-micb-supply = <&S4A>;
qcom,va-vdd-micb-voltage = <1800000 1800000>;
qcom,va-vdd-micb-current = <11200>;
@@ -78,6 +82,7 @@
soundwire core registers.
- clock-names : clock names defined for RX macro
- clocks : clock handles defined for RX macro
+ - qcom,default-clk-id: Default clk ID used for RX macro
- qcom,rx-swr-gpios: phandle for SWR data and clock GPIOs of RX macro
- qcom,rx_mclk_mode_muxsel: register address for RX macro MCLK mode mux select
- qcom,rx-bcl-pmic-params: u8 array of PMIC ID, SID and PPID in same order
@@ -96,6 +101,7 @@
qcom,rx-swr-gpios = <&rx_swr_gpios>;
qcom,rx_mclk_mode_muxsel = <0x62C25020>;
qcom,rx-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
+ qcom,default-clk-id = <TX_CORE_CLK>;
swr_1: rx_swr_master {
compatible = "qcom,swr-mstr";
wcd938x_rx_slave: wcd938x-rx-slave {
@@ -220,3 +226,35 @@
qcom,cdc-on-demand-supplies = "cdc-vdd-buck",
"cdc-vdd-mic-bias";
};
+
+Bolero Clock Resource Manager
+
+Required Properties:
+ - compatible = "qcom,bolero-clk-rsc-mngr";
+ - qcom,fs-gen-sequence: Register sequence for fs clock generation
+ - clock-names : clock names defined for WSA macro
+ - clocks : clock handles defined for WSA macro
+
+Optional Properties:
+ - qcom,rx_mclk_mode_muxsel: register address for RX macro MCLK mode mux select
+ - qcom,wsa_mclk_mode_muxsel: register address for WSA macro MCLK mux select
+ - qcom,va_mclk_mode_muxsel: register address for VA macro MCLK mode mux select
+
+Example:
+&bolero {
+ bolero-clock-rsc-manager {
+ compatible = "qcom,bolero-clk-rsc-mngr";
+ qcom,fs-gen-sequence = <0x3000 0x1>,
+ <0x3004 0x1>, <0x3080 0x2>;
+ qcom,rx_mclk_mode_muxsel = <0x033240D8>;
+ qcom,wsa_mclk_mode_muxsel = <0x033220D8>;
+ qcom,va_mclk_mode_muxsel = <0x033A0000>;
+ clock-names = "tx_core_clk", "tx_npl_clk", "rx_core_clk",
+ "rx_npl_clk", "wsa_core_clk", "wsa_npl_clk",
+ "va_core_clk", "va_npl_clk";
+ clocks = <&clock_audio_tx_1 0>, <&clock_audio_tx_2 0>,
+ <&clock_audio_rx_1 0>, <&clock_audio_rx_2 0>,
+ <&clock_audio_wsa_1 0>, <&clock_audio_wsa_2 0>,
+ <&clock_audio_va_1 0>, <&clock_audio_va_2 0>;
+ };
+};
diff --git a/Makefile b/Makefile
index bc15999a..c05200f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 26
+SUBLEVEL = 27
EXTRAVERSION =
NAME = "People's Front"
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index 8da87fe..99e6d89 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
-static inline __attribute__ ((const)) int __ffs(unsigned long word)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
{
if (!word)
return word;
@@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x)
/*
* __ffs: Similar to ffs, but zero based (0-31)
*/
-static inline __attribute__ ((const)) int __ffs(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
{
- int n;
+ unsigned long n;
asm volatile(
" ffs.f %0, %1 \n" /* 0:31; 31(Z) if src 0 */
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index e8d9fb4..5c66633 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -18,6 +18,8 @@
#include <asm/arcregs.h>
#include <asm/irqflags.h>
+#define ARC_PATH_MAX 256
+
/*
* Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
* -Prints 3 regs per line and a CR.
@@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs)
print_reg_file(&(cregs->r13), 13);
}
-static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+static void print_task_path_n_nm(struct task_struct *tsk)
{
char *path_nm = NULL;
struct mm_struct *mm;
struct file *exe_file;
+ char buf[ARC_PATH_MAX];
mm = get_task_mm(tsk);
if (!mm)
@@ -72,7 +75,7 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
mmput(mm);
if (exe_file) {
- path_nm = file_path(exe_file, buf, 255);
+ path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
fput(exe_file);
}
@@ -80,10 +83,9 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
}
-static void show_faulting_vma(unsigned long address, char *buf)
+static void show_faulting_vma(unsigned long address)
{
struct vm_area_struct *vma;
- char *nm = buf;
struct mm_struct *active_mm = current->active_mm;
/* can't use print_vma_addr() yet as it doesn't check for
@@ -96,8 +98,11 @@ static void show_faulting_vma(unsigned long address, char *buf)
* if the container VMA is not found
*/
if (vma && (vma->vm_start <= address)) {
+ char buf[ARC_PATH_MAX];
+ char *nm = "?";
+
if (vma->vm_file) {
- nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+ nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
if (IS_ERR(nm))
nm = "?";
}
@@ -173,13 +178,8 @@ void show_regs(struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct callee_regs *cregs;
- char *buf;
- buf = (char *)__get_free_page(GFP_KERNEL);
- if (!buf)
- return;
-
- print_task_path_n_nm(tsk, buf);
+ print_task_path_n_nm(tsk);
show_regs_print_info(KERN_INFO);
show_ecr_verbose(regs);
@@ -189,7 +189,7 @@ void show_regs(struct pt_regs *regs)
(void *)regs->blink, (void *)regs->ret);
if (user_mode(regs))
- show_faulting_vma(regs->ret, buf); /* faulting code, not data */
+ show_faulting_vma(regs->ret); /* faulting code, not data */
pr_info("[STAT32]: 0x%08lx", regs->status32);
@@ -221,8 +221,6 @@ void show_regs(struct pt_regs *regs)
cregs = (struct callee_regs *)current->thread.callee_reg;
if (cregs)
show_callee_regs(cregs);
-
- free_page((unsigned long)buf);
}
void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
diff --git a/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
index 8e81f00..b9e7d0a 100644
--- a/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
@@ -4,12 +4,28 @@
*/
#include <dt-bindings/clock/qcom,audio-ext-clk.h>
+#include <dt-bindings/sound/qcom,bolero-clk-rsc.h>
#include <dt-bindings/sound/audio-codec-port-types.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include "kona-va-bolero.dtsi"
&bolero {
qcom,num-macros = <4>;
+ bolero-clk-rsc-mngr {
+ compatible = "qcom,bolero-clk-rsc-mngr";
+ qcom,fs-gen-sequence = <0x3000 0x1>,
+ <0x3004 0x1>, <0x3080 0x2>;
+ qcom,rx_mclk_mode_muxsel = <0x033240D8>;
+ qcom,wsa_mclk_mode_muxsel = <0x033220D8>;
+ qcom,va_mclk_mode_muxsel = <0x033A0000>;
+ clock-names = "tx_core_clk", "tx_npl_clk", "rx_core_clk", "rx_npl_clk",
+ "wsa_core_clk", "wsa_npl_clk", "va_core_clk", "va_npl_clk";
+ clocks = <&clock_audio_tx_1 0>, <&clock_audio_tx_2 0>,
+ <&clock_audio_rx_1 0>, <&clock_audio_rx_2 0>,
+ <&clock_audio_wsa_1 0>, <&clock_audio_wsa_2 0>,
+ <&clock_audio_va_1 0>, <&clock_audio_va_2 0>;
+ };
+
tx_macro: tx-macro@3220000 {
compatible = "qcom,tx-macro";
reg = <0x3220000 0x0>;
@@ -55,6 +71,7 @@
qcom,rx-swr-gpios = <&rx_swr_gpios>;
qcom,rx_mclk_mode_muxsel = <0x033240D8>;
qcom,rx-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
+ qcom,mux0-clk-id = <TX_CORE_CLK>;
swr1: rx_swr_master {
compatible = "qcom,swr-mstr";
#address-cells = <2>;
@@ -87,6 +104,7 @@
<&clock_audio_wsa_2 0>;
qcom,wsa-swr-gpios = <&wsa_swr_gpios>;
qcom,wsa-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
+ qcom,mux0-clk-id = <TX_CORE_CLK>;
swr0: wsa_swr_master {
compatible = "qcom,swr-mstr";
#address-cells = <2>;
@@ -390,4 +408,20 @@
qcom,codec-lpass-clk-id = <0x30D>;
#clock-cells = <1>;
};
+
+ clock_audio_va_1: va_core_clk {
+ compatible = "qcom,audio-ref-clk";
+ qcom,codec-ext-clk-src = <AUDIO_LPASS_MCLK>;
+ qcom,codec-lpass-ext-clk-freq = <19200000>;
+ qcom,codec-lpass-clk-id = <0x30B>;
+ #clock-cells = <1>;
+ };
+
+ clock_audio_va_2: va_npl_clk {
+ compatible = "qcom,audio-ref-clk";
+ qcom,codec-ext-clk-src = <AUDIO_LPASS_MCLK_8>;
+ qcom,codec-lpass-ext-clk-freq = <19200000>;
+ qcom,codec-lpass-clk-id = <0x310>;
+ #clock-cells = <1>;
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/kona-audio.dtsi b/arch/arm64/boot/dts/qcom/kona-audio.dtsi
index 929168ba..430def3 100644
--- a/arch/arm64/boot/dts/qcom/kona-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-audio.dtsi
@@ -38,6 +38,10 @@
compatible = "qcom,bolero-codec";
clock-names = "lpass_core_hw_vote";
clocks = <&lpass_core_hw_vote 0>;
+ bolero-clk-rsc-mngr {
+ compatible = "qcom,bolero-clk-rsc-mngr";
+ };
+
tx_macro: tx-macro@3220000 {
swr2: tx_swr_master {
};
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
index c2b1fdd..8b766c3 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
@@ -54,7 +54,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
- rgltr-load-current = <0 80000 1200000 0 0>;
+ rgltr-load-current = <0 80000 1200000 0 100000>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
@@ -91,7 +91,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
- rgltr-load-current = <0 80000 1200000 0 0>;
+ rgltr-load-current = <0 80000 1200000 0 100000>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
@@ -217,7 +217,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
- rgltr-load-current = <0 80000 1200000 0 0>;
+ rgltr-load-current = <0 80000 1200000 0 100000>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
index 65cdd1b..d90af20 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
@@ -54,7 +54,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
- rgltr-load-current = <0 80000 1200000 0 0>;
+ rgltr-load-current = <0 80000 1200000 0 100000>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
@@ -91,7 +91,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
- rgltr-load-current = <0 80000 1200000 0 0>;
+ rgltr-load-current = <0 80000 1200000 0 100000>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
@@ -217,7 +217,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
- rgltr-load-current = <0 80000 1200000 0 0>;
+ rgltr-load-current = <0 80000 1200000 0 100000>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
index 9348b4c..1593128 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
@@ -54,7 +54,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
- rgltr-load-current = <0 80000 1200000 0 0>;
+ rgltr-load-current = <0 80000 1200000 0 100000>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
@@ -91,7 +91,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
- rgltr-load-current = <0 80000 1200000 0 0>;
+ rgltr-load-current = <0 80000 1200000 0 100000>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk1_active
@@ -217,7 +217,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
- rgltr-load-current = <0 80000 1200000 0 0>;
+ rgltr-load-current = <0 80000 1200000 0 100000>;
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
diff --git a/arch/arm64/boot/dts/qcom/kona-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
index 3199342..391ed85 100644
--- a/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
@@ -114,6 +114,13 @@
linux,can-disable;
};
};
+
+ qcom,qbt_handler {
+ compatible = "qcom,qbt-handler";
+ qcom,ipc-gpio = <&tlmm 23 0>;
+ qcom,finger-detect-gpio = <&pm8150_gpios 1 0>;
+ status = "disabled";
+ };
};
&qupv3_se13_i2c {
diff --git a/arch/arm64/boot/dts/qcom/kona-coresight.dtsi b/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
index df7ce4a..6b80be3 100644
--- a/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
@@ -623,7 +623,7 @@
<13 64>,
<15 32>,
<16 64>,
- <17 64>,
+ <17 32>,
<18 64>,
<20 64>,
<21 64>,
diff --git a/arch/arm64/boot/dts/qcom/kona-cvp.dtsi b/arch/arm64/boot/dts/qcom/kona-cvp.dtsi
index d1c93ab..7092ba6 100644
--- a/arch/arm64/boot/dts/qcom/kona-cvp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-cvp.dtsi
@@ -20,16 +20,17 @@
/* Clocks */
clock-names = "gcc_video_axi0",
- "gcc_video_axi1", "cvp_clk";
+ "gcc_video_axi1", "cvp_clk", "core_clk";
clocks = <&clock_gcc GCC_VIDEO_AXI0_CLK>,
<&clock_gcc GCC_VIDEO_AXI1_CLK>,
- <&clock_videocc VIDEO_CC_MVS1C_CLK>;
+ <&clock_videocc VIDEO_CC_MVS1C_CLK>,
+ <&clock_videocc VIDEO_CC_MVS1_CLK>;
qcom,proxy-clock-names = "gcc_video_axi0", "gcc_video_axi1",
- "cvp_clk";
+ "cvp_clk", "core_clk";
- qcom,clock-configs = <0x0 0x0 0x1>;
- qcom,allowed-clock-rates = <403000000 520000000
- 549000000 666000000 800000000>;
+ qcom,clock-configs = <0x0 0x0 0x1 0x1>;
+ qcom,allowed-clock-rates = <239999999 338000000
+ 366000000 444000000>;
/* Buses */
bus_cnoc {
diff --git a/arch/arm64/boot/dts/qcom/kona-gpu.dtsi b/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
index 155e294..69df65a 100644
--- a/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
@@ -232,7 +232,6 @@
"gpu_cc_ahb";
qcom,secure_align_mask = <0xfff>;
- qcom,global_pt;
qcom,retention;
qcom,hyp_secure_alloc;
diff --git a/arch/arm64/boot/dts/qcom/kona-mhi.dtsi b/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
index a70b183..8cd7629 100644
--- a/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-mhi.dtsi
@@ -623,6 +623,15 @@
mhi,rsc-parent = <&mhi_netdev_0>;
};
+ mhi_qdss_dev_0 {
+ mhi,chan = "QDSS";
+ mhi,default-channel;
+ };
+
+ mhi_qdss_dev_1 {
+ mhi,chan = "IP_HW_QDSS";
+ };
+
mhi_qrtr {
mhi,chan = "IPCR";
qcom,net-id = <3>;
diff --git a/arch/arm64/boot/dts/qcom/kona-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
index 932f194..67ae102 100644
--- a/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
@@ -103,6 +103,14 @@
linux,can-disable;
};
};
+
+ qcom,qbt_handler {
+ compatible = "qcom,qbt-handler";
+ qcom,ipc-gpio = <&tlmm 23 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&key_home_default>;
+ qcom,finger-detect-gpio = <&pm8150_gpios 1 0>;
+ };
};
&qupv3_se13_i2c {
diff --git a/arch/arm64/boot/dts/qcom/kona-qrd.dtsi b/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
index 29b02de..c08d2ba 100644
--- a/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
@@ -195,6 +195,14 @@
linux,can-disable;
};
};
+
+ qcom,qbt_handler {
+ compatible = "qcom,qbt-handler";
+ qcom,ipc-gpio = <&tlmm 23 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&key_home_default>;
+ qcom,finger-detect-gpio = <&pm8150_gpios 1 0>;
+ };
};
&vreg_hap_boost {
@@ -458,6 +466,25 @@
qcom,platform-reset-gpio = <&tlmm 75 0>;
};
+&dsi_sw43404_amoled_video {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <1023>;
+ qcom,mdss-brightness-max-level = <255>;
+ qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sw43404_amoled_fhd_plus_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,mdss-dsi-bl-min-level = <1>;
+ qcom,mdss-dsi-bl-max-level = <1023>;
+ qcom,mdss-brightness-max-level = <255>;
+ qcom,platform-te-gpio = <&tlmm 66 0>;
+ qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
&sde_dsi {
qcom,dsi-default-panel = <&dsi_sw43404_amoled_cmd>;
};
diff --git a/arch/arm64/boot/dts/qcom/kona-regulators.dtsi b/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
index 81c7876..233b71a 100644
--- a/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-regulators.dtsi
@@ -315,9 +315,12 @@
<RPMH_REGULATOR_MODE_LPM
RPMH_REGULATOR_MODE_HPM>;
qcom,mode-threshold-currents = <0 10000>;
+ proxy-supply = <&pm8150_l14>;
L14A: pm8150_l14: regulator-pm8150-l14 {
regulator-name = "pm8150_l14";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ qcom,proxy-consumer-enable;
+ qcom,proxy-consumer-current = <62000>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1880000>;
qcom,init-voltage = <1800000>;
@@ -429,7 +432,6 @@
= <RPMH_REGULATOR_LEVEL_MAX>;
qcom,init-voltage-level
= <RPMH_REGULATOR_LEVEL_LOW_SVS>;
- regulator-always-on;
};
VDD_MMCX_LEVEL_AO: S4C_LEVEL_AO:
@@ -684,9 +686,12 @@
<RPMH_REGULATOR_MODE_LPM
RPMH_REGULATOR_MODE_HPM>;
qcom,mode-threshold-currents = <0 10000>;
+ proxy-supply = <&pm8150a_l11>;
L11C: pm8150a_l11: regulator-pm8150a-l11 {
regulator-name = "pm8150a_l11";
qcom,set = <RPMH_REGULATOR_SET_ALL>;
+ qcom,proxy-consumer-enable;
+ qcom,proxy-consumer-current = <857000>;
regulator-min-microvolt = <3104000>;
regulator-max-microvolt = <3304000>;
qcom,init-voltage = <3104000>;
diff --git a/arch/arm64/boot/dts/qcom/kona-sde.dtsi b/arch/arm64/boot/dts/qcom/kona-sde.dtsi
index 6158a8e..e28a2c5 100644
--- a/arch/arm64/boot/dts/qcom/kona-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-sde.dtsi
@@ -345,7 +345,6 @@
<&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
<&clock_dispcc DISP_CC_MDSS_DP_LINK_CLK>,
<&clock_dispcc DISP_CC_MDSS_DP_LINK_INTF_CLK>,
- <&clock_dispcc DISP_CC_MDSS_DP_CRYPTO_CLK>,
<&clock_dispcc DISP_CC_MDSS_DP_PIXEL_CLK_SRC>,
<&mdss_dp_pll DP_VCO_DIVIDED_CLK_SRC_MUX>,
<&clock_dispcc DISP_CC_MDSS_DP_PIXEL1_CLK_SRC>,
@@ -354,7 +353,7 @@
<&clock_dispcc DISP_CC_MDSS_DP_PIXEL1_CLK>;
clock-names = "core_aux_clk", "core_usb_ref_clk_src",
"core_usb_pipe_clk", "link_clk", "link_iface_clk",
- "crypto_clk", "pixel_clk_rcg", "pixel_parent",
+ "pixel_clk_rcg", "pixel_parent",
"pixel1_clk_rcg", "pixel1_parent",
"strm0_pixel_clk", "strm1_pixel_clk";
@@ -480,6 +479,7 @@
<0x0aeb8000 0x3000>;
reg-names = "mdp_phys",
"rot_vbif_phys";
+ status = "disabled";
#list-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi b/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
index 3ddc27e..6149297 100644
--- a/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
@@ -8,23 +8,14 @@
compatible = "qcom,va-macro";
reg = <0x3370000 0x0>;
clock-names = "va_core_clk";
- clocks = <&clock_audio_va 0>;
+ clocks = <&clock_audio_va_1 0>;
va-vdd-micb-supply = <&S4A>;
qcom,va-vdd-micb-voltage = <1800000 1800000>;
qcom,va-vdd-micb-current = <11200>;
qcom,va-dmic-sample-rate = <4800000>;
qcom,va-clk-mux-select = <1>;
qcom,va-island-mode-muxsel = <0x033A0000>;
- };
-};
-
-&soc {
- clock_audio_va: va_core_clk {
- compatible = "qcom,audio-ref-clk";
- qcom,codec-ext-clk-src = <AUDIO_LPASS_MCLK>;
- qcom,codec-lpass-ext-clk-freq = <19200000>;
- qcom,codec-lpass-clk-id = <0x30B>;
- #clock-cells = <1>;
+ qcom,mux0-clk-id = <TX_CORE_CLK>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/kona-vidc.dtsi b/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
index 67f8dbe..95bc7d5 100644
--- a/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
@@ -26,16 +26,15 @@
vcodec-supply = <&mvs0_gdsc>;
/* Clocks */
- clock-names = "gcc_video_axi0", "ahb_clk",
+ clock-names = "gcc_video_axi0",
"core_clk", "vcodec_clk";
clocks = <&clock_gcc GCC_VIDEO_AXI0_CLK>,
- <&clock_videocc VIDEO_CC_AHB_CLK>,
<&clock_videocc VIDEO_CC_MVS0C_CLK>,
<&clock_videocc VIDEO_CC_MVS0_CLK>;
- qcom,proxy-clock-names = "gcc_video_axi0", "ahb_clk",
+ qcom,proxy-clock-names = "gcc_video_axi0",
"core_clk", "vcodec_clk";
/* Mask: Bit0: Clock Scaling, Bit1: Mem Retention*/
- qcom,clock-configs = <0x0 0x0 0x1 0x1>;
+ qcom,clock-configs = <0x0 0x1 0x1>;
qcom,allowed-clock-rates = <239999999 338000000
366000000 444000000>;
resets = <&clock_gcc GCC_VIDEO_AXI0_CLK_ARES>,
diff --git a/arch/arm64/boot/dts/qcom/kona.dtsi b/arch/arm64/boot/dts/qcom/kona.dtsi
index 4106d92..8c1387b 100644
--- a/arch/arm64/boot/dts/qcom/kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona.dtsi
@@ -627,7 +627,7 @@
alloc-ranges = <0x0 0x00000000 0x0 0xffffffff>;
reusable;
alignment = <0x0 0x400000>;
- size = <0x0 0x800000>;
+ size = <0x0 0xc00000>;
};
qseecom_mem: qseecom_region {
@@ -945,6 +945,18 @@
};
};
+ bus_proxy_client: qcom,bus_proxy_client {
+ compatible = "qcom,bus-proxy-client";
+ qcom,msm-bus,name = "bus-proxy-client";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>, <23 512 0 0>,
+ <22 512 1500000 1500000>, <23 512 1500000 1500000>;
+ qcom,msm-bus,active-only;
+ status = "ok";
+ };
+
keepalive_opp_table: keepalive-opp-table {
compatible = "operating-points-v2";
opp-1 {
@@ -1077,23 +1089,30 @@
npu_npu_ddr_bwmon: qcom,npu-npu-ddr-bwmon@60300 {
compatible = "qcom,bimc-bwmon4";
- reg = <0x00060300 0x300>, <0x00060400 0x200>;
+ reg = <0x00060400 0x300>, <0x00060300 0x200>;
reg-names = "base", "global_base";
- interrupts = <GIC_SPI 476 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 347 IRQ_TYPE_LEVEL_HIGH>;
qcom,mport = <0>;
qcom,hw-timer-hz = <19200000>;
qcom,target-dev = <&npu_npu_ddr_bw>;
qcom,count-unit = <0x10000>;
};
- npu_npu_ddr_bwmon_dsp: qcom,npu-npu-ddr-bwmoni_dsp@70200 {
+ npudsp_npu_ddr_bw: qcom,npudsp-npu-ddr-bw {
+ compatible = "qcom,devbw";
+ governor = "performance";
+ qcom,src-dst-ports = <MSM_BUS_MASTER_NPU MSM_BUS_SLAVE_EBI_CH0>;
+ operating-points-v2 = <&suspendable_ddr_bw_opp_table>;
+ };
+
+ npudsp_npu_ddr_bwmon: qcom,npudsp-npu-ddr-bwmon@70200 {
compatible = "qcom,bimc-bwmon4";
- reg = <0x00070200 0x300>, <0x00070300 0x200>;
+ reg = <0x00070300 0x300>, <0x00070200 0x200>;
reg-names = "base", "global_base";
interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>;
qcom,mport = <0>;
qcom,hw-timer-hz = <19200000>;
- qcom,target-dev = <&npu_npu_ddr_bw>;
+ qcom,target-dev = <&npudsp_npu_ddr_bw>;
qcom,count-unit = <0x10000>;
};
@@ -1848,11 +1867,9 @@
lanes-per-direction = <2>;
clock-names = "ref_clk_src",
- "ref_clk",
"ref_aux_clk";
clocks = <&clock_rpmh RPMH_CXO_CLK>,
- <&clock_gcc GCC_UFS_1X_CLKREF_EN>,
- <&clock_gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+ <&clock_gcc GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK>;
status = "disabled";
};
@@ -1879,11 +1896,11 @@
"rx_lane0_sync_clk",
"rx_lane1_sync_clk";
clocks =
- <&clock_gcc GCC_UFS_PHY_AXI_CLK>,
- <&clock_gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+ <&clock_gcc GCC_UFS_PHY_AXI_HW_CTL_CLK>,
+ <&clock_gcc GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK>,
<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
- <&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
- <&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>,
+ <&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK>,
+ <&clock_gcc GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK>,
<&clock_rpmh RPMH_CXO_CLK>,
<&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
@@ -1893,7 +1910,7 @@
<0 0>,
<0 0>,
<37500000 300000000>,
- <75000000 300000000>,
+ <37500000 300000000>,
<0 0>,
<0 0>,
<0 0>,
@@ -2222,6 +2239,7 @@
ipa_hw: qcom,ipa@1e00000 {
compatible = "qcom,ipa";
+ mboxes = <&qmp_aop 0>;
reg =
<0x1e00000 0x84000>,
<0x1e04000 0x23000>;
diff --git a/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi b/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
index f093a93..dfb2644 100644
--- a/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-gdsc.dtsi
@@ -107,7 +107,7 @@
};
gpu_cx_gdsc: qcom,gdsc@3d9106c {
- compatible = "regulator-fixed";
+ compatible = "qcom,gdsc";
reg = <0x3d9106c 0x4>;
regulator-name = "gpu_cx_gdsc";
hw-ctrl-addr = <&gpu_cx_hw_ctrl>;
@@ -128,7 +128,7 @@
};
gpu_gx_gdsc: qcom,gdsc@3d9100c {
- compatible = "regulator-fixed";
+ compatible = "qcom,gdsc";
reg = <0x3d9100c 0x4>;
regulator-name = "gpu_gx_gdsc";
domain-addr = <&gpu_gx_domain_addr>;
diff --git a/arch/arm64/boot/dts/qcom/lito-ion.dtsi b/arch/arm64/boot/dts/qcom/lito-ion.dtsi
new file mode 100644
index 0000000..e68f421
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/lito-ion.dtsi
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+&soc {
+ qcom,ion {
+ compatible = "qcom,msm-ion";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ system_heap: qcom,ion-heap@25 {
+ reg = <25>;
+ qcom,ion-heap-type = "SYSTEM";
+ };
+
+ system_secure_heap: qcom,ion-heap@9 {
+ reg = <9>;
+ qcom,ion-heap-type = "SYSTEM_SECURE";
+ };
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/lito-rumi.dtsi b/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
index 093f3d6..9cb6c487 100644
--- a/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-rumi.dtsi
@@ -35,6 +35,27 @@
usb_nop_phy: usb_nop_phy {
compatible = "usb-nop-xceiv";
};
+
+ cxo: bi_tcxo {
+ compatible = "fixed-factor-clock";
+ clocks = <&xo_board>;
+ clock-mult = <1>;
+ clock-div = <2>;
+ #clock-cells = <0>;
+ };
+
+ cxo_a: bi_tcxo_ao {
+ compatible = "fixed-factor-clock";
+ clocks = <&xo_board>;
+ clock-mult = <1>;
+ clock-div = <2>;
+ #clock-cells = <0>;
+ };
+};
+
+&rpmhcc {
+ compatible = "qcom,dummycc";
+ clock-output-names = "rpmh_clocks";
};
&usb0 {
diff --git a/arch/arm64/boot/dts/qcom/lito.dtsi b/arch/arm64/boot/dts/qcom/lito.dtsi
index e1e8b2c..609691a 100644
--- a/arch/arm64/boot/dts/qcom/lito.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito.dtsi
@@ -850,28 +850,6 @@
};
};
- cxo: bi_tcxo {
- compatible = "fixed-factor-clock";
- clocks = <&xo_board>;
- clock-mult = <1>;
- clock-div = <2>;
- #clock-cells = <0>;
- };
-
- cxo_a: bi_tcxo_ao {
- compatible = "fixed-factor-clock";
- clocks = <&xo_board>;
- clock-mult = <1>;
- clock-div = <2>;
- #clock-cells = <0>;
- };
-
- rpmhcc: qcom,rpmhclk {
- compatible = "qcom,dummycc";
- clock-output-names = "rpmh_clocks";
- #clock-cells = <1>;
- };
-
aopcc: qcom,aopclk {
compatible = "qcom,dummycc";
clock-output-names = "qdss_clocks";
@@ -912,7 +890,7 @@
};
dispcc: qcom,dispcc {
- compatible = "qcom,lito-dispcc";
+ compatible = "qcom,lito-dispcc", "syscon";
reg = <0xaf00000 0x20000>;
reg-names = "cc_base";
clock-names = "cfg_ahb_clk";
@@ -922,6 +900,16 @@
#reset-cells = <1>;
};
+ gpucc: qcom,gpucc {
+ compatible = "qcom,gpucc-lito", "syscon";
+ reg = <0x3d90000 0x9000>;
+ reg-names = "cc_base";
+ vdd_cx-supply = <&VDD_CX_LEVEL>;
+ vdd_mx-supply = <&VDD_MX_LEVEL>;
+ #clock-cells = <1>;
+ #reset-cells = <1>;
+ };
+
ufsphy_mem: ufsphy_mem@1d87000 {
reg = <0x1d87000 0xe00>; /* PHY regs */
reg-names = "phy_mem";
@@ -1055,13 +1043,6 @@
#reset-cells = <1>;
};
- gpucc: qcom,gpucc {
- compatible = "qcom,dummycc";
- clock-output-names = "gpucc_clocks";
- #clock-cells = <1>;
- #reset-cells = <1>;
- };
-
apps_rsc: rsc@18200000 {
label = "apps_rsc";
compatible = "qcom,rpmh-rsc";
@@ -1082,6 +1063,11 @@
system_pm {
compatible = "qcom,system-pm";
};
+
+ rpmhcc: qcom,rpmhclk {
+ compatible = "qcom,lito-rpmh-clk";
+ #clock-cells = <1>;
+ };
};
disp_rsc: rsc@af20000 {
@@ -1469,6 +1455,7 @@
#include "lito-regulators.dtsi"
#include "lito-smp2p.dtsi"
#include "lito-usb.dtsi"
+#include "lito-ion.dtsi"
&ufs_phy_gdsc {
status = "ok";
@@ -1536,10 +1523,13 @@
};
&gpu_cx_gdsc {
+ parent-supply = <&VDD_CX_LEVEL>;
status = "ok";
};
&gpu_gx_gdsc {
+ parent-supply = <&VDD_GFX_LEVEL>;
+ vdd_parent-supply = <&VDD_GFX_LEVEL>;
status = "ok";
};
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
index 2862a0f..20f1a38 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
@@ -419,22 +419,27 @@
kgsl_iommu_test_device {
compatible = "iommu-debug-test";
iommus = <&kgsl_smmu 0x7 0>;
+ qcom,iommu-dma = "disabled";
};
kgsl_iommu_coherent_test_device {
+ status = "disabled";
compatible = "iommu-debug-test";
iommus = <&kgsl_smmu 0x9 0>;
+ qcom,iommu-dma = "disabled";
dma-coherent;
};
apps_iommu_test_device {
compatible = "iommu-debug-test";
iommus = <&apps_smmu 0x21 0>;
+ qcom,iommu-dma = "disabled";
};
apps_iommu_coherent_test_device {
compatible = "iommu-debug-test";
iommus = <&apps_smmu 0x23 0>;
+ qcom,iommu-dma = "disabled";
dma-coherent;
};
};
diff --git a/arch/arm64/boot/dts/qcom/pm8150.dtsi b/arch/arm64/boot/dts/qcom/pm8150.dtsi
index e81f65f..29495c1 100644
--- a/arch/arm64/boot/dts/qcom/pm8150.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150.dtsi
@@ -48,6 +48,8 @@
compatible = "qcom,spmi-temp-alarm";
reg = <0x2400 0x100>;
interrupts = <0x0 0x24 0x0 IRQ_TYPE_EDGE_BOTH>;
+ io-channels = <&pm8150_vadc ADC_DIE_TEMP>;
+ io-channel-names = "thermal";
#thermal-sensor-cells = <0>;
qcom,temperature-threshold-set = <1>;
};
diff --git a/arch/arm64/boot/dts/qcom/pm8150b.dtsi b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
index e57d394..c427d85 100644
--- a/arch/arm64/boot/dts/qcom/pm8150b.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150b.dtsi
@@ -31,6 +31,8 @@
compatible = "qcom,spmi-temp-alarm";
reg = <0x2400 0x100>;
interrupts = <0x2 0x24 0x0 IRQ_TYPE_EDGE_BOTH>;
+ io-channels = <&pm8150b_vadc ADC_DIE_TEMP>;
+ io-channel-names = "thermal";
#thermal-sensor-cells = <0>;
qcom,temperature-threshold-set = <1>;
};
diff --git a/arch/arm64/boot/dts/qcom/pm8150l.dtsi b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
index bc9e7fd..da4e4e5 100644
--- a/arch/arm64/boot/dts/qcom/pm8150l.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8150l.dtsi
@@ -31,6 +31,8 @@
compatible = "qcom,spmi-temp-alarm";
reg = <0x2400 0x100>;
interrupts = <0x4 0x24 0x0 IRQ_TYPE_EDGE_BOTH>;
+ io-channels = <&pm8150l_vadc ADC_DIE_TEMP>;
+ io-channel-names = "thermal";
#thermal-sensor-cells = <0>;
qcom,temperature-threshold-set = <1>;
};
@@ -360,6 +362,14 @@
};
};
+ pm8150l_pwm: qcom,pwms@bc00 {
+ compatible = "qcom,pwm-lpg";
+ reg = <0xbc00 0x200>;
+ reg-names = "lpg-base";
+ #pwm-cells = <2>;
+ qcom,num-lpg-channels = <2>;
+ };
+
pm8150l_rgb_led: qcom,leds@d000 {
compatible = "qcom,tri-led";
reg = <0xd000 0x100>;
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index ab3c611..2fdb64d 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -281,6 +281,7 @@
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_UEVENT=y
@@ -441,6 +442,8 @@
CONFIG_USB_CONFIGFS_F_CCID=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_TYPEC=y
CONFIG_USB_PD_POLICY=y
CONFIG_QPNP_USB_PDPHY=y
@@ -513,6 +516,7 @@
CONFIG_OVERRIDE_MEMORY_LIMIT=y
CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_MSM_QBT_HANDLER=y
CONFIG_QCOM_IPCC=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_KONA_LLCC=y
@@ -588,6 +592,8 @@
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@@ -600,6 +606,7 @@
CONFIG_SDCARD_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
+CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@@ -612,6 +619,7 @@
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index d4a2fbd..2210bec 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -289,6 +289,7 @@
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
+CONFIG_SCSI_UFS_QCOM_ICE=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_UEVENT=y
@@ -452,6 +453,8 @@
CONFIG_USB_CONFIGFS_F_CCID=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_TYPEC=y
CONFIG_USB_PD_POLICY=y
CONFIG_QPNP_USB_PDPHY=y
@@ -529,6 +532,7 @@
CONFIG_OVERRIDE_MEMORY_LIMIT=y
CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_MSM_QBT_HANDLER=y
CONFIG_QCOM_IPCC=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_KONA_LLCC=y
@@ -610,6 +614,8 @@
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_SECURITY=y
CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_QUOTA=y
CONFIG_QUOTA_NETLINK_INTERFACE=y
CONFIG_QFMT_V2=y
@@ -624,6 +630,7 @@
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
+CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
@@ -637,6 +644,7 @@
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_CRYPTO_DEV_QCOM_ICE=y
CONFIG_XZ_DEC=y
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
diff --git a/arch/arm64/configs/vendor/lito-perf_defconfig b/arch/arm64/configs/vendor/lito-perf_defconfig
new file mode 100644
index 0000000..870c6ce
--- /dev/null
+++ b/arch/arm64/configs/vendor/lito-perf_defconfig
@@ -0,0 +1,473 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+# CONFIG_ZONE_DMA32 is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_LITO=y
+CONFIG_PCI=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_HZ_100=y
+CONFIG_SECCOMP=y
+# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_ARM_QCOM_CPUFREQ_HW=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_CFQ_GROUP_IOSCHED=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_QRTR=y
+CONFIG_QRTR_SMD=y
+CONFIG_BT=y
+CONFIG_CFG80211=y
+CONFIG_RFKILL=y
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_RMNET=y
+CONFIG_PHYLIB=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_TTY_PRINTK=y
+CONFIG_HW_RANDOM=y
+CONFIG_DIAG_CHAR=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_LITO=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_QCOM=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_THERMAL=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_QCOM_SPMI_TEMP_ALARM=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_DRM=y
+CONFIG_DRM_MSM_REGISTER_LOGGING=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_USB_EHSET_TEST_FIXTURE=y
+CONFIG_USB_LINK_LAYER_TEST=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_QCOM_EMU_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=900
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_USB_CONFIGFS_F_CDEV=y
+CONFIG_USB_CONFIGFS_F_CCID=y
+CONFIG_USB_CONFIGFS_F_QDSS=y
+CONFIG_USB_CONFIGFS_F_GSI=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_EDAC=y
+CONFIG_EDAC_KRYO_ARM64=y
+CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PM8XXX=y
+CONFIG_DMADEVICES=y
+CONFIG_UIO=y
+CONFIG_UIO_MSM_SHAREDMEM=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_SPS=y
+CONFIG_SPS_SUPPORT_NDP_BAM=y
+CONFIG_QCOM_GENI_SE=y
+# CONFIG_QCOM_A53PLL is not set
+CONFIG_QCOM_CLK_RPMH=y
+CONFIG_MSM_CLK_AOP_QMP=y
+CONFIG_SM_GCC_LITO=y
+CONFIG_SM_VIDEOCC_LITO=y
+CONFIG_SM_CAMCC_LITO=y
+CONFIG_SM_DISPCC_LITO=y
+CONFIG_SM_GPUCC_LITO=y
+CONFIG_HWSPINLOCK=y
+CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_MAILBOX=y
+CONFIG_MSM_QMP=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_QCOM_GLINK_SMEM=y
+CONFIG_RPMSG_QCOM_GLINK_SPSS=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
+CONFIG_QCOM_IPCC=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_LITO_LLCC=y
+CONFIG_QCOM_QMI_HELPERS=y
+CONFIG_QCOM_RPMH=y
+CONFIG_QCOM_SMEM=y
+CONFIG_QCOM_EARLY_RANDOM=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SMP2P=y
+CONFIG_SETUP_SSR_NOTIF_TIMEOUTS=y
+CONFIG_SSR_SYSMON_NOTIF_TIMEOUT=20000
+CONFIG_SSR_SUBSYS_NOTIF_TIMEOUT=20000
+CONFIG_PANIC_ON_SSR_NOTIF_TIMEOUT=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SERVICE_LOCATOR=y
+CONFIG_MSM_SERVICE_NOTIFIER=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_BOOT_STATS=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
+CONFIG_QCOM_GLINK=y
+CONFIG_QCOM_GLINK_PKT=y
+CONFIG_MSM_EVENT_TIMER=y
+CONFIG_MSM_PM=y
+CONFIG_QTI_RPM_STATS_LOG=y
+CONFIG_QMP_DEBUGFS_CLIENT=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_ARM_QCOM_DEVFREQ_FW=y
+CONFIG_IIO=y
+CONFIG_PWM=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_QCOM_PDC=y
+CONFIG_PHY_XGENE=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_SLIMBUS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_SDCARD_FS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_HARDENED_USERCOPY_PAGESPAN=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_IPC_LOGGING=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_DYNAMIC_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
diff --git a/arch/arm64/configs/vendor/lito_defconfig b/arch/arm64/configs/vendor/lito_defconfig
index 7b1b2dd..4c43eb8 100644
--- a/arch/arm64/configs/vendor/lito_defconfig
+++ b/arch/arm64/configs/vendor/lito_defconfig
@@ -30,6 +30,7 @@
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
# CONFIG_RD_LZ4 is not set
+# CONFIG_FHANDLE is not set
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_EMBEDDED=y
@@ -54,7 +55,6 @@
# CONFIG_ARM64_VHE is not set
CONFIG_RANDOMIZE_BASE=y
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
-CONFIG_KRYO_PMU_WORKAROUND=y
CONFIG_COMPAT=y
CONFIG_PM_AUTOSLEEP=y
CONFIG_PM_WAKELOCKS=y
@@ -77,6 +77,7 @@
CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_PANIC_ON_REFCOUNT_ERROR=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
@@ -96,6 +97,7 @@
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_INET=y
@@ -104,6 +106,7 @@
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
+CONFIG_NET_IPGRE_DEMUX=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
@@ -158,6 +161,7 @@
CONFIG_NETFILTER_XT_MATCH_MAC=y
CONFIG_NETFILTER_XT_MATCH_MARK=y
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
@@ -204,6 +208,7 @@
CONFIG_NET_CLS_U32=y
CONFIG_CLS_U32_MARK=y
CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_CMP=y
CONFIG_NET_EMATCH_NBYTE=y
@@ -227,6 +232,7 @@
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_UID_SYS_STATS=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
@@ -252,6 +258,8 @@
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
CONFIG_PPP_MPPE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_GPIO=y
# CONFIG_INPUT_MOUSE is not set
@@ -348,6 +356,8 @@
CONFIG_EDAC_KRYO_ARM64_PANIC_ON_CE=y
CONFIG_EDAC_KRYO_ARM64_PANIC_ON_UE=y
CONFIG_EDAC_QCOM=y
+CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE=y
+CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PM8XXX=y
CONFIG_DMADEVICES=y
@@ -361,10 +371,12 @@
CONFIG_QCOM_GENI_SE=y
# CONFIG_QCOM_A53PLL is not set
CONFIG_QCOM_CLK_RPMH=y
+CONFIG_MSM_CLK_AOP_QMP=y
CONFIG_SM_GCC_LITO=y
CONFIG_SM_VIDEOCC_LITO=y
CONFIG_SM_CAMCC_LITO=y
CONFIG_SM_DISPCC_LITO=y
+CONFIG_SM_GPUCC_LITO=y
CONFIG_HWSPINLOCK=y
CONFIG_HWSPINLOCK_QCOM=y
CONFIG_MAILBOX=y
@@ -380,6 +392,7 @@
CONFIG_RPMSG_QCOM_GLINK_SPSS=y
CONFIG_QCOM_COMMAND_DB=y
CONFIG_QCOM_CPUSS_DUMP=y
+CONFIG_QCOM_RUN_QUEUE_STATS=y
CONFIG_QCOM_IPCC=y
CONFIG_QCOM_LLCC=y
CONFIG_QCOM_LITO_LLCC=y
@@ -398,12 +411,14 @@
CONFIG_MSM_SERVICE_NOTIFIER=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
+CONFIG_MSM_SYSMON_QMI_COMM=y
CONFIG_MSM_PIL_SSR_GENERIC=y
CONFIG_MSM_BOOT_STATS=y
CONFIG_QCOM_EUD=y
CONFIG_MSM_CORE_HANG_DETECT=y
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
CONFIG_QCOM_GLINK=y
CONFIG_QCOM_GLINK_PKT=y
CONFIG_MSM_EVENT_TIMER=y
@@ -439,12 +454,14 @@
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_HARDENED_USERCOPY_PAGESPAN=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_GCM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_TWOFISH=y
@@ -469,15 +486,17 @@
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_SOFTLOCKUP_DETECTOR=y
CONFIG_WQ_WATCHDOG=y
-CONFIG_PANIC_TIMEOUT=5
+CONFIG_PANIC_TIMEOUT=-1
CONFIG_SCHEDSTATS=y
CONFIG_SCHED_STACK_END_CHECK=y
CONFIG_DEBUG_SPINLOCK=y
CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_LOCK_TORTURE_TEST=m
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_RCU_TORTURE_TEST=m
CONFIG_FAULT_INJECTION=y
CONFIG_FAIL_PAGE_ALLOC=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
@@ -489,9 +508,14 @@
CONFIG_IRQSOFF_TRACER=y
CONFIG_PREEMPT_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_LKDTM=m
+CONFIG_ATOMIC64_SELFTEST=m
+CONFIG_TEST_USER_COPY=m
CONFIG_MEMTEST=y
CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_PANIC_ON_DATA_CORRUPTION=y
CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
CONFIG_CORESIGHT_SOURCE_ETM4X=y
diff --git a/arch/mips/bcm63xx/dev-enet.c b/arch/mips/bcm63xx/dev-enet.c
index 07b4c65..8e73d65 100644
--- a/arch/mips/bcm63xx/dev-enet.c
+++ b/arch/mips/bcm63xx/dev-enet.c
@@ -70,6 +70,8 @@ static struct platform_device bcm63xx_enet_shared_device = {
static int shared_device_registered;
+static u64 enet_dmamask = DMA_BIT_MASK(32);
+
static struct resource enet0_res[] = {
{
.start = -1, /* filled at runtime */
@@ -99,6 +101,8 @@ static struct platform_device bcm63xx_enet0_device = {
.resource = enet0_res,
.dev = {
.platform_data = &enet0_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -131,6 +135,8 @@ static struct platform_device bcm63xx_enet1_device = {
.resource = enet1_res,
.dev = {
.platform_data = &enet1_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -157,6 +163,8 @@ static struct platform_device bcm63xx_enetsw_device = {
.resource = enetsw_res,
.dev = {
.platform_data = &enetsw_pd,
+ .dma_mask = &enet_dmamask,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
index 0b9535b..6b2a4a9 100644
--- a/arch/mips/kernel/cmpxchg.c
+++ b/arch/mips/kernel/cmpxchg.c
@@ -54,10 +54,9 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size)
{
- u32 mask, old32, new32, load32;
+ u32 mask, old32, new32, load32, load;
volatile u32 *ptr32;
unsigned int shift;
- u8 load;
/* Check that ptr is naturally aligned */
WARN_ON((unsigned long)ptr & (size - 1));
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index 252c009..9bda82ed 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -1818,7 +1818,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* Update the icache */
flush_icache_range((unsigned long)ctx.target,
- (unsigned long)(ctx.target + ctx.idx * sizeof(u32)));
+ (unsigned long)&ctx.target[ctx.idx]);
if (bpf_jit_enable > 1)
/* Dump JIT code */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index aae77eb..4111edb 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -293,8 +293,7 @@ do { \
__put_user_asm(x, ptr, retval, "l", "k", "ir", errret); \
break; \
case 8: \
- __put_user_asm_u64((__typeof__(*ptr))(x), ptr, retval, \
- errret); \
+ __put_user_asm_u64(x, ptr, retval, errret); \
break; \
default: \
__put_user_bad(); \
@@ -440,8 +439,10 @@ do { \
#define __put_user_nocheck(x, ptr, size) \
({ \
int __pu_err; \
+ __typeof__(*(ptr)) __pu_val; \
+ __pu_val = x; \
__uaccess_begin(); \
- __put_user_size((x), (ptr), (size), __pu_err, -EFAULT); \
+ __put_user_size(__pu_val, (ptr), (size), __pu_err, -EFAULT);\
__uaccess_end(); \
__builtin_expect(__pu_err, 0); \
})
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 7654feb..652e7ff 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -313,14 +313,13 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
struct apic_chip_data *apicd = apic_chip_data(irqd);
int vector, cpu;
- cpumask_and(vector_searchmask, vector_searchmask, affmsk);
- cpu = cpumask_first(vector_searchmask);
- if (cpu >= nr_cpu_ids)
- return -EINVAL;
+ cpumask_and(vector_searchmask, dest, affmsk);
+
/* set_affinity might call here for nothing */
if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
return 0;
- vector = irq_matrix_alloc_managed(vector_matrix, cpu);
+ vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
+ &cpu);
trace_vector_alloc_managed(irqd->irq, vector, vector);
if (vector < 0)
return vector;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ee8f8d7..b475419 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3399,6 +3399,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
kvm_mmu_reset_context(&svm->vcpu);
kvm_mmu_load(&svm->vcpu);
+ /*
+ * Drop what we picked up for L2 via svm_complete_interrupts() so it
+ * doesn't end up in L1.
+ */
+ svm->vcpu.arch.nmi_injected = false;
+ kvm_clear_exception_queue(&svm->vcpu);
+ kvm_clear_interrupt_queue(&svm->vcpu);
+
return 0;
}
@@ -4485,25 +4493,14 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
kvm_lapic_reg_write(apic, APIC_ICR, icrl);
break;
case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
- int i;
- struct kvm_vcpu *vcpu;
- struct kvm *kvm = svm->vcpu.kvm;
struct kvm_lapic *apic = svm->vcpu.arch.apic;
/*
- * At this point, we expect that the AVIC HW has already
- * set the appropriate IRR bits on the valid target
- * vcpus. So, we just need to kick the appropriate vcpu.
+ * Update ICR high and low, then emulate sending IPI,
+ * which is handled when writing APIC_ICR.
*/
- kvm_for_each_vcpu(i, vcpu, kvm) {
- bool m = kvm_apic_match_dest(vcpu, apic,
- icrl & KVM_APIC_SHORT_MASK,
- GET_APIC_DEST_FIELD(icrh),
- icrl & KVM_APIC_DEST_MASK);
-
- if (m && !avic_vcpu_is_running(vcpu))
- kvm_vcpu_wake_up(vcpu);
- }
+ kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
+ kvm_lapic_reg_write(apic, APIC_ICR, icrl);
break;
}
case AVIC_IPI_FAILURE_INVALID_TARGET:
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 7ae3686..c9faf34 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -157,8 +157,8 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
pmd = pmd_offset(pud, ppd->vaddr);
if (pmd_none(*pmd)) {
pte = ppd->pgtable_area;
- memset(pte, 0, sizeof(pte) * PTRS_PER_PTE);
- ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE;
+ memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
+ ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
}
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 6495abf..68fcda4 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -358,6 +358,7 @@ struct binder_error {
* @min_priority: minimum scheduling priority
* (invariant after initialized)
* @inherit_rt: inherit RT scheduling policy from caller
+ * @txn_security_ctx: require sender's security context
* (invariant after initialized)
* @async_todo: list of async work items
* (protected by @proc->inner_lock)
@@ -397,6 +398,7 @@ struct binder_node {
u8 sched_policy:2;
u8 inherit_rt:1;
u8 accept_fds:1;
+ u8 txn_security_ctx:1;
u8 min_priority;
};
bool has_async_transaction;
@@ -654,6 +656,7 @@ struct binder_transaction {
struct binder_priority saved_priority;
bool set_priority_called;
kuid_t sender_euid;
+ binder_uintptr_t security_ctx;
/**
* @lock: protects @from, @to_proc, and @to_thread
*
@@ -1363,6 +1366,7 @@ static struct binder_node *binder_init_node_ilocked(
node->min_priority = to_kernel_prio(node->sched_policy, priority);
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
+ node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
INIT_LIST_HEAD(&node->async_todo);
@@ -2900,6 +2904,8 @@ static void binder_transaction(struct binder_proc *proc,
binder_size_t last_fixup_min_off = 0;
struct binder_context *context = proc->context;
int t_debug_id = atomic_inc_return(&binder_last_id);
+ char *secctx = NULL;
+ u32 secctx_sz = 0;
e = binder_transaction_log_add(&binder_transaction_log);
e->debug_id = t_debug_id;
@@ -3123,6 +3129,20 @@ static void binder_transaction(struct binder_proc *proc,
t->priority = target_proc->default_priority;
}
+ if (target_node && target_node->txn_security_ctx) {
+ u32 secid;
+
+ security_task_getsecid(proc->tsk, &secid);
+ ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
+ if (ret) {
+ return_error = BR_FAILED_REPLY;
+ return_error_param = ret;
+ return_error_line = __LINE__;
+ goto err_get_secctx_failed;
+ }
+ extra_buffers_size += ALIGN(secctx_sz, sizeof(u64));
+ }
+
trace_binder_transaction(reply, t, target_node);
t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
@@ -3139,6 +3159,19 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer = NULL;
goto err_binder_alloc_buf_failed;
}
+ if (secctx) {
+ size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
+ ALIGN(tr->offsets_size, sizeof(void *)) +
+ ALIGN(extra_buffers_size, sizeof(void *)) -
+ ALIGN(secctx_sz, sizeof(u64));
+ char *kptr = t->buffer->data + buf_offset;
+
+ t->security_ctx = (uintptr_t)kptr +
+ binder_alloc_get_user_buffer_offset(&target_proc->alloc);
+ memcpy(kptr, secctx, secctx_sz);
+ security_release_secctx(secctx, secctx_sz);
+ secctx = NULL;
+ }
t->buffer->debug_id = t->debug_id;
t->buffer->transaction = t;
t->buffer->target_node = target_node;
@@ -3409,6 +3442,9 @@ static void binder_transaction(struct binder_proc *proc,
t->buffer->transaction = NULL;
binder_alloc_free_buf(&target_proc->alloc, t->buffer);
err_binder_alloc_buf_failed:
+ if (secctx)
+ security_release_secctx(secctx, secctx_sz);
+err_get_secctx_failed:
kfree(tcomplete);
binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
err_alloc_tcomplete_failed:
@@ -4055,11 +4091,13 @@ static int binder_thread_read(struct binder_proc *proc,
while (1) {
uint32_t cmd;
- struct binder_transaction_data tr;
+ struct binder_transaction_data_secctx tr;
+ struct binder_transaction_data *trd = &tr.transaction_data;
struct binder_work *w = NULL;
struct list_head *list = NULL;
struct binder_transaction *t = NULL;
struct binder_thread *t_from;
+ size_t trsize = sizeof(*trd);
binder_inner_proc_lock(proc);
if (!binder_worklist_empty_ilocked(&thread->todo))
@@ -4255,41 +4293,47 @@ static int binder_thread_read(struct binder_proc *proc,
struct binder_node *target_node = t->buffer->target_node;
struct binder_priority node_prio;
- tr.target.ptr = target_node->ptr;
- tr.cookie = target_node->cookie;
+ trd->target.ptr = target_node->ptr;
+ trd->cookie = target_node->cookie;
node_prio.sched_policy = target_node->sched_policy;
node_prio.prio = target_node->min_priority;
binder_transaction_priority(current, t, node_prio,
target_node->inherit_rt);
cmd = BR_TRANSACTION;
} else {
- tr.target.ptr = 0;
- tr.cookie = 0;
+ trd->target.ptr = 0;
+ trd->cookie = 0;
cmd = BR_REPLY;
}
- tr.code = t->code;
- tr.flags = t->flags;
- tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
+ trd->code = t->code;
+ trd->flags = t->flags;
+ trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
t_from = binder_get_txn_from(t);
if (t_from) {
struct task_struct *sender = t_from->proc->tsk;
- tr.sender_pid = task_tgid_nr_ns(sender,
- task_active_pid_ns(current));
+ trd->sender_pid =
+ task_tgid_nr_ns(sender,
+ task_active_pid_ns(current));
} else {
- tr.sender_pid = 0;
+ trd->sender_pid = 0;
}
- tr.data_size = t->buffer->data_size;
- tr.offsets_size = t->buffer->offsets_size;
- tr.data.ptr.buffer = (binder_uintptr_t)
+ trd->data_size = t->buffer->data_size;
+ trd->offsets_size = t->buffer->offsets_size;
+ trd->data.ptr.buffer = (binder_uintptr_t)
((uintptr_t)t->buffer->data +
binder_alloc_get_user_buffer_offset(&proc->alloc));
- tr.data.ptr.offsets = tr.data.ptr.buffer +
+ trd->data.ptr.offsets = trd->data.ptr.buffer +
ALIGN(t->buffer->data_size,
sizeof(void *));
+ tr.secctx = t->security_ctx;
+ if (t->security_ctx) {
+ cmd = BR_TRANSACTION_SEC_CTX;
+ trsize = sizeof(tr);
+ }
if (put_user(cmd, (uint32_t __user *)ptr)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -4300,7 +4344,7 @@ static int binder_thread_read(struct binder_proc *proc,
return -EFAULT;
}
ptr += sizeof(uint32_t);
- if (copy_to_user(ptr, &tr, sizeof(tr))) {
+ if (copy_to_user(ptr, &tr, trsize)) {
if (t_from)
binder_thread_dec_tmpref(t_from);
@@ -4309,7 +4353,7 @@ static int binder_thread_read(struct binder_proc *proc,
return -EFAULT;
}
- ptr += sizeof(tr);
+ ptr += trsize;
trace_binder_transaction_received(t);
binder_stat_br(proc, thread, cmd);
@@ -4317,16 +4361,18 @@ static int binder_thread_read(struct binder_proc *proc,
"%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
proc->pid, thread->pid,
(cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
- "BR_REPLY",
+ (cmd == BR_TRANSACTION_SEC_CTX) ?
+ "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
t->debug_id, t_from ? t_from->proc->pid : 0,
t_from ? t_from->pid : 0, cmd,
t->buffer->data_size, t->buffer->offsets_size,
- (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
+ (u64)trd->data.ptr.buffer,
+ (u64)trd->data.ptr.offsets);
if (t_from)
binder_thread_dec_tmpref(t_from);
t->buffer->allow_user_free = 1;
- if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
binder_inner_proc_lock(thread->proc);
t->to_parent = thread->transaction_stack;
t->to_thread = thread;
@@ -4671,7 +4717,8 @@ static int binder_ioctl_write_read(struct file *filp,
return ret;
}
-static int binder_ioctl_set_ctx_mgr(struct file *filp)
+static int binder_ioctl_set_ctx_mgr(struct file *filp,
+ struct flat_binder_object *fbo)
{
int ret = 0;
struct binder_proc *proc = filp->private_data;
@@ -4700,7 +4747,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp)
} else {
context->binder_context_mgr_uid = curr_euid;
}
- new_node = binder_new_node(proc, NULL);
+ new_node = binder_new_node(proc, fbo);
if (!new_node) {
ret = -ENOMEM;
goto out;
@@ -4823,8 +4870,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
binder_inner_proc_unlock(proc);
break;
}
+ case BINDER_SET_CONTEXT_MGR_EXT: {
+ struct flat_binder_object fbo;
+
+ if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
+ if (ret)
+ goto err;
+ break;
+ }
case BINDER_SET_CONTEXT_MGR:
- ret = binder_ioctl_set_ctx_mgr(filp);
+ ret = binder_ioctl_set_ctx_mgr(filp, NULL);
if (ret)
goto err;
break;
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 35c50ac..f94f335 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -3185,7 +3185,7 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp)
return err;
snprintf(strpid, PID_SIZE, "%d", current->pid);
- buf_size = strlen(current->comm) + strlen(strpid) + 1;
+ buf_size = strlen(current->comm) + strlen("_") + strlen(strpid) + 1;
VERIFY(err, NULL != (fl->debug_buf = kzalloc(buf_size, GFP_KERNEL)));
if (err) {
kfree(fl);
diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c
index decffb3..a738af8 100644
--- a/drivers/clk/clk-versaclock5.c
+++ b/drivers/clk/clk-versaclock5.c
@@ -262,8 +262,10 @@ static int vc5_mux_set_parent(struct clk_hw *hw, u8 index)
if (vc5->clk_mux_ins == VC5_MUX_IN_XIN)
src = VC5_PRIM_SRC_SHDN_EN_XTAL;
- if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
+ else if (vc5->clk_mux_ins == VC5_MUX_IN_CLKIN)
src = VC5_PRIM_SRC_SHDN_EN_CLKIN;
+ else /* Invalid; should have been caught by vc5_probe() */
+ return -EINVAL;
}
return regmap_update_bits(vc5->regmap, VC5_PRIM_SRC_SHDN, mask, src);
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 1cb305f..19ba6f4 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -3275,7 +3275,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
seq_printf(s, "\"protect_count\": %d,", c->protect_count);
seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
- seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
+ seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c));
seq_printf(s, "\"duty_cycle\": %u",
clk_core_get_scaled_duty_cycle(c, 100000));
}
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 7a38880..3d7bbea 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -384,3 +384,11 @@
LITO devices.
Say Y if you want to support display devices and functionality such as
splash screen.
+
+config SM_GPUCC_LITO
+ tristate "LITO Graphics Clock Controller"
+ depends on COMMON_CLK_QCOM
+ help
+ Support for the graphics clock controller on Qualcomm Technologies, Inc.
+ LITO devices.
+ Say Y if you want to support graphics controller devices.
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 141f84a..8fce145 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -56,6 +56,7 @@
obj-$(CONFIG_SM_DISPCC_LITO) += dispcc-lito.o
obj-$(CONFIG_SM_GCC_LITO) += gcc-lito.o
obj-$(CONFIG_SM_VIDEOCC_LITO) += videocc-lito.o
+obj-$(CONFIG_SM_GPUCC_LITO) += gpucc-lito.o
obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o
obj-y += mdss/
diff --git a/drivers/clk/qcom/camcc-kona.c b/drivers/clk/qcom/camcc-kona.c
index 912f372..25873aa 100644
--- a/drivers/clk/qcom/camcc-kona.c
+++ b/drivers/clk/qcom/camcc-kona.c
@@ -280,6 +280,9 @@ static const struct alpha_pll_config cam_cc_pll2_config = {
.config_ctl_val = 0x08200920,
.config_ctl_hi_val = 0x05008011,
.config_ctl_hi1_val = 0x00000000,
+ .test_ctl_val = 0x00010000,
+ .test_ctl_hi_val = 0x00000000,
+ .test_ctl_hi1_val = 0x00000000,
.user_ctl_val = 0x00000100,
.user_ctl_hi_val = 0x00000000,
.user_ctl_hi1_val = 0x00000000,
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index a0521d1..5fc3c80 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -161,7 +161,8 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs);
/* ZONDA PLL specific offsets */
#define ZONDA_PLL_OUT_MASK 0x9
-
+#define ZONDA_STAY_IN_CFA BIT(16)
+#define ZONDA_PLL_FREQ_LOCK_DET BIT(29)
#define pll_alpha_width(p) \
((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \
@@ -216,6 +217,9 @@ static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
#define wait_for_pll_enable_lock(pll) \
wait_for_pll(pll, PLL_LOCK_DET, 0, "enable")
+#define wait_for_zonda_pll_freq_lock(pll) \
+ wait_for_pll(pll, ZONDA_PLL_FREQ_LOCK_DET, 0, "freq enable")
+
#define wait_for_pll_disable(pll) \
wait_for_pll(pll, PLL_ACTIVE_FLAG, 1, "disable")
@@ -879,8 +883,7 @@ void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
config->test_ctl_hi1_val);
regmap_update_bits(regmap, PLL_MODE(pll),
- PLL_UPDATE_BYPASS,
- PLL_UPDATE_BYPASS);
+ PLL_BYPASSNL, 0);
/* Disable PLL output */
regmap_update_bits(regmap, PLL_MODE(pll),
@@ -900,7 +903,7 @@ void clk_zonda_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
static int clk_zonda_pll_enable(struct clk_hw *hw)
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
- u32 val;
+ u32 val, test_ctl_val;
int ret;
ret = regmap_read(pll->clkr.regmap, PLL_MODE(pll), &val);
@@ -937,7 +940,15 @@ static int clk_zonda_pll_enable(struct clk_hw *hw)
regmap_write(pll->clkr.regmap, PLL_OPMODE(pll),
PLL_OPMODE_RUN);
- ret = wait_for_pll_enable_lock(pll);
+ ret = regmap_read(pll->clkr.regmap, PLL_TEST_CTL(pll), &test_ctl_val);
+ if (ret)
+ return ret;
+
+	/* If the PLL is held in CFA mode, poll for frequency lock instead */
+ if (test_ctl_val & ZONDA_STAY_IN_CFA)
+ ret = wait_for_zonda_pll_freq_lock(pll);
+ else
+ ret = wait_for_pll_enable_lock(pll);
if (ret)
return ret;
@@ -1001,6 +1012,7 @@ static int clk_zonda_pll_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
unsigned long rrate;
+ u32 test_ctl_val;
u32 l;
u64 a;
int ret;
@@ -1022,7 +1034,16 @@ static int clk_zonda_pll_set_rate(struct clk_hw *hw, unsigned long rate,
/* Wait before polling for the frequency latch */
udelay(5);
- ret = wait_for_pll_enable_lock(pll);
+	/* Read TEST_CTL to determine whether the PLL stays in CFA mode */
+ ret = regmap_read(pll->clkr.regmap, PLL_TEST_CTL(pll), &test_ctl_val);
+ if (ret)
+ return ret;
+
+	/* If the PLL is held in CFA mode, poll for frequency lock instead */
+ if (test_ctl_val & ZONDA_STAY_IN_CFA)
+ ret = wait_for_zonda_pll_freq_lock(pll);
+ else
+ ret = wait_for_pll_enable_lock(pll);
if (ret)
return ret;
diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c
index d7f3b9e..55f6a3b 100644
--- a/drivers/clk/qcom/clk-rpmh.c
+++ b/drivers/clk/qcom/clk-rpmh.c
@@ -281,6 +281,33 @@ static const struct clk_rpmh_desc clk_rpmh_kona = {
.num_clks = ARRAY_SIZE(kona_rpmh_clocks),
};
+DEFINE_CLK_RPMH_ARC(lito, bi_tcxo, bi_tcxo_ao, "xo.lvl", 0x3, 2);
+DEFINE_CLK_RPMH_VRM(lito, ln_bb_clk3, ln_bb_clk3_ao, "lnbclka3", 2);
+DEFINE_CLK_RPMH_VRM(lito, rf_clk1, rf_clk1_ao, "rfclkd1", 1);
+DEFINE_CLK_RPMH_VRM(lito, rf_clk2, rf_clk2_ao, "rfclkd2", 1);
+DEFINE_CLK_RPMH_VRM(lito, rf_clk3, rf_clk3_ao, "rfclkd3", 1);
+DEFINE_CLK_RPMH_VRM(lito, rf_clk4, rf_clk4_ao, "rfclkd4", 1);
+
+static struct clk_hw *lito_rpmh_clocks[] = {
+ [RPMH_CXO_CLK] = &lito_bi_tcxo.hw,
+ [RPMH_CXO_CLK_A] = &lito_bi_tcxo_ao.hw,
+ [RPMH_LN_BB_CLK3] = &lito_ln_bb_clk3.hw,
+ [RPMH_LN_BB_CLK3_A] = &lito_ln_bb_clk3_ao.hw,
+ [RPMH_RF_CLK1] = &lito_rf_clk1.hw,
+ [RPMH_RF_CLK1_A] = &lito_rf_clk1_ao.hw,
+ [RPMH_RF_CLK2] = &lito_rf_clk2.hw,
+ [RPMH_RF_CLK2_A] = &lito_rf_clk2_ao.hw,
+ [RPMH_RF_CLK3] = &lito_rf_clk3.hw,
+ [RPMH_RF_CLK3_A] = &lito_rf_clk3_ao.hw,
+ [RPMH_RF_CLK4] = &lito_rf_clk4.hw,
+ [RPMH_RF_CLK4_A] = &lito_rf_clk4_ao.hw,
+};
+
+static const struct clk_rpmh_desc clk_rpmh_lito = {
+ .clks = lito_rpmh_clocks,
+ .num_clks = ARRAY_SIZE(lito_rpmh_clocks),
+};
+
static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec,
void *data)
{
@@ -358,6 +385,7 @@ static int clk_rpmh_probe(struct platform_device *pdev)
static const struct of_device_id clk_rpmh_match_table[] = {
{ .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845},
{ .compatible = "qcom,kona-rpmh-clk", .data = &clk_rpmh_kona},
+ { .compatible = "qcom,lito-rpmh-clk", .data = &clk_rpmh_lito},
{ }
};
MODULE_DEVICE_TABLE(of, clk_rpmh_match_table);
diff --git a/drivers/clk/qcom/debugcc-kona.c b/drivers/clk/qcom/debugcc-kona.c
index 7d71211..4947287 100644
--- a/drivers/clk/qcom/debugcc-kona.c
+++ b/drivers/clk/qcom/debugcc-kona.c
@@ -93,8 +93,6 @@ static const char *const debug_mux_parent_names[] = {
"disp_cc_mdss_byte1_intf_clk",
"disp_cc_mdss_dp_aux1_clk",
"disp_cc_mdss_dp_aux_clk",
- "disp_cc_mdss_dp_crypto1_clk",
- "disp_cc_mdss_dp_crypto_clk",
"disp_cc_mdss_dp_link1_clk",
"disp_cc_mdss_dp_link1_intf_clk",
"disp_cc_mdss_dp_link_clk",
@@ -431,10 +429,6 @@ static struct clk_debug_mux gcc_debug_mux = {
0x25, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
{ "disp_cc_mdss_dp_aux_clk", 0x56, 2, DISP_CC,
0x20, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_dp_crypto1_clk", 0x56, 2, DISP_CC,
- 0x24, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_dp_crypto_clk", 0x56, 2, DISP_CC,
- 0x1D, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
{ "disp_cc_mdss_dp_link1_clk", 0x56, 2, DISP_CC,
0x22, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
{ "disp_cc_mdss_dp_link1_intf_clk", 0x56, 2, DISP_CC,
diff --git a/drivers/clk/qcom/dispcc-kona.c b/drivers/clk/qcom/dispcc-kona.c
index 46592a4..3835bb7 100644
--- a/drivers/clk/qcom/dispcc-kona.c
+++ b/drivers/clk/qcom/dispcc-kona.c
@@ -422,60 +422,6 @@ static struct clk_rcg2 disp_cc_mdss_dp_aux_clk_src = {
},
};
-static const struct freq_tbl ftbl_disp_cc_mdss_dp_crypto1_clk_src[] = {
- F( 108000, P_DP_PHY_PLL_LINK_CLK, 3, 0, 0),
- F( 180000, P_DP_PHY_PLL_LINK_CLK, 3, 0, 0),
- F( 360000, P_DP_PHY_PLL_LINK_CLK, 3, 0, 0),
- F( 540000, P_DP_PHY_PLL_LINK_CLK, 3, 0, 0),
- { }
-};
-
-static struct clk_rcg2 disp_cc_mdss_dp_crypto1_clk_src = {
- .cmd_rcgr = 0x2228,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_dp_crypto1_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "disp_cc_mdss_dp_crypto1_clk_src",
- .parent_names = disp_cc_parent_names_0,
- .num_parents = 8,
- .flags = CLK_GET_RATE_NOCACHE,
- .ops = &clk_rcg2_ops,
- .vdd_class = &vdd_mm,
- .num_rate_max = VDD_NUM,
- .rate_max = (unsigned long[VDD_NUM]) {
- [VDD_MIN] = 12800,
- [VDD_LOWER] = 108000,
- [VDD_LOW] = 180000,
- [VDD_LOW_L1] = 360000,
- [VDD_NOMINAL] = 540000},
- },
-};
-
-static struct clk_rcg2 disp_cc_mdss_dp_crypto_clk_src = {
- .cmd_rcgr = 0x2194,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = disp_cc_parent_map_0,
- .freq_tbl = ftbl_disp_cc_mdss_dp_crypto1_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "disp_cc_mdss_dp_crypto_clk_src",
- .parent_names = disp_cc_parent_names_0,
- .num_parents = 8,
- .flags = CLK_GET_RATE_NOCACHE,
- .ops = &clk_rcg2_ops,
- .vdd_class = &vdd_mm,
- .num_rate_max = VDD_NUM,
- .rate_max = (unsigned long[VDD_NUM]) {
- [VDD_MIN] = 12800,
- [VDD_LOWER] = 108000,
- [VDD_LOW] = 180000,
- [VDD_LOW_L1] = 360000,
- [VDD_NOMINAL] = 540000},
- },
-};
-
static const struct freq_tbl ftbl_disp_cc_mdss_dp_link1_clk_src[] = {
F( 162000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
F( 270000, P_DP_PHY_PLL_LINK_CLK, 1, 0, 0),
@@ -997,42 +943,6 @@ static struct clk_branch disp_cc_mdss_dp_aux_clk = {
},
};
-static struct clk_branch disp_cc_mdss_dp_crypto1_clk = {
- .halt_reg = 0x2064,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x2064,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "disp_cc_mdss_dp_crypto1_clk",
- .parent_names = (const char *[]){
- "disp_cc_mdss_dp_crypto1_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch disp_cc_mdss_dp_crypto_clk = {
- .halt_reg = 0x2048,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x2048,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "disp_cc_mdss_dp_crypto_clk",
- .parent_names = (const char *[]){
- "disp_cc_mdss_dp_crypto_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch disp_cc_mdss_dp_link1_clk = {
.halt_reg = 0x205c,
.halt_check = BRANCH_HALT,
@@ -1493,11 +1403,6 @@ static struct clk_regmap *disp_cc_kona_clocks[] = {
[DISP_CC_MDSS_DP_AUX1_CLK_SRC] = &disp_cc_mdss_dp_aux1_clk_src.clkr,
[DISP_CC_MDSS_DP_AUX_CLK] = &disp_cc_mdss_dp_aux_clk.clkr,
[DISP_CC_MDSS_DP_AUX_CLK_SRC] = &disp_cc_mdss_dp_aux_clk_src.clkr,
- [DISP_CC_MDSS_DP_CRYPTO1_CLK] = &disp_cc_mdss_dp_crypto1_clk.clkr,
- [DISP_CC_MDSS_DP_CRYPTO1_CLK_SRC] =
- &disp_cc_mdss_dp_crypto1_clk_src.clkr,
- [DISP_CC_MDSS_DP_CRYPTO_CLK] = &disp_cc_mdss_dp_crypto_clk.clkr,
- [DISP_CC_MDSS_DP_CRYPTO_CLK_SRC] = &disp_cc_mdss_dp_crypto_clk_src.clkr,
[DISP_CC_MDSS_DP_LINK1_CLK] = &disp_cc_mdss_dp_link1_clk.clkr,
[DISP_CC_MDSS_DP_LINK1_CLK_SRC] = &disp_cc_mdss_dp_link1_clk_src.clkr,
[DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC] =
diff --git a/drivers/clk/qcom/gpucc-kona.c b/drivers/clk/qcom/gpucc-kona.c
index b46e269..8d00c6b 100644
--- a/drivers/clk/qcom/gpucc-kona.c
+++ b/drivers/clk/qcom/gpucc-kona.c
@@ -26,6 +26,12 @@
#include "reset.h"
#include "vdd-level.h"
+#define CX_GMU_CBCR_SLEEP_SHIFT 4
+#define CX_GMU_CBCR_SLEEP_MASK GENMASK(7, 4)
+#define CX_GMU_CBCR_WAKE_SHIFT 8
+#define CX_GMU_CBCR_WAKE_MASK GENMASK(11, 8)
+
+
static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
@@ -419,6 +425,7 @@ static int gpu_cc_kona_probe(struct platform_device *pdev)
{
struct regmap *regmap;
struct clk *clk;
+ unsigned int value, mask;
int i, ret;
regmap = qcom_cc_map(pdev, &gpu_cc_kona_desc);
@@ -449,6 +456,12 @@ static int gpu_cc_kona_probe(struct platform_device *pdev)
return PTR_ERR(clk);
}
+ /* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+ mask = CX_GMU_CBCR_SLEEP_MASK | CX_GMU_CBCR_WAKE_MASK;
+ value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT;
+ regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg,
+ mask, value);
+
ret = qcom_cc_really_probe(pdev, &gpu_cc_kona_desc, regmap);
if (ret) {
dev_err(&pdev->dev, "Failed to register GPU CC clocks\n");
diff --git a/drivers/clk/qcom/gpucc-lito.c b/drivers/clk/qcom/gpucc-lito.c
new file mode 100644
index 0000000..92c2f19
--- /dev/null
+++ b/drivers/clk/qcom/gpucc-lito.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "clk: %s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gpucc-lito.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-regmap.h"
+#include "common.h"
+#include "reset.h"
+#include "vdd-level.h"
+
+static DEFINE_VDD_REGULATORS(vdd_cx, VDD_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_mx, VDD_NUM, 1, vdd_corner);
+
+#define CX_GMU_CBCR_SLEEP_MASK 0xF
+#define CX_GMU_CBCR_SLEEP_SHIFT 4
+#define CX_GMU_CBCR_WAKE_MASK 0xF
+#define CX_GMU_CBCR_WAKE_SHIFT 8
+
+enum {
+ P_BI_TCXO,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL0_OUT_MAIN_DIV,
+ P_GPU_CC_PLL0_OUT_MAIN,
+ P_GPU_CC_PLL1_OUT_MAIN,
+};
+
+static const struct parent_map gpu_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPU_CC_PLL0_OUT_MAIN, 1 },
+ { P_GPU_CC_PLL1_OUT_MAIN, 3 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_0[] = {
+ "bi_tcxo",
+ "gpu_cc_pll0",
+ "gpu_cc_pll1",
+ "gcc_gpu_gpll0_clk_src",
+ "gcc_gpu_gpll0_div_clk_src",
+ "core_bi_pll_test_se",
+};
+
+static const struct parent_map gpu_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_MAIN, 5 },
+ { P_GPLL0_OUT_MAIN_DIV, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const char * const gpu_cc_parent_names_1[] = {
+ "bi_tcxo",
+ "gcc_gpu_gpll0_clk_src",
+ "gcc_gpu_gpll0_div_clk_src",
+ "core_bi_pll_test_se",
+};
+
+static struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static const struct alpha_pll_config gpu_cc_pll1_config = {
+ .l = 0x1A,
+ .cal_l = 0x44,
+ .alpha = 0xAAA,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x029A699C,
+ .user_ctl_val = 0x00000001,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll gpu_cc_pll1 = {
+ .offset = 0x100,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_pll1",
+ .parent_names = (const char *[]){ "bi_tcxo" },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ .vdd_class = &vdd_mx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_MIN] = 615000000,
+ [VDD_LOW] = 1066000000,
+ [VDD_LOW_L1] = 1600000000,
+ [VDD_NOMINAL] = 2000000000},
+ },
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0),
+ F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_gmu_clk_src = {
+ .cmd_rcgr = 0x1120,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_0,
+ .freq_tbl = ftbl_gpu_cc_gmu_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gmu_clk_src",
+ .parent_names = gpu_cc_parent_names_0,
+ .num_parents = 6,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 200000000,
+ [VDD_LOW] = 500000000},
+ },
+};
+
+static const struct freq_tbl ftbl_gpu_cc_rbcpr_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_GPLL0_OUT_MAIN_DIV, 6, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gpu_cc_rbcpr_clk_src = {
+ .cmd_rcgr = 0x10b0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gpu_cc_parent_map_1,
+ .freq_tbl = ftbl_gpu_cc_rbcpr_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_rbcpr_clk_src",
+ .parent_names = gpu_cc_parent_names_1,
+ .num_parents = 4,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ .vdd_class = &vdd_cx,
+ .num_rate_max = VDD_NUM,
+ .rate_max = (unsigned long[VDD_NUM]) {
+ [VDD_LOWER] = 19200000,
+ [VDD_NOMINAL] = 50000000},
+ },
+};
+
+static struct clk_branch gpu_cc_ahb_clk = {
+ .halt_reg = 0x1078,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1078,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_crc_ahb_clk = {
+ .halt_reg = 0x107c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x107c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_crc_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_apb_clk = {
+ .halt_reg = 0x1088,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_apb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_gmu_clk = {
+ .halt_reg = 0x1098,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1098,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_gmu_clk",
+ .parent_names = (const char *[]){
+ "gpu_cc_gmu_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
+ .halt_reg = 0x108c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x108c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cx_snoc_dvm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_aon_clk = {
+ .halt_reg = 0x1004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_aon_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_cxo_clk = {
+ .halt_reg = 0x109c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x109c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_cxo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_gmu_clk = {
+ .halt_reg = 0x1064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_gmu_clk",
+ .parent_names = (const char *[]){
+ "gpu_cc_gmu_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_gx_vsense_clk = {
+ .halt_reg = 0x1058,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_gx_vsense_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_rbcpr_clk = {
+ .halt_reg = 0x10f0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x10f0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_rbcpr_clk",
+ .parent_names = (const char *[]){
+ "gpu_cc_rbcpr_clk_src",
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gpu_cc_sleep_clk = {
+ .halt_reg = 0x1090,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1090,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpu_cc_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+/* Measure-only clock for gpu_cc_cx_gfx3d_clk. */
+static struct clk_dummy measure_only_gpu_cc_cx_gfx3d_clk = {
+ .rrate = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "measure_only_gpu_cc_cx_gfx3d_clk",
+ .ops = &clk_dummy_ops,
+ },
+};
+
+/* Measure-only clock for gpu_cc_cx_gfx3d_slv_clk. */
+static struct clk_dummy measure_only_gpu_cc_cx_gfx3d_slv_clk = {
+ .rrate = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "measure_only_gpu_cc_cx_gfx3d_slv_clk",
+ .ops = &clk_dummy_ops,
+ },
+};
+
+/* Measure-only clock for gpu_cc_gx_gfx3d_clk. */
+static struct clk_dummy measure_only_gpu_cc_gx_gfx3d_clk = {
+ .rrate = 1000,
+ .hw.init = &(struct clk_init_data){
+ .name = "measure_only_gpu_cc_gx_gfx3d_clk",
+ .ops = &clk_dummy_ops,
+ },
+};
+
+struct clk_hw *gpu_cc_lito_hws[] = {
+ [MEASURE_ONLY_GPU_CC_CX_GFX3D_CLK] =
+ &measure_only_gpu_cc_cx_gfx3d_clk.hw,
+ [MEASURE_ONLY_GPU_CC_CX_GFX3D_SLV_CLK] =
+ &measure_only_gpu_cc_cx_gfx3d_slv_clk.hw,
+ [MEASURE_ONLY_GPU_CC_GX_GFX3D_CLK] =
+ &measure_only_gpu_cc_gx_gfx3d_clk.hw,
+};
+
+static struct clk_regmap *gpu_cc_lito_clocks[] = {
+ [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr,
+ [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr,
+ [GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr,
+ [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr,
+ [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr,
+ [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr,
+ [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr,
+ [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr,
+ [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr,
+ [GPU_CC_GX_VSENSE_CLK] = &gpu_cc_gx_vsense_clk.clkr,
+ [GPU_CC_PLL1] = &gpu_cc_pll1.clkr,
+ [GPU_CC_RBCPR_CLK] = &gpu_cc_rbcpr_clk.clkr,
+ [GPU_CC_RBCPR_CLK_SRC] = &gpu_cc_rbcpr_clk_src.clkr,
+ [GPU_CC_SLEEP_CLK] = &gpu_cc_sleep_clk.clkr,
+};
+
+static const struct regmap_config gpu_cc_lito_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x8008,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gpu_cc_lito_desc = {
+ .config = &gpu_cc_lito_regmap_config,
+ .hwclks = gpu_cc_lito_hws,
+ .num_hwclks = ARRAY_SIZE(gpu_cc_lito_hws),
+ .clks = gpu_cc_lito_clocks,
+ .num_clks = ARRAY_SIZE(gpu_cc_lito_clocks),
+};
+
+static const struct of_device_id gpu_cc_lito_match_table[] = {
+ { .compatible = "qcom,gpucc-lito" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gpu_cc_lito_match_table);
+
+static int gpu_cc_lito_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ unsigned int value, mask;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gpu_cc_lito_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
+ if (IS_ERR(vdd_cx.regulator[0])) {
+ if (PTR_ERR(vdd_cx.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_cx regulator\n");
+ return PTR_ERR(vdd_cx.regulator[0]);
+ }
+
+ vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx");
+ if (IS_ERR(vdd_mx.regulator[0])) {
+ if (PTR_ERR(vdd_mx.regulator[0]) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to get vdd_mx regulator\n");
+ return PTR_ERR(vdd_mx.regulator[0]);
+ }
+
+ clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config);
+
+ /* Recommended WAKEUP/SLEEP settings for the gpu_cc_cx_gmu_clk */
+ mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT;
+ mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT;
+ value = 0xF << CX_GMU_CBCR_WAKE_SHIFT | 0xF << CX_GMU_CBCR_SLEEP_SHIFT;
+ regmap_update_bits(regmap, gpu_cc_cx_gmu_clk.clkr.enable_reg,
+ mask, value);
+
+ ret = qcom_cc_really_probe(pdev, &gpu_cc_lito_desc, regmap);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register GPU CC clocks\n");
+ return ret;
+ }
+
+ dev_info(&pdev->dev, "Registered GPU CC clocks\n");
+ return 0;
+}
+
+static struct platform_driver gpu_cc_lito_driver = {
+ .probe = gpu_cc_lito_probe,
+ .driver = {
+ .name = "gpu_cc-lito",
+ .of_match_table = gpu_cc_lito_match_table,
+ },
+};
+
+static int __init gpu_cc_lito_init(void)
+{
+ return platform_driver_register(&gpu_cc_lito_driver);
+}
+subsys_initcall(gpu_cc_lito_init);
+
+static void __exit gpu_cc_lito_exit(void)
+{
+ platform_driver_unregister(&gpu_cc_lito_driver);
+}
+module_exit(gpu_cc_lito_exit);
+
+MODULE_DESCRIPTION("QTI GPU_CC LITO Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gpu_cc-lito");
diff --git a/drivers/clk/qcom/videocc-kona.c b/drivers/clk/qcom/videocc-kona.c
index a85a880..8698403 100644
--- a/drivers/clk/qcom/videocc-kona.c
+++ b/drivers/clk/qcom/videocc-kona.c
@@ -87,9 +87,9 @@ static struct pll_vco lucid_vco[] = {
};
static const struct alpha_pll_config video_pll0_config = {
- .l = 0x14,
+ .l = 0x25,
.cal_l = 0x44,
- .alpha = 0xD555,
+ .alpha = 0x8000,
.config_ctl_val = 0x20485699,
.config_ctl_hi_val = 0x00002261,
.config_ctl_hi1_val = 0x029A699C,
@@ -121,9 +121,9 @@ static struct clk_alpha_pll video_pll0 = {
};
static const struct alpha_pll_config video_pll1_config = {
- .l = 0x14,
+ .l = 0x29,
.cal_l = 0x44,
- .alpha = 0xD555,
+ .alpha = 0xFAAA,
.config_ctl_val = 0x20485699,
.config_ctl_hi_val = 0x00002261,
.config_ctl_hi1_val = 0x029A699C,
@@ -227,12 +227,10 @@ static struct clk_rcg2 video_cc_ahb_clk_src = {
};
static const struct freq_tbl ftbl_video_cc_mvs0_clk_src[] = {
- F(400000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
F(720000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
F(1014000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
F(1098000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
F(1332000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
- F(1599000000, P_VIDEO_PLL0_OUT_MAIN, 1, 0, 0),
{ }
};
@@ -252,22 +250,18 @@ static struct clk_rcg2 video_cc_mvs0_clk_src = {
.vdd_class = &vdd_mm,
.num_rate_max = VDD_NUM,
.rate_max = (unsigned long[VDD_NUM]) {
- [VDD_MIN] = 400000000,
[VDD_LOWER] = 720000000,
[VDD_LOW] = 1014000000,
[VDD_LOW_L1] = 1098000000,
- [VDD_NOMINAL] = 1332000000,
- [VDD_HIGH] = 1599000000},
+ [VDD_NOMINAL] = 1332000000},
},
};
static const struct freq_tbl ftbl_video_cc_mvs1_clk_src[] = {
- F(400000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
F(806000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
F(1040000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
F(1098000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
F(1332000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
- F(1599000000, P_VIDEO_PLL1_OUT_MAIN, 1, 0, 0),
{ }
};
@@ -287,12 +281,10 @@ static struct clk_rcg2 video_cc_mvs1_clk_src = {
.vdd_class = &vdd_mm,
.num_rate_max = VDD_NUM,
.rate_max = (unsigned long[VDD_NUM]) {
- [VDD_MIN] = 400000000,
[VDD_LOWER] = 806000000,
[VDD_LOW] = 1040000000,
[VDD_LOW_L1] = 1098000000,
- [VDD_NOMINAL] = 1332000000,
- [VDD_HIGH] = 1599000000},
+ [VDD_NOMINAL] = 1332000000},
},
};
@@ -316,7 +308,7 @@ static struct clk_rcg2 video_cc_sleep_clk_src = {
.vdd_class = &vdd_mm,
.num_rate_max = VDD_NUM,
.rate_max = (unsigned long[VDD_NUM]) {
- [VDD_MIN] = 32000},
+ [VDD_LOWER] = 32000},
},
};
diff --git a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
index 269d359..edc31bb 100644
--- a/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
+++ b/drivers/clk/tegra/clk-tegra124-dfll-fcpu.c
@@ -133,9 +133,11 @@ static int tegra124_dfll_fcpu_remove(struct platform_device *pdev)
struct tegra_dfll_soc_data *soc;
soc = tegra_dfll_unregister(pdev);
- if (IS_ERR(soc))
+ if (IS_ERR(soc)) {
dev_err(&pdev->dev, "failed to unregister DFLL: %ld\n",
PTR_ERR(soc));
+ return PTR_ERR(soc);
+ }
tegra_cvb_remove_opp_table(soc->dev, soc->cvb, soc->max_freq);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d5b1ec6..c01c683 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2278,7 +2278,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
ret = cpufreq_start_governor(policy);
if (!ret) {
pr_debug("cpufreq: governor change\n");
- sched_cpufreq_governor_change(policy, old_gov);
return 0;
}
cpufreq_exit_governor(policy);
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index ed3b785..0dece83d 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -30,4 +30,15 @@
WARNING: improper use of this can result in deadlocking kernel
drivers from userspace. Intended for test and debug only.
+config DEBUG_DMA_BUF_REF
+ bool "DEBUG Reference Count"
+ depends on STACKDEPOT
+ depends on DMA_SHARED_BUFFER
+ default n
+ help
+ Save stack traces for every call to dma_buf_get and dma_buf_put, to
+ help debug memory leaks. Potential leaks may be found by manually
+ matching the get/put call stacks. This feature consumes extra memory
+ in order to save the stack traces using STACKDEPOT.
+
endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index c33bf88..dcbc33f 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,3 +1,4 @@
obj-y := dma-buf.o dma-fence.o dma-fence-array.o reservation.o seqno-fence.o
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
+obj-$(CONFIG_DEBUG_DMA_BUF_REF) += dma-buf-ref.o
diff --git a/drivers/dma-buf/dma-buf-ref.c b/drivers/dma-buf/dma-buf-ref.c
new file mode 100644
index 0000000..6298574
--- /dev/null
+++ b/drivers/dma-buf/dma-buf-ref.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/stackdepot.h>
+#include <linux/stacktrace.h>
+#include <linux/seq_file.h>
+
+#define DMA_BUF_STACK_DEPTH (16)
+
+struct dma_buf_ref {
+ struct list_head list;
+ depot_stack_handle_t handle;
+ int count;
+};
+
+void dma_buf_ref_init(struct dma_buf *dmabuf)
+{
+ INIT_LIST_HEAD(&dmabuf->refs);
+}
+
+void dma_buf_ref_destroy(struct dma_buf *dmabuf)
+{
+ struct dma_buf_ref *r, *n;
+
+ mutex_lock(&dmabuf->lock);
+ list_for_each_entry_safe(r, n, &dmabuf->refs, list) {
+ list_del(&r->list);
+ kfree(r);
+ }
+ mutex_unlock(&dmabuf->lock);
+}
+
+static void dma_buf_ref_insert_handle(struct dma_buf *dmabuf,
+ depot_stack_handle_t handle,
+ int count)
+{
+ struct dma_buf_ref *r;
+
+ mutex_lock(&dmabuf->lock);
+ list_for_each_entry(r, &dmabuf->refs, list) {
+ if (r->handle == handle) {
+ r->count += count;
+ goto out;
+ }
+ }
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ goto out;
+
+ INIT_LIST_HEAD(&r->list);
+ r->handle = handle;
+ r->count = count;
+ list_add(&r->list, &dmabuf->refs);
+
+out:
+ mutex_unlock(&dmabuf->lock);
+}
+
+void dma_buf_ref_mod(struct dma_buf *dmabuf, int nr)
+{
+ unsigned long entries[DMA_BUF_STACK_DEPTH];
+ struct stack_trace trace = {
+ .nr_entries = 0,
+ .entries = entries,
+ .max_entries = DMA_BUF_STACK_DEPTH,
+ .skip = 1
+ };
+ depot_stack_handle_t handle;
+
+ save_stack_trace(&trace);
+ if (trace.nr_entries != 0 &&
+ trace.entries[trace.nr_entries-1] == ULONG_MAX)
+ trace.nr_entries--;
+
+ handle = depot_save_stack(&trace, GFP_KERNEL);
+ if (!handle)
+ return;
+
+ dma_buf_ref_insert_handle(dmabuf, handle, nr);
+}
+
+/**
+ * Called with dmabuf->lock held
+ */
+int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf)
+{
+ char *buf;
+ struct dma_buf_ref *ref;
+ int count = 0;
+ struct stack_trace trace;
+
+ buf = (void *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ list_for_each_entry(ref, &dmabuf->refs, list) {
+ count += ref->count;
+
+ seq_printf(s, "References: %d\n", ref->count);
+ depot_fetch_stack(ref->handle, &trace);
+ snprint_stack_trace(buf, PAGE_SIZE, &trace, 0);
+ seq_puts(s, buf);
+ seq_putc(s, '\n');
+ }
+
+ seq_printf(s, "Total references: %d\n\n\n", count);
+ free_page((unsigned long)buf);
+
+ return 0;
+}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 5b0c24f..db82aae 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -36,6 +36,9 @@
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/atomic.h>
+#include <linux/sched/signal.h>
+#include <linux/fdtable.h>
+#include <linux/list_sort.h>
#include <uapi/linux/dma-buf.h>
@@ -48,6 +51,19 @@ struct dma_buf_list {
struct mutex lock;
};
+struct dma_info {
+ struct dma_buf *dmabuf;
+ struct list_head head;
+};
+
+struct dma_proc {
+ char name[TASK_COMM_LEN];
+ pid_t pid;
+ size_t size;
+ struct list_head dma_bufs;
+ struct list_head head;
+};
+
static struct dma_buf_list db_list;
static int dma_buf_release(struct inode *inode, struct file *file)
@@ -71,12 +87,14 @@ static int dma_buf_release(struct inode *inode, struct file *file)
*/
BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
- dmabuf->ops->release(dmabuf);
-
mutex_lock(&db_list.lock);
list_del(&dmabuf->list_node);
mutex_unlock(&db_list.lock);
+ dmabuf->ops->release(dmabuf);
+
+ dma_buf_ref_destroy(dmabuf);
+
if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
reservation_object_fini(dmabuf->resv);
@@ -457,6 +475,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
dmabuf->name = bufname;
+ dmabuf->ktime = ktime_get();
if (!resv) {
resv = (struct reservation_object *)&dmabuf[1];
@@ -477,6 +496,9 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
mutex_init(&dmabuf->lock);
INIT_LIST_HEAD(&dmabuf->attachments);
+ dma_buf_ref_init(dmabuf);
+ dma_buf_ref_mod(dmabuf, 1);
+
mutex_lock(&db_list.lock);
list_add(&dmabuf->list_node, &db_list.head);
mutex_unlock(&db_list.lock);
@@ -538,6 +560,7 @@ struct dma_buf *dma_buf_get(int fd)
fput(file);
return ERR_PTR(-EINVAL);
}
+ dma_buf_ref_mod(file->private_data, 1);
return file->private_data;
}
@@ -558,6 +581,7 @@ void dma_buf_put(struct dma_buf *dmabuf)
if (WARN_ON(!dmabuf || !dmabuf->file))
return;
+ dma_buf_ref_mod(dmabuf, -1);
fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);
@@ -1203,6 +1227,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
seq_printf(s, "Total %d devices attached\n\n",
attach_count);
+ dma_buf_ref_show(s, buf_obj);
+
count++;
size += buf_obj->size;
mutex_unlock(&buf_obj->lock);
@@ -1226,6 +1252,157 @@ static const struct file_operations dma_buf_debug_fops = {
.release = single_release,
};
+static bool list_contains(struct list_head *list, struct dma_buf *info)
+{
+ struct dma_info *curr;
+
+ list_for_each_entry(curr, list, head)
+ if (curr->dmabuf == info)
+ return true;
+
+ return false;
+}
+
+static int get_dma_info(const void *data, struct file *file, unsigned int n)
+{
+ struct dma_proc *dma_proc;
+ struct dma_info *dma_info;
+
+ dma_proc = (struct dma_proc *)data;
+ if (!is_dma_buf_file(file))
+ return 0;
+
+ if (list_contains(&dma_proc->dma_bufs, file->private_data))
+ return 0;
+
+ dma_info = kzalloc(sizeof(*dma_info), GFP_ATOMIC);
+ if (!dma_info)
+ return -ENOMEM;
+
+ get_file(file);
+ dma_info->dmabuf = file->private_data;
+ dma_proc->size += dma_info->dmabuf->size / SZ_1K;
+ list_add(&dma_info->head, &dma_proc->dma_bufs);
+ return 0;
+}
+
+static void write_proc(struct seq_file *s, struct dma_proc *proc)
+{
+ struct dma_info *tmp;
+
+ seq_printf(s, "\n%s (PID %ld) size: %ld\nDMA Buffers:\n",
+ proc->name, proc->pid, proc->size);
+ seq_printf(s, "%-8s\t%-8s\t%-8s\n",
+ "Name", "Size (KB)", "Time Alive (sec)");
+
+ list_for_each_entry(tmp, &proc->dma_bufs, head) {
+ struct dma_buf *dmabuf = tmp->dmabuf;
+ ktime_t elapmstime = ktime_ms_delta(ktime_get(), dmabuf->ktime);
+
+ elapmstime = ktime_divns(elapmstime, MSEC_PER_SEC);
+ seq_printf(s, "%-8s\t%-8ld\t%-8ld\n",
+ dmabuf->name,
+ dmabuf->size / SZ_1K,
+ elapmstime);
+ }
+}
+
+static void free_proc(struct dma_proc *proc)
+{
+ struct dma_info *tmp, *n;
+
+ list_for_each_entry_safe(tmp, n, &proc->dma_bufs, head) {
+ dma_buf_put(tmp->dmabuf);
+ list_del(&tmp->head);
+ kfree(tmp);
+ }
+ kfree(proc);
+}
+
+static int dmacmp(void *unused, struct list_head *a, struct list_head *b)
+{
+ struct dma_info *a_buf, *b_buf;
+
+ a_buf = list_entry(a, struct dma_info, head);
+ b_buf = list_entry(b, struct dma_info, head);
+ return b_buf->dmabuf->size - a_buf->dmabuf->size;
+}
+
+static int proccmp(void *unused, struct list_head *a, struct list_head *b)
+{
+ struct dma_proc *a_proc, *b_proc;
+
+ a_proc = list_entry(a, struct dma_proc, head);
+ b_proc = list_entry(b, struct dma_proc, head);
+ return b_proc->size - a_proc->size;
+}
+
+static int dma_procs_debug_show(struct seq_file *s, void *unused)
+{
+ struct task_struct *task, *thread;
+ struct files_struct *files;
+ int ret = 0;
+ struct dma_proc *tmp, *n;
+ LIST_HEAD(plist);
+
+ read_lock(&tasklist_lock);
+ for_each_process(task) {
+ struct files_struct *group_leader_files = NULL;
+
+ tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC);
+ if (!tmp) {
+ ret = -ENOMEM;
+ read_unlock(&tasklist_lock);
+ goto mem_err;
+ }
+ INIT_LIST_HEAD(&tmp->dma_bufs);
+ for_each_thread(task, thread) {
+ task_lock(thread);
+ if (unlikely(!group_leader_files))
+ group_leader_files = task->group_leader->files;
+ files = thread->files;
+ if (files && (group_leader_files != files ||
+ thread == task->group_leader))
+ ret = iterate_fd(files, 0, get_dma_info, tmp);
+ task_unlock(thread);
+ }
+ if (ret || list_empty(&tmp->dma_bufs))
+ goto skip;
+ list_sort(NULL, &tmp->dma_bufs, dmacmp);
+ get_task_comm(tmp->name, task);
+ tmp->pid = task->tgid;
+ list_add(&tmp->head, &plist);
+ continue;
+skip:
+ free_proc(tmp);
+ }
+ read_unlock(&tasklist_lock);
+
+ list_sort(NULL, &plist, proccmp);
+ list_for_each_entry(tmp, &plist, head)
+ write_proc(s, tmp);
+
+ ret = 0;
+mem_err:
+ list_for_each_entry_safe(tmp, n, &plist, head) {
+ list_del(&tmp->head);
+ free_proc(tmp);
+ }
+ return ret;
+}
+
+static int dma_procs_debug_open(struct inode *f_inode, struct file *file)
+{
+ return single_open(file, dma_procs_debug_show, NULL);
+}
+
+static const struct file_operations dma_procs_debug_fops = {
+ .open = dma_procs_debug_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
static struct dentry *dma_buf_debugfs_dir;
static int dma_buf_init_debugfs(void)
@@ -1246,6 +1423,17 @@ static int dma_buf_init_debugfs(void)
debugfs_remove_recursive(dma_buf_debugfs_dir);
dma_buf_debugfs_dir = NULL;
err = PTR_ERR(d);
+ return err;
+ }
+
+ d = debugfs_create_file("dmaprocs", 0444, dma_buf_debugfs_dir,
+ NULL, &dma_procs_debug_fops);
+
+ if (IS_ERR(d)) {
+ pr_debug("dma_buf: debugfs: failed to create node dmaprocs\n");
+ debugfs_remove_recursive(dma_buf_debugfs_dir);
+ dma_buf_debugfs_dir = NULL;
+ err = PTR_ERR(d);
}
return err;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
index 16b1a9c..743d3c9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_processpptables.c
@@ -32,6 +32,7 @@
#include "vega10_pptable.h"
#define NUM_DSPCLK_LEVELS 8
+#define VEGA10_ENGINECLOCK_HARDMAX 198000
static void set_hw_cap(struct pp_hwmgr *hwmgr, bool enable,
enum phm_platform_caps cap)
@@ -258,7 +259,26 @@ static int init_over_drive_limits(
struct pp_hwmgr *hwmgr,
const ATOM_Vega10_POWERPLAYTABLE *powerplay_table)
{
- hwmgr->platform_descriptor.overdriveLimit.engineClock =
+ const ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
+ (const ATOM_Vega10_GFXCLK_Dependency_Table *)
+ (((unsigned long) powerplay_table) +
+ le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
+ bool is_acg_enabled = false;
+ ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_v2;
+
+ if (gfxclk_dep_table->ucRevId == 1) {
+ patom_record_v2 =
+ (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
+ is_acg_enabled =
+ (bool)patom_record_v2[gfxclk_dep_table->ucNumEntries-1].ucACGEnable;
+ }
+
+ if (powerplay_table->ulMaxODEngineClock > VEGA10_ENGINECLOCK_HARDMAX &&
+ !is_acg_enabled)
+ hwmgr->platform_descriptor.overdriveLimit.engineClock =
+ VEGA10_ENGINECLOCK_HARDMAX;
+ else
+ hwmgr->platform_descriptor.overdriveLimit.engineClock =
le32_to_cpu(powerplay_table->ulMaxODEngineClock);
hwmgr->platform_descriptor.overdriveLimit.memoryClock =
le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 23397c0..1d74aed 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1564,6 +1564,15 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
old_plane_state->crtc != new_plane_state->crtc)
return -EINVAL;
+ /*
+ * FIXME: Since prepare_fb and cleanup_fb are always called on
+ * the new_plane_state for async updates we need to block framebuffer
+ * changes. This prevents use of a fb that's been cleaned up and
+ * double cleanups from occurring.
+ */
+ if (old_plane_state->fb != new_plane_state->fb)
+ return -EINVAL;
+
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index bbb8126..9acb9df 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -896,7 +896,7 @@ static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
np = dev_pm_opp_get_of_node(opp);
if (np) {
- of_property_read_u32(np, "qcom,level", &val);
+ of_property_read_u32(np, "opp-level", &val);
of_node_put(np);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 811ba98..75a2f16 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1161,7 +1161,7 @@ static int dp_ctrl_on(struct dp_ctrl *dp_ctrl, bool mst_mode,
ctrl->mst_mode = mst_mode;
ctrl->fec_mode = fec_mode;
- rate = ctrl->panel->get_optimal_link_rate(ctrl->panel);
+ rate = ctrl->panel->link_info.rate;
if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) {
pr_debug("using phy test link parameters\n");
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index f2ef730..bdd2478 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -1310,7 +1310,6 @@ static int dp_display_set_mode(struct dp_display *dp_display, void *panel,
const u32 num_components = 3, default_bpp = 24;
struct dp_display_private *dp;
struct dp_panel *dp_panel;
- u32 rc;
if (!dp_display || !panel) {
pr_err("invalid input\n");
@@ -1335,14 +1334,7 @@ static int dp_display_set_mode(struct dp_display *dp_display, void *panel,
mode->timing.bpp, mode->timing.pixel_clk_khz);
dp_panel->pinfo = mode->timing;
- rc = dp_panel->init(dp_panel);
-
- if (rc == -EAGAIN) {
- dp->ctrl->off(dp->ctrl);
- dp->ctrl->on(dp->ctrl, dp->mst.mst_active,
- dp->panel->fec_en, false);
- }
-
+ dp_panel->init(dp_panel);
mutex_unlock(&dp->session_lock);
return 0;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 15413f6..eca7909 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -74,9 +74,6 @@ struct dp_panel_private {
u8 spd_product_description[16];
u8 major;
u8 minor;
- u32 bpp;
- u32 active_pclk;
- u32 optimal_link_rate;
};
static const struct dp_panel_info fail_safe = {
@@ -1767,50 +1764,12 @@ static int dp_panel_set_dpcd(struct dp_panel *dp_panel, u8 *dpcd)
return 0;
}
-static u32 dp_panel_get_optimal_link_rate(struct dp_panel *dp_panel)
-{
- struct dp_panel_private *panel;
- u32 lrate, rate = 0;
-
- if (!dp_panel) {
- pr_err("invalid input\n");
- goto end;
- }
-
- panel = container_of(dp_panel, struct dp_panel_private, dp_panel);
-
- /*
- * As MST can support multiple streams,
- * do not optimize the link rate for MST.
- */
- if (panel->dp_panel.mst_state) {
- rate = panel->dp_panel.link_info.rate;
- goto end;
- }
-
- lrate = ((panel->active_pclk / panel->dp_panel.link_info.num_lanes) *
- panel->bpp) / 8;
-
- if (lrate <= DP_LINK_RATE_RBR)
- rate = DP_LINK_RATE_RBR;
- else if (lrate <= DP_LINK_RATE_HBR)
- rate = DP_LINK_RATE_HBR;
- else if (lrate <= DP_LINK_RATE_HBR2)
- rate = DP_LINK_RATE_HBR2;
- else
- rate = DP_LINK_RATE_HBR3;
-end:
- panel->optimal_link_rate = rate;
- return rate;
-}
-
static int dp_panel_read_edid(struct dp_panel *dp_panel,
struct drm_connector *connector)
{
int ret = 0;
struct dp_panel_private *panel;
struct edid *edid;
- struct drm_display_mode *mode;
if (!dp_panel) {
pr_err("invalid input\n");
@@ -1831,16 +1790,6 @@ static int dp_panel_read_edid(struct dp_panel *dp_panel,
ret = -EINVAL;
goto end;
}
-
- mutex_lock(&connector->dev->mode_config.mutex);
- _sde_edid_update_modes(connector, dp_panel->edid_ctrl);
- mutex_unlock(&connector->dev->mode_config.mutex);
-
- mode = list_first_entry(&connector->probed_modes,
- struct drm_display_mode, head);
-
- panel->bpp = connector->display_info.bpc * 3;
- panel->active_pclk = mode->clock;
end:
edid = dp_panel->edid_ctrl->edid;
dp_panel->audio_supported = drm_detect_monitor_audio(edid);
@@ -2376,7 +2325,6 @@ static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
int rc = 0;
struct dp_panel_private *panel;
struct dp_panel_info *pinfo;
- u32 current_link_rate;
if (!dp_panel) {
pr_err("invalid input\n");
@@ -2400,13 +2348,6 @@ static int dp_panel_init_panel_info(struct dp_panel *dp_panel)
pinfo->refresh_rate, pinfo->bpp, pinfo->pixel_clk_khz,
panel->link->link_params.bw_code,
panel->link->link_params.lane_count);
-
- panel->active_pclk = pinfo->pixel_clk_khz;
- current_link_rate = panel->optimal_link_rate;
- dp_panel_get_optimal_link_rate(dp_panel);
-
- if (panel->optimal_link_rate != current_link_rate)
- rc = -EAGAIN;
end:
return rc;
}
@@ -2971,32 +2912,34 @@ struct dp_panel *dp_panel_get(struct dp_panel_in *in)
goto error;
}
- dp_panel = &panel->dp_panel;
-
- if (in->base_panel) {
- struct dp_panel_private *base_panel_priv =
- container_of(in->base_panel,
- struct dp_panel_private, dp_panel);
-
- memcpy(panel, base_panel_priv, sizeof(*panel));
-
- goto update;
- }
-
panel->dev = in->dev;
panel->aux = in->aux;
panel->catalog = in->catalog;
panel->link = in->link;
panel->parser = in->parser;
+ dp_panel = &panel->dp_panel;
dp_panel->max_bw_code = DP_LINK_BW_8_1;
dp_panel->spd_enabled = true;
memcpy(panel->spd_vendor_name, vendor_name, (sizeof(u8) * 8));
memcpy(panel->spd_product_description, product_desc, (sizeof(u8) * 16));
+ dp_panel->connector = in->connector;
dp_panel->dsc_feature_enable = panel->parser->dsc_feature_enable;
dp_panel->fec_feature_enable = panel->parser->fec_feature_enable;
+ if (in->base_panel) {
+ memcpy(dp_panel->dpcd, in->base_panel->dpcd,
+ DP_RECEIVER_CAP_SIZE + 1);
+ memcpy(&dp_panel->link_info, &in->base_panel->link_info,
+ sizeof(dp_panel->link_info));
+ dp_panel->mst_state = in->base_panel->mst_state;
+ dp_panel->widebus_en = in->base_panel->widebus_en;
+ dp_panel->fec_en = in->base_panel->fec_en;
+ dp_panel->dsc_en = in->base_panel->dsc_en;
+ dp_panel->fec_overhead_fp = in->base_panel->fec_overhead_fp;
+ }
+
dp_panel->init = dp_panel_init_panel_info;
dp_panel->deinit = dp_panel_deinit_panel_info;
dp_panel->hw_cfg = dp_panel_hw_cfg;
@@ -3017,9 +2960,7 @@ struct dp_panel *dp_panel_get(struct dp_panel_in *in)
dp_panel->read_mst_cap = dp_panel_read_mst_cap;
dp_panel->convert_to_dp_mode = dp_panel_convert_to_dp_mode;
dp_panel->update_pps = dp_panel_update_pps;
- dp_panel->get_optimal_link_rate = dp_panel_get_optimal_link_rate;
-update:
- dp_panel->connector = in->connector;
+
sde_conn = to_sde_connector(dp_panel->connector);
sde_conn->drv_panel = dp_panel;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index a3473ec..90d5346 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -18,11 +18,6 @@
#define DP_RECEIVER_DSC_CAP_SIZE 15
#define DP_RECEIVER_FEC_STATUS_SIZE 3
-#define DP_LINK_RATE_RBR 162000
-#define DP_LINK_RATE_HBR 270000
-#define DP_LINK_RATE_HBR2 540000
-#define DP_LINK_RATE_HBR3 810000
-
/*
* A source initiated power down flag is set
* when the DP is powered off while physical
@@ -168,7 +163,6 @@ struct dp_panel {
const struct drm_display_mode *drm_mode,
struct dp_display_mode *dp_mode);
void (*update_pps)(struct dp_panel *dp_panel, char *pps_cmd);
- u32 (*get_optimal_link_rate)(struct dp_panel *dp_panel);
};
struct dp_tu_calc_input {
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
index 387c889..68fc901 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -623,13 +623,111 @@ void dsi_connector_put_modes(struct drm_connector *connector,
dsi_display->modes = NULL;
}
-int dsi_connector_get_modes(struct drm_connector *connector,
- void *display)
+
+static int dsi_drm_update_edid_name(struct edid *edid, const char *name)
{
- u32 count = 0;
+ u8 *dtd = (u8 *)&edid->detailed_timings[3];
+ u8 standard_header[] = {0x00, 0x00, 0x00, 0xFE, 0x00};
+ u32 dtd_size = 18;
+ u32 header_size = sizeof(standard_header);
+
+ if (!name)
+ return -EINVAL;
+
+ /* Fill standard header */
+ memcpy(dtd, standard_header, header_size);
+
+ dtd_size -= header_size;
+ dtd_size = min_t(u32, dtd_size, strlen(name));
+
+ memcpy(dtd + header_size, name, dtd_size);
+
+ return 0;
+}
+
+static void dsi_drm_update_dtd(struct edid *edid,
+ struct dsi_display_mode *modes, u32 modes_count)
+{
+ u32 i;
+ u32 count = min_t(u32, modes_count, 3);
+
+ for (i = 0; i < count; i++) {
+ struct detailed_timing *dtd = &edid->detailed_timings[i];
+ struct dsi_display_mode *mode = &modes[i];
+ struct dsi_mode_info *timing = &mode->timing;
+ struct detailed_pixel_timing *pd = &dtd->data.pixel_data;
+ u32 h_blank = timing->h_front_porch + timing->h_sync_width +
+ timing->h_back_porch;
+ u32 v_blank = timing->v_front_porch + timing->v_sync_width +
+ timing->v_back_porch;
+ u32 h_img = 0, v_img = 0;
+
+ dtd->pixel_clock = mode->pixel_clk_khz / 10;
+
+ pd->hactive_lo = timing->h_active & 0xFF;
+ pd->hblank_lo = h_blank & 0xFF;
+ pd->hactive_hblank_hi = ((h_blank >> 8) & 0xF) |
+ ((timing->h_active >> 8) & 0xF) << 4;
+
+ pd->vactive_lo = timing->v_active & 0xFF;
+ pd->vblank_lo = v_blank & 0xFF;
+ pd->vactive_vblank_hi = ((v_blank >> 8) & 0xF) |
+ ((timing->v_active >> 8) & 0xF) << 4;
+
+ pd->hsync_offset_lo = timing->h_front_porch & 0xFF;
+ pd->hsync_pulse_width_lo = timing->h_sync_width & 0xFF;
+ pd->vsync_offset_pulse_width_lo =
+ ((timing->v_front_porch & 0xF) << 4) |
+ (timing->v_sync_width & 0xF);
+
+ pd->hsync_vsync_offset_pulse_width_hi =
+ (((timing->h_front_porch >> 8) & 0x3) << 6) |
+ (((timing->h_sync_width >> 8) & 0x3) << 4) |
+ (((timing->v_front_porch >> 4) & 0x3) << 2) |
+ (((timing->v_sync_width >> 4) & 0x3) << 0);
+
+ pd->width_mm_lo = h_img & 0xFF;
+ pd->height_mm_lo = v_img & 0xFF;
+ pd->width_height_mm_hi = (((h_img >> 8) & 0xF) << 4) |
+ ((v_img >> 8) & 0xF);
+
+ pd->hborder = 0;
+ pd->vborder = 0;
+ pd->misc = 0;
+ }
+}
+
+static void dsi_drm_update_checksum(struct edid *edid)
+{
+ u8 *data = (u8 *)edid;
+ u32 i, sum = 0;
+
+ for (i = 0; i < EDID_LENGTH - 1; i++)
+ sum += data[i];
+
+ edid->checksum = 0x100 - (sum & 0xFF);
+}
+
+int dsi_connector_get_modes(struct drm_connector *connector, void *data)
+{
+ int rc, i;
+ u32 count = 0, edid_size;
struct dsi_display_mode *modes = NULL;
struct drm_display_mode drm_mode;
- int rc, i;
+ struct dsi_display *display = data;
+ struct edid edid;
+ const u8 edid_buf[EDID_LENGTH] = {
+ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x44, 0x6D,
+ 0x01, 0x00, 0x01, 0x00, 0x00, 0x00, 0x1B, 0x10, 0x01, 0x03,
+ 0x80, 0x50, 0x2D, 0x78, 0x0A, 0x0D, 0xC9, 0xA0, 0x57, 0x47,
+ 0x98, 0x27, 0x12, 0x48, 0x4C, 0x00, 0x00, 0x00, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x01, 0x01, 0x01, 0x01,
+ };
+
+ edid_size = min_t(u32, sizeof(edid), EDID_LENGTH);
+
+ memcpy(&edid, edid_buf, edid_size);
if (sde_connector_get_panel(connector)) {
/*
@@ -669,6 +767,18 @@ int dsi_connector_get_modes(struct drm_connector *connector,
m->height_mm = connector->display_info.height_mm;
drm_mode_probed_add(connector, m);
}
+
+ rc = dsi_drm_update_edid_name(&edid, display->panel->name);
+ if (rc) {
+ count = 0;
+ goto end;
+ }
+
+ dsi_drm_update_dtd(&edid, modes, count);
+ dsi_drm_update_checksum(&edid);
+ rc = drm_connector_update_edid_property(connector, &edid);
+ if (rc)
+ count = 0;
end:
pr_debug("MODE COUNT =%d\n\n", count);
return count;
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index f7a0ede..d4cc5ce 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -115,7 +115,9 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz)
char *fptr = &fifo->buf[fifo->head];
int n;
- wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0);
+ wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0 || !rd->open);
+ if (!rd->open)
+ return;
/* Note that smp_load_acquire() is not strictly required
* as CIRC_SPACE_TO_END() does not access the tail more
@@ -213,7 +215,10 @@ static int rd_open(struct inode *inode, struct file *file)
static int rd_release(struct inode *inode, struct file *file)
{
struct msm_rd_state *rd = inode->i_private;
+
rd->open = false;
+ wake_up_all(&rd->fifo_event);
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index fce411b..c3be2fb 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -1189,8 +1189,7 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
struct sde_connector *c_conn;
struct sde_connector_state *c_state;
int idx, rc;
- uint64_t fence_user_fd;
- uint64_t __user prev_user_fd;
+ uint64_t fence_fd;
if (!connector || !state || !property) {
SDE_ERROR("invalid argument(s), conn %pK, state %pK, prp %pK\n",
@@ -1233,42 +1232,23 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
if (!val)
goto end;
- rc = copy_from_user(&prev_user_fd, (void __user *)val,
- sizeof(uint64_t));
+ /*
+ * update the offset to a timeline for commit completion
+ */
+ rc = sde_fence_create(c_conn->retire_fence, &fence_fd, 1);
if (rc) {
- SDE_ERROR("copy from user failed rc:%d\n", rc);
- rc = -EFAULT;
+ SDE_ERROR("fence create failed rc:%d\n", rc);
goto end;
}
- /*
- * client is expected to reset the property to -1 before
- * requesting for the retire fence
- */
- if (prev_user_fd == -1) {
- /*
- * update the offset to a timeline for
- * commit completion
- */
- rc = sde_fence_create(c_conn->retire_fence,
- &fence_user_fd, 1);
- if (rc) {
- SDE_ERROR("fence create failed rc:%d\n", rc);
- goto end;
- }
-
- rc = copy_to_user((uint64_t __user *)(uintptr_t)val,
- &fence_user_fd, sizeof(uint64_t));
- if (rc) {
- SDE_ERROR("copy to user failed rc:%d\n", rc);
- /*
- * fence will be released with timeline
- * update
- */
- put_unused_fd(fence_user_fd);
- rc = -EFAULT;
- goto end;
- }
+ rc = copy_to_user((uint64_t __user *)(uintptr_t)val, &fence_fd,
+ sizeof(uint64_t));
+ if (rc) {
+ SDE_ERROR("copy to user failed rc:%d\n", rc);
+ /* fence will be released with timeline update */
+ put_unused_fd(fence_fd);
+ rc = -EFAULT;
+ goto end;
}
break;
case CONNECTOR_PROP_ROI_V1:
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 6ad0612c..93a1f0b 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -5102,8 +5102,7 @@ static int sde_crtc_atomic_set_property(struct drm_crtc *crtc,
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
int idx, ret;
- uint64_t fence_user_fd;
- uint64_t __user prev_user_fd;
+ uint64_t fence_fd;
if (!crtc || !state || !property) {
SDE_ERROR("invalid argument(s)\n");
@@ -5163,34 +5162,19 @@ static int sde_crtc_atomic_set_property(struct drm_crtc *crtc,
if (!val)
goto exit;
- ret = copy_from_user(&prev_user_fd, (void __user *)val,
- sizeof(uint64_t));
+ ret = _sde_crtc_get_output_fence(crtc, state, &fence_fd);
if (ret) {
- SDE_ERROR("copy from user failed rc:%d\n", ret);
- ret = -EFAULT;
+ SDE_ERROR("fence create failed rc:%d\n", ret);
goto exit;
}
- /*
- * client is expected to reset the property to -1 before
- * requesting for the release fence
- */
- if (prev_user_fd == -1) {
- ret = _sde_crtc_get_output_fence(crtc, state,
- &fence_user_fd);
- if (ret) {
- SDE_ERROR("fence create failed rc:%d\n", ret);
- goto exit;
- }
-
- ret = copy_to_user((uint64_t __user *)(uintptr_t)val,
- &fence_user_fd, sizeof(uint64_t));
- if (ret) {
- SDE_ERROR("copy to user failed rc:%d\n", ret);
- put_unused_fd(fence_user_fd);
- ret = -EFAULT;
- goto exit;
- }
+ ret = copy_to_user((uint64_t __user *)(uintptr_t)val, &fence_fd,
+ sizeof(uint64_t));
+ if (ret) {
+ SDE_ERROR("copy to user failed rc:%d\n", ret);
+ put_unused_fd(fence_fd);
+ ret = -EFAULT;
+ goto exit;
}
break;
default:
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index 0fdc41b..dc9cc77 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -351,6 +351,7 @@ int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val,
list_for_each_entry(fc, &ctx->fence_list_head, fence_list) {
if (trigger_value == fc->base.seqno) {
fd = fc->fd;
+ *val = fd;
break;
}
}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 31dcfa8..e0bb15d8 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -65,7 +65,8 @@
#define MAX_DOWNSCALE_RATIO 4
#define SSPP_UNITY_SCALE 1
-#define MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DEFAULT 2
+#define MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_NUMERATOR 11
+#define MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DENOMINATOR 5
#define MAX_DOWNSCALE_RATIO_INLINE_ROT_NRT_DEFAULT 4
#define MAX_PRE_ROT_HEIGHT_INLINE_ROT_DEFAULT 1088
@@ -1143,8 +1144,10 @@ static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
if (IS_SDE_INLINE_ROT_REV_100(sde_cfg->true_inline_rot_rev)) {
set_bit(SDE_SSPP_TRUE_INLINE_ROT_V1, &sspp->features);
sblk->in_rot_format_list = sde_cfg->inline_rot_formats;
- sblk->in_rot_maxdwnscale_rt =
- sde_cfg->true_inline_dwnscale_rt;
+ sblk->in_rot_maxdwnscale_rt_num =
+ sde_cfg->true_inline_dwnscale_rt_num;
+ sblk->in_rot_maxdwnscale_rt_denom =
+ sde_cfg->true_inline_dwnscale_rt_denom;
sblk->in_rot_maxdwnscale_nrt =
sde_cfg->true_inline_dwnscale_nrt;
sblk->in_rot_maxheight =
@@ -3778,6 +3781,8 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->sui_ns_allowed = true;
sde_cfg->sui_misr_supported = true;
sde_cfg->sui_block_xin_mask = 0x3F71;
+ sde_cfg->has_sui_blendstage = true;
+ sde_cfg->has_qos_fl_nocalc = true;
sde_cfg->has_3d_merge_reset = true;
clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
@@ -3806,6 +3811,8 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->sui_misr_supported = true;
sde_cfg->has_decimation = true;
sde_cfg->sui_block_xin_mask = 0x2EE1;
+ sde_cfg->has_sui_blendstage = true;
+ sde_cfg->has_qos_fl_nocalc = true;
sde_cfg->has_3d_merge_reset = true;
clear_bit(MDSS_INTR_LTM_0_INTR, sde_cfg->mdss_irqs);
clear_bit(MDSS_INTR_LTM_1_INTR, sde_cfg->mdss_irqs);
@@ -3823,6 +3830,8 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->sui_ns_allowed = true;
sde_cfg->sui_misr_supported = true;
sde_cfg->sui_block_xin_mask = 0xE71;
+ sde_cfg->has_sui_blendstage = true;
+ sde_cfg->has_qos_fl_nocalc = true;
sde_cfg->has_3d_merge_reset = true;
} else if (IS_KONA_TARGET(hw_rev)) {
sde_cfg->has_cwb_support = true;
@@ -3836,6 +3845,8 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
sde_cfg->sui_ns_allowed = true;
sde_cfg->sui_misr_supported = true;
sde_cfg->sui_block_xin_mask = 0x3F71;
+ sde_cfg->has_sui_blendstage = true;
+ sde_cfg->has_qos_fl_nocalc = true;
sde_cfg->has_3d_merge_reset = true;
clear_bit(MDSS_INTR_AD4_0_INTR, sde_cfg->mdss_irqs);
clear_bit(MDSS_INTR_AD4_1_INTR, sde_cfg->mdss_irqs);
@@ -3844,8 +3855,10 @@ static int _sde_hardware_pre_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
set_bit(SDE_MDP_DHDR_MEMPOOL, &sde_cfg->mdp[0].features);
sde_cfg->has_vig_p010 = true;
sde_cfg->true_inline_rot_rev = SDE_INLINE_ROT_VERSION_1_0_0;
- sde_cfg->true_inline_dwnscale_rt =
- MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DEFAULT;
+ sde_cfg->true_inline_dwnscale_rt_num =
+ MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_NUMERATOR;
+ sde_cfg->true_inline_dwnscale_rt_denom =
+ MAX_DOWNSCALE_RATIO_INLINE_ROT_RT_DENOMINATOR;
sde_cfg->true_inline_dwnscale_nrt =
MAX_DOWNSCALE_RATIO_INLINE_ROT_NRT_DEFAULT;
sde_cfg->true_inline_prefill_fudge_lines = 2;
@@ -3875,16 +3888,10 @@ static int _sde_hardware_post_caps(struct sde_mdss_cfg *sde_cfg,
if (!sde_cfg)
return -EINVAL;
- if (IS_SM8150_TARGET(hw_rev) || IS_SM6150_TARGET(hw_rev) ||
- IS_SDMMAGPIE_TARGET(hw_rev)) {
+ if (sde_cfg->has_sui_blendstage)
sde_cfg->sui_supported_blendstage =
sde_cfg->max_mixer_blendstages - SDE_STAGE_0;
- for (i = 0; i < sde_cfg->sspp_count; i++)
- set_bit(SDE_PERF_SSPP_QOS_FL_NOCALC,
- &sde_cfg->sspp[i].perf_features);
- }
-
for (i = 0; i < sde_cfg->sspp_count; i++) {
if (sde_cfg->sspp[i].sblk) {
max_horz_deci = max(max_horz_deci,
@@ -3893,6 +3900,10 @@ static int _sde_hardware_post_caps(struct sde_mdss_cfg *sde_cfg,
sde_cfg->sspp[i].sblk->maxvdeciexp);
}
+ if (sde_cfg->has_qos_fl_nocalc)
+ set_bit(SDE_PERF_SSPP_QOS_FL_NOCALC,
+ &sde_cfg->sspp[i].perf_features);
+
/*
* set sec-ui blocked SSPP feature flag based on blocked
* xin-mask if sec-ui-misr feature is enabled;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
index 2f7c781..7d25092c 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -593,7 +593,10 @@ struct sde_qos_lut_tbl {
* @format_list: Pointer to list of supported formats
* @virt_format_list: Pointer to list of supported formats for virtual planes
* @in_rot_format_list: Pointer to list of supported formats for inline rotation
- * @in_rot_maxdwnscale_rt: max downscale ratio for inline rotation rt clients
+ * @in_rot_maxdwnscale_rt_num: max downscale ratio for inline rotation
+ * rt clients - numerator
+ * @in_rot_maxdwnscale_rt_denom: max downscale ratio for inline rotation
+ * rt clients - denominator
* @in_rot_maxdwnscale_nrt: max downscale ratio for inline rotation nrt clients
* @in_rot_maxheight: max pre rotated height for inline rotation
* @in_rot_prefill_fudge_lines: prefill fudge lines for inline rotation
@@ -630,7 +633,8 @@ struct sde_sspp_sub_blks {
const struct sde_format_extended *format_list;
const struct sde_format_extended *virt_format_list;
const struct sde_format_extended *in_rot_format_list;
- u32 in_rot_maxdwnscale_rt;
+ u32 in_rot_maxdwnscale_rt_num;
+ u32 in_rot_maxdwnscale_rt_denom;
u32 in_rot_maxdwnscale_nrt;
u32 in_rot_maxheight;
u32 in_rot_prefill_fudge_lines;
@@ -1186,7 +1190,10 @@ struct sde_perf_cfg {
* @vbif_qos_nlvl number of vbif QoS priority level
* @ts_prefill_rev prefill traffic shaper feature revision
* @true_inline_rot_rev inline rotator feature revision
- * @true_inline_dwnscale_rt true inline rotator downscale ratio for rt
+ * @true_inline_dwnscale_rt_num true inline rotator downscale ratio for rt
+ * - numerator
+ * @true_inline_dwnscale_rt_denom true inline rot downscale ratio for rt
+ * - denominator
* @true_inline_dwnscale_nrt true inline rotator downscale ratio for nrt
* @true_inline_prefill_fudge_lines true inline rotator prefill fudge lines
* @true_inline_prefill_lines_nv12 true inline prefill lines for nv12 format
@@ -1197,6 +1204,7 @@ struct sde_perf_cfg {
* @has_qsync Supports qsync feature
* @has_3d_merge_reset Supports 3D merge reset
* @has_decimation Supports decimation
+ * @has_qos_fl_nocalc flag to indicate QoS fill level needs no calculation
* @sc_cfg: system cache configuration
* @uidle_cfg Settings for uidle feature
* @sui_misr_supported indicate if secure-ui-misr is supported
@@ -1208,6 +1216,7 @@ struct sde_perf_cfg {
* @sui_ns_allowed flag to indicate non-secure context banks are allowed
* during secure-ui session
* @sui_supported_blendstage secure-ui supported blendstage
+ * @has_sui_blendstage flag to indicate secure-ui has a blendstage restriction
* @has_cursor indicates if hardware cursor is supported
* @has_vig_p010 indicates if vig pipe supports p010 format
* @inline_rot_formats formats supported by the inline rotator feature
@@ -1242,7 +1251,8 @@ struct sde_mdss_cfg {
u32 vbif_qos_nlvl;
u32 ts_prefill_rev;
u32 true_inline_rot_rev;
- u32 true_inline_dwnscale_rt;
+ u32 true_inline_dwnscale_rt_num;
+ u32 true_inline_dwnscale_rt_denom;
u32 true_inline_dwnscale_nrt;
u32 true_inline_prefill_fudge_lines;
u32 true_inline_prefill_lines_nv12;
@@ -1253,6 +1263,7 @@ struct sde_mdss_cfg {
bool has_qsync;
bool has_3d_merge_reset;
bool has_decimation;
+ bool has_qos_fl_nocalc;
struct sde_sc_cfg sc_cfg;
@@ -1263,6 +1274,7 @@ struct sde_mdss_cfg {
u32 sec_sid_mask[MAX_BLOCKS];
u32 sui_ns_allowed;
u32 sui_supported_blendstage;
+ bool has_sui_blendstage;
bool has_hdr;
bool has_hdr_plus;
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index c5d90984..06628d4 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -2499,9 +2499,14 @@ static void _sde_kms_null_commit(struct drm_device *dev,
}
crtc_state->active = true;
- drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
+ ret = drm_atomic_set_crtc_for_connector(conn_state, enc->crtc);
+ if (ret)
+ SDE_ERROR("error %d setting the crtc\n", ret);
- drm_atomic_commit(state);
+ ret = drm_atomic_commit(state);
+ if (ret)
+ SDE_ERROR("Error %d doing the atomic commit\n", ret);
+
end:
if (state)
drm_atomic_state_put(state);
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 47bfa93..eff1a2d 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1645,14 +1645,16 @@ static int sde_plane_rot_atomic_check(struct drm_plane *plane,
*/
rotation ^= (DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y);
- if (!psde->pipe_sblk->in_rot_maxdwnscale_rt ||
+ if (!psde->pipe_sblk->in_rot_maxdwnscale_rt_num ||
+ !psde->pipe_sblk->in_rot_maxdwnscale_rt_denom ||
!psde->pipe_sblk->in_rot_maxdwnscale_nrt ||
!psde->pipe_sblk->in_rot_maxheight ||
!psde->pipe_sblk->in_rot_format_list ||
!(psde->features & BIT(SDE_SSPP_TRUE_INLINE_ROT_V1))) {
SDE_ERROR_PLANE(psde,
- "wrong config rt:%d nrt:%d fmt:%d h:%d 0x%x\n",
- !psde->pipe_sblk->in_rot_maxdwnscale_rt,
+ "wrong config rt:%d/%d nrt:%d fmt:%d h:%d 0x%x\n",
+ !psde->pipe_sblk->in_rot_maxdwnscale_rt_num,
+ !psde->pipe_sblk->in_rot_maxdwnscale_rt_denom,
!psde->pipe_sblk->in_rot_maxdwnscale_nrt,
!psde->pipe_sblk->in_rot_format_list,
!psde->pipe_sblk->in_rot_maxheight,
@@ -2312,6 +2314,8 @@ static int _sde_plane_validate_scaler_v2(struct sde_plane *psde,
uint32_t hor_req_pixels, hor_fetch_pixels;
uint32_t vert_req_pixels, vert_fetch_pixels;
uint32_t src_w_tmp, src_h_tmp;
+ uint32_t scaler_w, scaler_h;
+ bool rot;
/* re-use color plane 1's config for plane 2 */
if (i == 2)
@@ -2361,20 +2365,27 @@ static int _sde_plane_validate_scaler_v2(struct sde_plane *psde,
}
/*
+ * swap the scaler src width & height for inline-rotation 90
+ * comparison with Pixel-Extension, as PE is based on
+ * pre-rotation and QSEED is based on post-rotation
+ */
+ rot = pstate->rotation & DRM_MODE_ROTATE_90;
+ scaler_w = rot ? pstate->scaler3_cfg.src_height[i]
+ : pstate->scaler3_cfg.src_width[i];
+ scaler_h = rot ? pstate->scaler3_cfg.src_width[i]
+ : pstate->scaler3_cfg.src_height[i];
+ /*
* Alpha plane can only be scaled using bilinear or pixel
* repeat/drop, src_width and src_height are only specified
* for Y and UV plane
*/
- if (i != 3 &&
- (hor_req_pixels != pstate->scaler3_cfg.src_width[i] ||
- vert_req_pixels != pstate->scaler3_cfg.src_height[i])) {
+ if (i != 3 && (hor_req_pixels != scaler_w ||
+ vert_req_pixels != scaler_h)) {
SDE_ERROR_PLANE(psde,
- "roi[%d] %d/%d, scaler src %dx%d, src %dx%d\n",
+ "roi[%d] roi:%dx%d scaler:%dx%d src:%dx%d rot:%d\n",
i, pstate->pixel_ext.roi_w[i],
pstate->pixel_ext.roi_h[i],
- pstate->scaler3_cfg.src_width[i],
- pstate->scaler3_cfg.src_height[i],
- src_w, src_h);
+ scaler_w, scaler_h, src_w, src_h, rot);
return -EINVAL;
}
@@ -2410,7 +2421,8 @@ static int _sde_atomic_check_decimation_scaler(struct drm_plane_state *state,
int ret = 0;
uint32_t deci_w, deci_h, src_deci_w, src_deci_h;
uint32_t scaler_src_w, scaler_src_h;
- uint32_t max_upscale, max_downscale, max_linewidth;
+ uint32_t max_downscale_num, max_downscale_denom;
+ uint32_t max_upscale, max_linewidth;
bool inline_rotation, rt_client;
struct drm_crtc *crtc;
@@ -2439,14 +2451,20 @@ static int _sde_atomic_check_decimation_scaler(struct drm_plane_state *state,
else
rt_client = true;
+ max_downscale_denom = 1;
/* inline rotation RT clients have a different max downscaling limit */
if (inline_rotation) {
- if (rt_client)
- max_downscale = psde->pipe_sblk->in_rot_maxdwnscale_rt;
- else
- max_downscale = psde->pipe_sblk->in_rot_maxdwnscale_nrt;
+ if (rt_client) {
+ max_downscale_num =
+ psde->pipe_sblk->in_rot_maxdwnscale_rt_num;
+ max_downscale_denom =
+ psde->pipe_sblk->in_rot_maxdwnscale_rt_denom;
+ } else {
+ max_downscale_num =
+ psde->pipe_sblk->in_rot_maxdwnscale_nrt;
+ }
} else {
- max_downscale = psde->pipe_sblk->maxdwnscale;
+ max_downscale_num = psde->pipe_sblk->maxdwnscale;
}
/* decimation validation */
@@ -2479,8 +2497,10 @@ static int _sde_atomic_check_decimation_scaler(struct drm_plane_state *state,
/* check max scaler capability */
else if (((scaler_src_w * max_upscale) < dst->w) ||
((scaler_src_h * max_upscale) < dst->h) ||
- ((dst->w * max_downscale) < scaler_src_w) ||
- ((dst->h * max_downscale) < scaler_src_h)) {
+ (((dst->w * max_downscale_num) / max_downscale_denom)
+ < scaler_src_w) ||
+ (((dst->h * max_downscale_num) / max_downscale_denom)
+ < scaler_src_h)) {
SDE_ERROR_PLANE(psde,
"too much scaling requested %ux%u->%ux%u rot:%d\n",
scaler_src_w, scaler_src_h, dst->w, dst->h,
@@ -3545,8 +3565,16 @@ static void _sde_plane_install_properties(struct drm_plane *plane,
const struct sde_format_extended *inline_rot_fmt_list;
sde_kms_info_add_keyint(info, "true_inline_rot_rev", 1);
- sde_kms_info_add_keyint(info, "true_inline_dwnscale_rt",
- psde->pipe_sblk->in_rot_maxdwnscale_rt);
+ sde_kms_info_add_keyint(info,
+ "true_inline_dwnscale_rt",
+ (int) (psde->pipe_sblk->in_rot_maxdwnscale_rt_num /
+ psde->pipe_sblk->in_rot_maxdwnscale_rt_denom));
+ sde_kms_info_add_keyint(info,
+ "true_inline_dwnscale_rt_numerator",
+ psde->pipe_sblk->in_rot_maxdwnscale_rt_num);
+ sde_kms_info_add_keyint(info,
+ "true_inline_dwnscale_rt_denominator",
+ psde->pipe_sblk->in_rot_maxdwnscale_rt_denom);
sde_kms_info_add_keyint(info, "true_inline_dwnscale_nrt",
psde->pipe_sblk->in_rot_maxdwnscale_nrt);
sde_kms_info_add_keyint(info, "true_inline_max_height",
@@ -4276,10 +4304,14 @@ static int _sde_plane_init_debugfs(struct drm_plane *plane)
&psde->debugfs_default_scale);
if (cfg->features & BIT(SDE_SSPP_TRUE_INLINE_ROT_V1)) {
- debugfs_create_u32("in_rot_max_downscale_rt",
+ debugfs_create_u32("in_rot_max_downscale_rt_num",
0600,
psde->debugfs_root,
- (u32 *) &psde->pipe_sblk->in_rot_maxdwnscale_rt);
+ (u32 *) &psde->pipe_sblk->in_rot_maxdwnscale_rt_num);
+ debugfs_create_u32("in_rot_max_downscale_rt_denom",
+ 0600,
+ psde->debugfs_root,
+ (u32 *) &psde->pipe_sblk->in_rot_maxdwnscale_rt_denom);
debugfs_create_u32("in_rot_max_downscale_nrt",
0600,
psde->debugfs_root,
diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.c b/drivers/gpu/drm/msm/sde_hdcp_2x.c
index 54dfc8f..c72ab1b 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_2x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_2x.c
@@ -333,20 +333,17 @@ static void sde_hdcp_2x_clean(struct sde_hdcp_2x_ctrl *hdcp)
static u8 sde_hdcp_2x_stream_type(u8 min_enc_level)
{
u8 stream_type = 0;
- u8 const hdcp_min_enc_level_0 = 0, hdcp_min_enc_level_1 = 1,
- hdcp_min_enc_level_2 = 2;
- u8 const stream_type_0 = 0, stream_type_1 = 1;
switch (min_enc_level) {
- case hdcp_min_enc_level_0:
- case hdcp_min_enc_level_1:
- stream_type = stream_type_0;
+ case 0:
+ case 1:
+ stream_type = 0;
break;
- case hdcp_min_enc_level_2:
- stream_type = stream_type_1;
+ case 2:
+ stream_type = 1;
break;
default:
- stream_type = stream_type_0;
+ stream_type = 0;
break;
}
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
index cada0fb..d14441c 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.h
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _SDE_POWER_HANDLE_H_
@@ -14,8 +14,8 @@
#define SDE_POWER_HANDLE_ENABLE_NRT_BUS_IB_QUOTA 0
#define SDE_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
-#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA 1800000000
-#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA 1800000000
+#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_IB_QUOTA 3000000000
+#define SDE_POWER_HANDLE_CONT_SPLASH_BUS_AB_QUOTA 3000000000
#include <linux/sde_io_util.h>
#include <soc/qcom/cx_ipeak.h>
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 061d2e0..416da53 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -92,6 +92,8 @@ static void sun4i_hdmi_disable(struct drm_encoder *encoder)
val = readl(hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
val &= ~SUN4I_HDMI_VID_CTRL_ENABLE;
writel(val, hdmi->base + SUN4I_HDMI_VID_CTRL_REG);
+
+ clk_disable_unprepare(hdmi->tmds_clk);
}
static void sun4i_hdmi_enable(struct drm_encoder *encoder)
@@ -102,6 +104,8 @@ static void sun4i_hdmi_enable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Enabling the HDMI Output\n");
+ clk_prepare_enable(hdmi->tmds_clk);
+
sun4i_hdmi_setup_avi_infoframes(hdmi, mode);
val |= SUN4I_HDMI_PKT_CTRL_TYPE(0, SUN4I_HDMI_PKT_AVI);
val |= SUN4I_HDMI_PKT_CTRL_TYPE(1, SUN4I_HDMI_PKT_END);
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 8819898..3ddb7f2 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -967,6 +967,7 @@
#define A6XX_GMU_RPMH_HYST_CTRL 0x1F8E9
#define A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE 0x1F8EC
#define A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_MSG 0x1F900
+#define A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP 0x1F901
#define A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x1F9F0
#define A6XX_GMU_LLM_GLM_SLEEP_CTRL 0x1F957
#define A6XX_GMU_LLM_GLM_SLEEP_STATUS 0x1F958
diff --git a/drivers/gpu/msm/adreno_a6xx_gmu.c b/drivers/gpu/msm/adreno_a6xx_gmu.c
index 5f308c3..2dc05a2 100644
--- a/drivers/gpu/msm/adreno_a6xx_gmu.c
+++ b/drivers/gpu/msm/adreno_a6xx_gmu.c
@@ -325,6 +325,10 @@ static int a6xx_gmu_start(struct kgsl_device *device)
kgsl_regwrite(device, A6XX_GMU_CX_GMU_WFI_CONFIG, 0x0);
+ /* Set the log wptr index */
+ gmu_core_regwrite(device, A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP,
+ gmu->log_wptr_retention);
+
/* Bring GMU out of reset */
gmu_core_regwrite(device, A6XX_GMU_CM3_SYSRESET, 0);
if (timed_poll_check(device,
@@ -423,6 +427,9 @@ static int a6xx_rpmh_power_off_gpu(struct kgsl_device *device)
/* Make sure M3 is in reset before going on */
wmb();
+ gmu_core_regread(device, A6XX_GPU_GMU_CX_GMU_PWR_COL_CP_RESP,
+ &gmu->log_wptr_retention);
+
/* RSC sleep sequence is different on v1 */
if (adreno_is_a630v1(adreno_dev))
gmu_core_regwrite(device, A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 +
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 37e6f5f..ffb52fc 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -22,9 +22,6 @@
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "kgsl."
-#define GMU_CONTEXT_USER 0
-#define GMU_CONTEXT_KERNEL 1
-
#define GMU_CM3_CFG_NONMASKINTR_SHIFT 9
struct gmu_iommu_context {
@@ -363,11 +360,11 @@ static int gmu_iommu_init(struct gmu_device *gmu, struct device_node *node)
}
/*
- * gmu_kmem_close() - free all kernel memory allocated for GMU and detach GMU
+ * gmu_memory_close() - free all memory allocated for GMU and detach GMU
* from IOMMU context banks.
* @gmu: Pointer to GMU device
*/
-static void gmu_kmem_close(struct gmu_device *gmu)
+static void gmu_memory_close(struct gmu_device *gmu)
{
int i;
struct gmu_memdesc *md;
@@ -395,19 +392,14 @@ static void gmu_kmem_close(struct gmu_device *gmu)
clear_bit(i, &gmu->kmem_bitmap);
}
- /* Detach the device from SMMU context bank */
- iommu_detach_device(ctx->domain, ctx->dev);
+ for (i = 0; i < ARRAY_SIZE(gmu_ctx); i++) {
+ ctx = &gmu_ctx[i];
- /* free kernel mem context */
- iommu_domain_free(ctx->domain);
-}
-
-static void gmu_memory_close(struct gmu_device *gmu)
-{
- gmu_kmem_close(gmu);
- /* Free user memory context */
- iommu_domain_free(gmu_ctx[GMU_CONTEXT_USER].domain);
-
+ if (ctx->domain) {
+ iommu_detach_device(ctx->domain, ctx->dev);
+ iommu_domain_free(ctx->domain);
+ }
+ }
}
static enum gmu_mem_type gmu_get_blk_memtype(struct gmu_device *gmu,
@@ -459,38 +451,31 @@ int gmu_memory_probe(struct kgsl_device *device)
{
struct gmu_device *gmu = KGSL_GMU_DEVICE(device);
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- int ret;
/* Allocates & maps memory for HFI */
- gmu->hfi_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
- HFIMEM_SIZE, (IOMMU_READ | IOMMU_WRITE));
- if (IS_ERR(gmu->hfi_mem)) {
- ret = PTR_ERR(gmu->hfi_mem);
- goto err_ret;
- }
+ if (IS_ERR_OR_NULL(gmu->hfi_mem))
+ gmu->hfi_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
+ HFIMEM_SIZE, (IOMMU_READ | IOMMU_WRITE));
+ if (IS_ERR(gmu->hfi_mem))
+ return PTR_ERR(gmu->hfi_mem);
/* Allocates & maps GMU crash dump memory */
if (adreno_is_a630(adreno_dev) || adreno_is_a615_family(adreno_dev)) {
- gmu->dump_mem = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
- DUMPMEM_SIZE, (IOMMU_READ | IOMMU_WRITE));
- if (IS_ERR(gmu->dump_mem)) {
- ret = PTR_ERR(gmu->dump_mem);
- goto err_ret;
- }
+ if (IS_ERR_OR_NULL(gmu->dump_mem))
+ gmu->dump_mem = allocate_gmu_kmem(gmu,
+ GMU_NONCACHED_KERNEL, 0,
+ DUMPMEM_SIZE,
+ (IOMMU_READ | IOMMU_WRITE));
+ if (IS_ERR(gmu->dump_mem))
+ return PTR_ERR(gmu->dump_mem);
}
/* GMU master log */
- gmu->gmu_log = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
- LOGMEM_SIZE, (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
- if (IS_ERR(gmu->gmu_log)) {
- ret = PTR_ERR(gmu->gmu_log);
- goto err_ret;
- }
-
- return 0;
-err_ret:
- gmu_memory_close(gmu);
- return ret;
+ if (IS_ERR_OR_NULL(gmu->gmu_log))
+ gmu->gmu_log = allocate_gmu_kmem(gmu, GMU_NONCACHED_KERNEL, 0,
+ LOGMEM_SIZE,
+ (IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV));
+ return PTR_ERR_OR_ZERO(gmu->gmu_log);
}
/*
diff --git a/drivers/gpu/msm/kgsl_gmu.h b/drivers/gpu/msm/kgsl_gmu.h
index e5845b7..e57a844 100644
--- a/drivers/gpu/msm/kgsl_gmu.h
+++ b/drivers/gpu/msm/kgsl_gmu.h
@@ -93,6 +93,11 @@ enum gmu_mem_type {
GMU_MEM_TYPE_MAX,
};
+enum gmu_context_index {
+ GMU_CONTEXT_USER = 0,
+ GMU_CONTEXT_KERNEL,
+};
+
/**
* struct gmu_memdesc - Gmu shared memory object descriptor
* @hostptr: Kernel virtual address
@@ -108,7 +113,7 @@ struct gmu_memdesc {
phys_addr_t physaddr;
uint64_t size;
enum gmu_mem_type mem_type;
- uint32_t ctx_idx;
+ enum gmu_context_index ctx_idx;
};
struct gmu_bw_votes {
@@ -172,6 +177,7 @@ struct kgsl_mailbox {
* @idle_level: Minimal GPU idle power level
* @fault_count: GMU fault count
* @mailbox: Messages to AOP for ACD enable/disable go through this
+ * @log_wptr_retention: Store the log wptr offset on slumber
*/
struct gmu_device {
struct {
@@ -214,6 +220,7 @@ struct gmu_device {
struct gmu_memdesc kmem_entries[GMU_KERNEL_ENTRIES];
unsigned long kmem_bitmap;
const struct gmu_vma_entry *vma;
+ unsigned int log_wptr_retention;
};
struct gmu_memdesc *gmu_get_memdesc(struct gmu_device *gmu,
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c6cbfee..d3936b3 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -3021,14 +3021,6 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
return ret;
}
-static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
-{
- struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
-
- if (smmu_domain->tlb_ops)
- smmu_domain->tlb_ops->tlb_sync(smmu_domain);
-}
-
#define MAX_MAP_SG_BATCH_SIZE (SZ_4M)
static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg, unsigned int nents, int prot)
@@ -4000,8 +3992,6 @@ static struct iommu_ops arm_smmu_ops = {
.map = arm_smmu_map,
.unmap = arm_smmu_unmap,
.map_sg = arm_smmu_map_sg,
- .flush_iotlb_all = arm_smmu_iotlb_sync,
- .iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.iova_to_phys_hard = arm_smmu_iova_to_phys_hard,
.add_device = arm_smmu_add_device,
diff --git a/drivers/irqchip/irq-gic-v3-mbi.c b/drivers/irqchip/irq-gic-v3-mbi.c
index ad70e7c..fbfa7ff 100644
--- a/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/drivers/irqchip/irq-gic-v3-mbi.c
@@ -24,7 +24,7 @@ struct mbi_range {
unsigned long *bm;
};
-static struct mutex mbi_lock;
+static DEFINE_MUTEX(mbi_lock);
static phys_addr_t mbi_phys_base;
static struct mbi_range *mbi_ranges;
static unsigned int mbi_range_nr;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
index 2dddd9d..e855a54 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
@@ -105,6 +105,7 @@ static struct cam_vfe_top_ver3_reg_offset_common vfe480_top_common_reg = {
.diag_config = 0x00000064,
.diag_sensor_status_0 = 0x00000068,
.diag_sensor_status_1 = 0x00000098,
+ .bus_overflow_status = 0x0000AA68,
};
static struct cam_vfe_camif_lite_ver3_reg vfe480_camif_rdi[3] = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
index 9ebeb55..c9d66ed 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe_lite48x.h
@@ -47,6 +47,7 @@ static struct cam_vfe_top_ver3_reg_offset_common vfe48x_top_common_reg = {
.reg_update_cmd = 0x00000020,
.diag_config = 0x00000050,
.diag_sensor_status_0 = 0x00000054,
+ .bus_overflow_status = 0x00001A68,
};
static struct cam_vfe_camif_lite_ver3_reg vfe48x_camif_rdi[4] = {
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
index 322ef84..bc1a4f8 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
@@ -408,6 +408,115 @@ static int cam_vfe_camif_lite_process_cmd(
return rc;
}
+static void cam_vfe_camif_lite_print_status(uint32_t val,
+ uint32_t violation_status, int ret, bool is_ife_lite)
+{
+ uint32_t violation_mask = 0x3F00;
+
+ if (is_ife_lite) {
+
+ if (ret == CAM_VFE_IRQ_STATUS_OVERFLOW) {
+ if (val & 0x100)
+ CAM_INFO(CAM_ISP, "RDI3 FRAME DROP");
+
+ if (val & 0x80)
+ CAM_INFO(CAM_ISP, "RDI2 FRAME DROP");
+
+ if (val & 0x40)
+ CAM_INFO(CAM_ISP, "RDI1 FRAME DROP");
+
+ if (val & 0x20)
+ CAM_INFO(CAM_ISP, "RDI0 FRAME DROP");
+
+ if (val & 0x8)
+ CAM_INFO(CAM_ISP, "RDI3 OVERFLOW");
+
+ if (val & 0x4)
+ CAM_INFO(CAM_ISP, "RDI2 OVERFLOW");
+
+ if (val & 0x2)
+ CAM_INFO(CAM_ISP, "RDI1 OVERFLOW");
+
+ if (val & 0x1)
+ CAM_INFO(CAM_ISP, "RDI0 OVERFLOW");
+ }
+
+ if (ret == CAM_VFE_IRQ_STATUS_VIOLATION) {
+
+ if (val & 0x800)
+ CAM_INFO(CAM_ISP, "RDI3 CAMIF VIOLATION");
+
+ if (val & 0x400)
+ CAM_INFO(CAM_ISP, "RDI2 CAMIF VIOLATION");
+
+ if (val & 0x200)
+ CAM_INFO(CAM_ISP, "RDI1 CAMIF VIOLATION");
+
+ if (val & 0x100)
+ CAM_INFO(CAM_ISP, "RDI0 CAMIF VIOLATION");
+ }
+ } else {
+
+ if (ret == CAM_VFE_IRQ_STATUS_OVERFLOW) {
+ if (val & 0x200000)
+ CAM_INFO(CAM_ISP, "RDI2 FRAME DROP");
+
+ if (val & 0x400000)
+ CAM_INFO(CAM_ISP, "RDI1 FRAME DROP");
+
+ if (val & 0x800000)
+ CAM_INFO(CAM_ISP, "RDI0 FRAME DROP");
+
+ if (val & 0x1000000)
+ CAM_INFO(CAM_ISP, "PD PIPE FRAME DROP");
+
+ if (val & 0x8000000)
+ CAM_INFO(CAM_ISP, "RDI2 OVERFLOW");
+
+ if (val & 0x10000000)
+ CAM_INFO(CAM_ISP, "RDI1 OVERFLOW");
+
+ if (val & 0x20000000)
+ CAM_INFO(CAM_ISP, "RDI0 OVERFLOW");
+
+ if (val & 0x40000000)
+ CAM_INFO(CAM_ISP, "PD PIPE OVERFLOW");
+ }
+
+ if (ret == CAM_VFE_IRQ_STATUS_VIOLATION) {
+ if (val & 0x02000)
+ CAM_INFO(CAM_ISP, "PD CAMIF VIOLATION");
+
+ if (val & 0x04000)
+ CAM_INFO(CAM_ISP, "PD VIOLATION");
+
+ if (val & 0x08000)
+ CAM_INFO(CAM_ISP, "LCR CAMIF VIOLATION");
+
+ if (val & 0x010000)
+ CAM_INFO(CAM_ISP, "LCR VIOLATION");
+
+ if (val & 0x020000)
+ CAM_INFO(CAM_ISP, "RDI0 CAMIF VIOLATION");
+
+ if (val & 0x040000)
+ CAM_INFO(CAM_ISP, "RDI1 CAMIF VIOLATION");
+
+ if (val & 0x080000)
+ CAM_INFO(CAM_ISP, "RDI2 CAMIF VIOLATION");
+ }
+
+ if (violation_mask & violation_status)
+ CAM_INFO(CAM_ISP, "LCR VIOLATION, module = %d",
+ violation_mask & violation_status);
+
+ violation_mask = 0x0F0000;
+ if (violation_mask & violation_status)
+ CAM_INFO(CAM_ISP, "PD Violation, module = %d",
+ violation_mask & violation_status);
+ }
+}
+
static int cam_vfe_camif_lite_handle_irq_top_half(uint32_t evt_id,
struct cam_irq_th_payload *th_payload)
{
@@ -450,14 +559,15 @@ static int cam_vfe_camif_lite_handle_irq_bottom_half(
void *handler_priv,
void *evt_payload_priv)
{
- int ret = CAM_VFE_IRQ_STATUS_MAX;
- struct cam_isp_resource_node *camif_lite_node;
- struct cam_vfe_mux_camif_lite_data *camif_lite_priv;
- struct cam_vfe_top_irq_evt_payload *payload;
- struct cam_isp_hw_event_info evt_info;
- uint32_t irq_status0;
- uint32_t irq_status1;
- uint32_t irq_status2;
+ int ret = CAM_VFE_IRQ_STATUS_MAX;
+ struct cam_isp_resource_node *camif_lite_node;
+ struct cam_vfe_mux_camif_lite_data *camif_lite_priv;
+ struct cam_vfe_top_irq_evt_payload *payload;
+ struct cam_isp_hw_event_info evt_info;
+ uint32_t irq_status[CAM_IFE_IRQ_REGISTERS_MAX];
+ int i = 0;
+ bool is_ife_lite = true;
+ uint32_t val = 0;
if (!handler_priv || !evt_payload_priv) {
CAM_ERR(CAM_ISP, "Invalid params");
@@ -468,9 +578,8 @@ static int cam_vfe_camif_lite_handle_irq_bottom_half(
camif_lite_priv = camif_lite_node->res_priv;
payload = evt_payload_priv;
- irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
- irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
- irq_status2 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS2];
+ for (i = 0; i < CAM_IFE_IRQ_REGISTERS_MAX; i++)
+ irq_status[i] = payload->irq_reg_val[i];
evt_info.hw_idx = camif_lite_node->hw_intf->hw_idx;
evt_info.res_id = camif_lite_node->res_id;
@@ -478,9 +587,16 @@ static int cam_vfe_camif_lite_handle_irq_bottom_half(
CAM_DBG(CAM_ISP,
"irq_status_0 = 0x%x, irq_status_1 = 0x%x, irq_status_2 = 0x%x",
- irq_status0, irq_status1, irq_status2);
+ irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0],
+ irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1],
+ irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2]);
- if (irq_status1 & camif_lite_priv->reg_data->sof_irq_mask) {
+ if (strnstr(camif_lite_priv->soc_info->compatible, "lite",
+ strlen(camif_lite_priv->soc_info->compatible)) == NULL)
+ is_ife_lite = false;
+
+ if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+ & camif_lite_priv->reg_data->sof_irq_mask) {
CAM_DBG(CAM_ISP, "Received SOF");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
@@ -489,7 +605,8 @@ static int cam_vfe_camif_lite_handle_irq_bottom_half(
CAM_ISP_HW_EVENT_SOF, (void *)&evt_info);
}
- if (irq_status1 & camif_lite_priv->reg_data->epoch0_irq_mask) {
+ if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+ & camif_lite_priv->reg_data->epoch0_irq_mask) {
CAM_DBG(CAM_ISP, "Received EPOCH");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
@@ -498,7 +615,8 @@ static int cam_vfe_camif_lite_handle_irq_bottom_half(
CAM_ISP_HW_EVENT_EPOCH, (void *)&evt_info);
}
- if (irq_status1 & camif_lite_priv->reg_data->eof_irq_mask) {
+ if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+ & camif_lite_priv->reg_data->eof_irq_mask) {
CAM_DBG(CAM_ISP, "Received EOF\n");
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
@@ -507,7 +625,8 @@ static int cam_vfe_camif_lite_handle_irq_bottom_half(
CAM_ISP_HW_EVENT_EOF, (void *)&evt_info);
}
- if (irq_status0 & camif_lite_priv->reg_data->error_irq_mask0) {
+ if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0]
+ & camif_lite_priv->reg_data->error_irq_mask0) {
CAM_DBG(CAM_ISP, "Received VFE Overflow ERROR\n");
evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW;
@@ -516,10 +635,54 @@ static int cam_vfe_camif_lite_handle_irq_bottom_half(
camif_lite_priv->event_cb(camif_lite_priv->priv,
CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+ val = cam_io_r(camif_lite_priv->mem_base +
+ camif_lite_priv->common_reg->bus_overflow_status);
+
+ if (is_ife_lite && val) {
+
+ if (val & 0x01)
+ CAM_INFO(CAM_ISP,
+ "RDI0 bus overflow");
+
+ if (val & 0x02)
+ CAM_INFO(CAM_ISP,
+ "RDI1 bus overflow");
+
+ if (val & 0x04)
+ CAM_INFO(CAM_ISP,
+ "RDI2 bus overflow");
+
+ if (val & 0x08)
+ CAM_INFO(CAM_ISP,
+ "RDI3 bus overflow");
+ }
+
+ if (!is_ife_lite && val) {
+
+ if (val & 0x0800)
+ CAM_INFO(CAM_ISP, "CAMIF PD bus overflow");
+
+ if (val & 0x0400000)
+ CAM_INFO(CAM_ISP, "LCR bus overflow");
+
+ if (val & 0x0800000)
+ CAM_INFO(CAM_ISP, "RDI0 bus overflow");
+
+ if (val & 0x01000000)
+ CAM_INFO(CAM_ISP, "RDI1 bus overflow");
+
+ if (val & 0x02000000)
+ CAM_INFO(CAM_ISP, "RDI2 bus overflow");
+ }
+
ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+ cam_vfe_camif_lite_print_status(
+ irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0],
+ irq_status[CAM_IFE_IRQ_VIOLATION_STATUS], ret,
+ is_ife_lite);
}
- if (irq_status2 & camif_lite_priv->reg_data->error_irq_mask2) {
+ if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2]) {
CAM_DBG(CAM_ISP, "Received CAMIF Lite Violation ERROR\n");
evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION;
@@ -529,6 +692,10 @@ static int cam_vfe_camif_lite_handle_irq_bottom_half(
CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
ret = CAM_VFE_IRQ_STATUS_VIOLATION;
+ cam_vfe_camif_lite_print_status(
+ irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2],
+ irq_status[CAM_IFE_IRQ_VIOLATION_STATUS], ret,
+ is_ife_lite);
}
cam_vfe_camif_lite_put_evt_payload(camif_lite_priv, &payload);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
index f54ad2e..f8cd120 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
@@ -732,6 +732,262 @@ static int cam_vfe_camif_ver3_process_cmd(
return rc;
}
+static void cam_vfe_camif_ver3_print_status(uint32_t val,
+ uint32_t violation_status, int ret)
+{
+ uint32_t violation_mask = 0x3F;
+ uint32_t module_id;
+
+ if (ret == CAM_VFE_IRQ_STATUS_OVERFLOW) {
+ if (val & 0x0200)
+ CAM_INFO(CAM_ISP, "DSP OVERFLOW");
+
+ if (val & 0x2000000)
+ CAM_INFO(CAM_ISP, "PIXEL PIPE FRAME DROP");
+
+ if (val & 0x80000000)
+ CAM_INFO(CAM_ISP, "PIXEL PIPE OVERFLOW");
+ }
+
+ if (ret == CAM_VFE_IRQ_STATUS_VIOLATION) {
+
+ if (val & 0x080)
+ CAM_INFO(CAM_ISP, "DSP IFE PROTOCOL VIOLATION");
+
+ if (val & 0x0100)
+ CAM_INFO(CAM_ISP, "IFE DSP TX PROTOCOL VIOLATION");
+
+ if (val & 0x0200)
+ CAM_INFO(CAM_ISP, "DSP IFE RX PROTOCOL VIOLATION");
+
+ if (val & 0x0400)
+ CAM_INFO(CAM_ISP, "PP PREPROCESS VIOLATION");
+
+ if (val & 0x0800)
+ CAM_INFO(CAM_ISP, "PP CAMIF VIOLATION");
+
+ if (val & 0x01000)
+ CAM_INFO(CAM_ISP, "PP VIOLATION");
+
+ if (val & 0x0100000)
+ CAM_INFO(CAM_ISP,
+ "DSP_TX_VIOLATION:overflow on DSP interface TX path FIFO");
+
+ if (val & 0x0200000)
+ CAM_INFO(CAM_ISP,
+ "DSP_RX_VIOLATION:overflow on DSP interface RX path FIFO");
+
+ if (val & 0x10000000)
+ CAM_INFO(CAM_ISP, "DSP ERROR VIOLATION");
+
+ if (val & 0x20000000)
+ CAM_INFO(CAM_ISP,
+ "DIAG VIOLATION: HBI is less than the minimum required HBI");
+ }
+
+ if (violation_mask & violation_status) {
+ CAM_INFO(CAM_ISP, "PP VIOLATION, module = %d",
+ violation_mask & violation_status);
+ module_id = violation_mask & violation_status;
+ switch (module_id) {
+ case 0:
+ CAM_INFO(CAM_ISP, "Demux");
+ break;
+ case 1:
+ CAM_INFO(CAM_ISP,
+ "CHROMA_UP");
+ break;
+ case 2:
+ CAM_INFO(CAM_ISP,
+ "PEDESTAL");
+ break;
+ case 3:
+ CAM_INFO(CAM_ISP,
+ "LINEARIZATION");
+ break;
+ case 4:
+ CAM_INFO(CAM_ISP,
+ "BPC_PDPC");
+ break;
+ case 5:
+ CAM_INFO(CAM_ISP,
+ "HDR_BINCORRECT");
+ break;
+ case 6:
+ CAM_INFO(CAM_ISP, "ABF");
+ break;
+ case 7:
+ CAM_INFO(CAM_ISP, "LSC");
+ break;
+ case 8:
+ CAM_INFO(CAM_ISP, "DEMOSAIC");
+ break;
+ case 9:
+ CAM_INFO(CAM_ISP,
+ "COLOR_CORRECT");
+ break;
+ case 10:
+ CAM_INFO(CAM_ISP, "GTM");
+ break;
+ case 11:
+ CAM_INFO(CAM_ISP, "GLUT");
+ break;
+ case 12:
+ CAM_INFO(CAM_ISP,
+ "COLOR_XFORM");
+ break;
+ case 13:
+ CAM_INFO(CAM_ISP,
+ "CROP_RND_CLAMP_PIXEL_RAW_OUT");
+ break;
+ case 14:
+ CAM_INFO(CAM_ISP,
+ "DOWNSCALE_MN_Y_FD_OUT");
+ break;
+ case 15:
+ CAM_INFO(CAM_ISP,
+ "DOWNSCALE_MN_C_FD_OUT");
+ break;
+ case 16:
+ CAM_INFO(CAM_ISP,
+ "CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_FD_OUT");
+ break;
+ case 17:
+ CAM_INFO(CAM_ISP,
+ "CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_FD_OUT");
+ break;
+ case 18:
+ CAM_INFO(CAM_ISP,
+ "DOWNSCALE_MN_Y_DISP_OUT");
+ break;
+ case 19:
+ CAM_INFO(CAM_ISP,
+ "DOWNSCALE_MN_C_DISP_OUT");
+ break;
+ case 20:
+ CAM_INFO(CAM_ISP,
+ "module: CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_DISP_OUT");
+ break;
+ case 21:
+ CAM_INFO(CAM_ISP,
+ "CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_DISP_OUT");
+ break;
+ case 22:
+ CAM_INFO(CAM_ISP,
+ "DOWNSCALE_4TO1_Y_DISP_DS4_OUT");
+ break;
+ case 23:
+ CAM_INFO(CAM_ISP,
+ "DOWNSCALE_4TO1_C_DISP_DS4_OUT");
+ break;
+ case 24:
+ CAM_INFO(CAM_ISP,
+ "CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_DISP_DS4_OUT");
+ break;
+ case 25:
+ CAM_INFO(CAM_ISP,
+ "CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_DISP_DS4_OUT");
+ break;
+ case 26:
+ CAM_INFO(CAM_ISP,
+ "DOWNSCALE_4TO1_Y_DISP_DS16_OUT");
+ break;
+ case 27:
+ CAM_INFO(CAM_ISP,
+ "DOWNSCALE_4TO1_C_DISP_DS16_OUT");
+ break;
+ case 28:
+ CAM_INFO(CAM_ISP,
+ "CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_DISP_DS16_OUT");
+ break;
+ case 29:
+ CAM_INFO(CAM_ISP,
+ "CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_DISP_DS16_OUT");
+ break;
+ case 30:
+ CAM_INFO(CAM_ISP,
+ "DOWNSCALE_MN_Y_VID_OUT");
+ break;
+ case 31:
+ CAM_INFO(CAM_ISP,
+ "DOWNSCALE_MN_C_VID_OUT");
+ break;
+ case 32:
+ CAM_INFO(CAM_ISP,
+ "CROP_RND_CLAMP_POST_DOWNSCALE_MN_Y_VID_OUT");
+ break;
+ case 33:
+ CAM_INFO(CAM_ISP,
+ "CROP_RND_CLAMP_POST_DOWNSCALE_MN_C_VID_OUT");
+ break;
+ case 34:
+ CAM_INFO(CAM_ISP, "DSX_Y_VID_OUT");
+ break;
+ case 35:
+ CAM_INFO(CAM_ISP, "DSX_C_VID_OUT");
+ break;
+ case 36:
+ CAM_INFO(CAM_ISP,
+ "CROP_RND_CLAMP_POST_DSX_Y_VID_OUT");
+ break;
+ case 37:
+ CAM_INFO(CAM_ISP,
+ "CROP_RND_CLAMP_POST_DSX_C_VID_OUT");
+ break;
+ case 38:
+ CAM_INFO(CAM_ISP,
+ "DOWNSCALE_4TO1_Y_VID_DS16_OUT");
+ break;
+ case 39:
+ CAM_INFO(CAM_ISP,
+ "DOWNSCALE_4TO1_C_VID_DS16_OUT");
+ break;
+ case 40:
+ CAM_INFO(CAM_ISP,
+ "CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_Y_VID_DS16_OUT");
+ break;
+ case 41:
+ CAM_INFO(CAM_ISP,
+ "CROP_RND_CLAMP_POST_DOWNSCALE_4TO1_C_VID_DS16_OUT");
+ break;
+ case 42:
+ CAM_INFO(CAM_ISP, "BLS");
+ break;
+ case 43:
+ CAM_INFO(CAM_ISP, "STATS_TINTLESS_BG");
+ break;
+ case 44:
+ CAM_INFO(CAM_ISP, "STATS_HDR_BHIST");
+ break;
+ case 45:
+ CAM_INFO(CAM_ISP, "STATS_HDR_BE");
+ break;
+ case 46:
+ CAM_INFO(CAM_ISP, "STATS_AWB_BG");
+ break;
+ case 47:
+ CAM_INFO(CAM_ISP, "STATS_BHIST");
+ break;
+ case 48:
+ CAM_INFO(CAM_ISP, "STATS_BAF");
+ break;
+ case 49:
+ CAM_INFO(CAM_ISP, "STATS_RS");
+ break;
+ case 50:
+ CAM_INFO(CAM_ISP, "STATS_CS");
+ break;
+ case 51:
+ CAM_INFO(CAM_ISP, "STATS_IHIST");
+ break;
+ default:
+ CAM_ERR(CAM_ISP,
+ "Invalid Module ID:%d", module_id);
+ break;
+ }
+ }
+}
+
static int cam_vfe_camif_ver3_handle_irq_top_half(uint32_t evt_id,
struct cam_irq_th_payload *th_payload)
{
@@ -773,15 +1029,14 @@ static int cam_vfe_camif_ver3_handle_irq_top_half(uint32_t evt_id,
static int cam_vfe_camif_ver3_handle_irq_bottom_half(void *handler_priv,
void *evt_payload_priv)
{
- int ret = CAM_VFE_IRQ_STATUS_ERR;
- struct cam_isp_resource_node *camif_node;
- struct cam_vfe_mux_camif_ver3_data *camif_priv;
- struct cam_vfe_top_irq_evt_payload *payload;
- struct cam_isp_hw_event_info evt_info;
- uint32_t irq_status0;
- uint32_t irq_status1;
- uint32_t irq_status2;
- uint32_t val;
+ int ret = CAM_VFE_IRQ_STATUS_ERR;
+ struct cam_isp_resource_node *camif_node;
+ struct cam_vfe_mux_camif_ver3_data *camif_priv;
+ struct cam_vfe_top_irq_evt_payload *payload;
+ struct cam_isp_hw_event_info evt_info;
+ uint32_t irq_status[CAM_IFE_IRQ_REGISTERS_MAX];
+ uint32_t val;
+ int i = 0;
if (!handler_priv || !evt_payload_priv) {
CAM_ERR(CAM_ISP,
@@ -793,15 +1048,15 @@ static int cam_vfe_camif_ver3_handle_irq_bottom_half(void *handler_priv,
camif_node = handler_priv;
camif_priv = camif_node->res_priv;
payload = evt_payload_priv;
- irq_status0 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS0];
- irq_status1 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS1];
- irq_status2 = payload->irq_reg_val[CAM_IFE_IRQ_CAMIF_REG_STATUS2];
+ for (i = 0; i < CAM_IFE_IRQ_REGISTERS_MAX; i++)
+ irq_status[i] = payload->irq_reg_val[i];
evt_info.hw_idx = camif_node->hw_intf->hw_idx;
evt_info.res_id = camif_node->res_id;
evt_info.res_type = camif_node->res_type;
- if (irq_status1 & camif_priv->reg_data->sof_irq_mask) {
+ if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+ & camif_priv->reg_data->sof_irq_mask) {
if ((camif_priv->enable_sof_irq_debug) &&
(camif_priv->irq_debug_cnt <=
CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX)) {
@@ -824,7 +1079,8 @@ static int cam_vfe_camif_ver3_handle_irq_bottom_half(void *handler_priv,
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
}
- if (irq_status1 & camif_priv->reg_data->epoch0_irq_mask) {
+ if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+ & camif_priv->reg_data->epoch0_irq_mask) {
CAM_DBG(CAM_ISP, "Received EPOCH");
if (camif_priv->event_cb)
@@ -834,7 +1090,8 @@ static int cam_vfe_camif_ver3_handle_irq_bottom_half(void *handler_priv,
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
}
- if (irq_status1 & camif_priv->reg_data->eof_irq_mask) {
+ if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS1]
+ & camif_priv->reg_data->eof_irq_mask) {
CAM_DBG(CAM_ISP, "Received EOF");
if (camif_priv->event_cb)
@@ -844,29 +1101,104 @@ static int cam_vfe_camif_ver3_handle_irq_bottom_half(void *handler_priv,
ret = CAM_VFE_IRQ_STATUS_SUCCESS;
}
- if (irq_status0 & camif_priv->reg_data->error_irq_mask0) {
- CAM_ERR(CAM_ISP, "Received VFE Overflow ERROR\n");
-
- evt_info.err_type = CAM_VFE_IRQ_STATUS_OVERFLOW;
+ if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0]
+ & camif_priv->reg_data->error_irq_mask0) {
+ CAM_ERR(CAM_ISP, "VFE Overflow");
if (camif_priv->event_cb)
camif_priv->event_cb(camif_priv->priv,
CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
+ val = cam_io_r(camif_priv->mem_base +
+ camif_priv->common_reg->bus_overflow_status);
+
+ if (val) {
+
+ if (val & 0x01)
+ CAM_INFO(CAM_ISP, "VID Y 1:1 bus overflow");
+
+ if (val & 0x02)
+ CAM_INFO(CAM_ISP, "VID C 1:1 bus overflow");
+
+ if (val & 0x04)
+ CAM_INFO(CAM_ISP, "VID YC 4:1 bus overflow");
+
+ if (val & 0x08)
+ CAM_INFO(CAM_ISP, "VID YC 16:1 bus overflow");
+
+ if (val & 0x010)
+ CAM_INFO(CAM_ISP, "DISP Y 1:1 bus overflow");
+
+ if (val & 0x020)
+ CAM_INFO(CAM_ISP, "DISP C 1:1 bus overflow");
+
+ if (val & 0x040)
+ CAM_INFO(CAM_ISP, "DISP YC 4:1 bus overflow");
+
+ if (val & 0x080)
+ CAM_INFO(CAM_ISP, "DISP YC 16:1 bus overflow");
+
+ if (val & 0x0100)
+ CAM_INFO(CAM_ISP, "FD Y bus overflow");
+
+ if (val & 0x0200)
+ CAM_INFO(CAM_ISP, "FD C bus overflow");
+
+ if (val & 0x0400)
+ CAM_INFO(CAM_ISP,
+ "PIXEL RAW DUMP bus overflow");
+
+ if (val & 0x01000)
+ CAM_INFO(CAM_ISP, "STATS HDR BE bus overflow");
+
+ if (val & 0x02000)
+ CAM_INFO(CAM_ISP,
+ "STATS HDR BHIST bus overflow");
+
+ if (val & 0x04000)
+ CAM_INFO(CAM_ISP,
+ "STATS TINTLESS BG bus overflow");
+
+ if (val & 0x08000)
+ CAM_INFO(CAM_ISP, "STATS AWB BG bus overflow");
+
+ if (val & 0x010000)
+ CAM_INFO(CAM_ISP, "STATS BHIST bus overflow");
+
+ if (val & 0x020000)
+ CAM_INFO(CAM_ISP, "STATS RS bus overflow");
+
+ if (val & 0x040000)
+ CAM_INFO(CAM_ISP, "STATS CS bus overflow");
+
+ if (val & 0x080000)
+ CAM_INFO(CAM_ISP, "STATS IHIST bus overflow");
+
+ if (val & 0x0100000)
+ CAM_INFO(CAM_ISP, "STATS BAF bus overflow");
+
+ if (val & 0x0200000)
+ CAM_INFO(CAM_ISP, "PDAF bus overflow");
+ }
+
ret = CAM_VFE_IRQ_STATUS_OVERFLOW;
+ cam_vfe_camif_ver3_print_status(
+ irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS0],
+ irq_status[CAM_IFE_IRQ_VIOLATION_STATUS], ret);
cam_vfe_camif_ver3_reg_dump(camif_node->res_priv);
}
- if (irq_status2 & camif_priv->reg_data->error_irq_mask2) {
- CAM_ERR(CAM_ISP, "Received CAMIF Violation ERROR\n");
-
- evt_info.err_type = CAM_VFE_IRQ_STATUS_VIOLATION;
+ if (irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2]) {
+ CAM_ERR(CAM_ISP, "VFE Violation");
if (camif_priv->event_cb)
camif_priv->event_cb(camif_priv->priv,
CAM_ISP_HW_EVENT_ERROR, (void *)&evt_info);
ret = CAM_VFE_IRQ_STATUS_VIOLATION;
+ cam_vfe_camif_ver3_print_status(
+ irq_status[CAM_IFE_IRQ_CAMIF_REG_STATUS2],
+ irq_status[CAM_IFE_IRQ_VIOLATION_STATUS], ret);
cam_vfe_camif_ver3_reg_dump(camif_node->res_priv);
}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
index a83048d..08ad0eb 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
@@ -36,6 +36,7 @@ struct cam_vfe_top_ver3_reg_offset_common {
uint32_t diag_config;
uint32_t diag_sensor_status_0;
uint32_t diag_sensor_status_1;
+ uint32_t bus_overflow_status;
};
struct cam_vfe_top_ver3_hw_info {
diff --git a/drivers/media/platform/msm/cvp/Makefile b/drivers/media/platform/msm/cvp/Makefile
index 2525565..0d80860 100644
--- a/drivers/media/platform/msm/cvp/Makefile
+++ b/drivers/media/platform/msm/cvp/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
ccflags-y += -I$(srctree)/drivers/media/platform/msm/cvp/
+ccflags-y += -I$(srctree)/drivers/media/platform/msm/synx/
msm-cvp-objs := msm_v4l2_cvp.o \
msm_v4l2_private.o \
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.c b/drivers/media/platform/msm/cvp/cvp_hfi.c
index 65a108d..3741ac2 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.c
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.c
@@ -37,6 +37,58 @@
#define REG_ADDR_OFFSET_BITMASK 0x000FFFFF
#define QDSS_IOVA_START 0x80001000
+const struct msm_cvp_hfi_defs cvp_hfi_defs[] = {
+ {
+ .size = HFI_DFS_CONFIG_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_DFS_CONFIG,
+ .buf_offset = 0,
+ .buf_num = 0,
+ .resp = HAL_SESSION_DFS_CONFIG_CMD_DONE,
+ },
+ {
+ .size = HFI_DFS_FRAME_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_DFS_FRAME,
+ .buf_offset = HFI_DFS_FRAME_BUFFERS_OFFSET,
+ .buf_num = HFI_DFS_BUF_NUM,
+ .resp = HAL_NO_RESP,
+ },
+ {
+ .size = HFI_DME_CONFIG_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_DME_CONFIG,
+ .buf_offset = 0,
+ .buf_num = 0,
+ .resp = HAL_SESSION_DME_CONFIG_CMD_DONE,
+ },
+ {
+ .size = HFI_DME_BASIC_CONFIG_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_DME_BASIC_CONFIG,
+ .buf_offset = 0,
+ .buf_num = 0,
+ .resp = HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE,
+ },
+ {
+ .size = HFI_DME_FRAME_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_DME_FRAME,
+ .buf_offset = HFI_DME_FRAME_BUFFERS_OFFSET,
+ .buf_num = HFI_DME_BUF_NUM,
+ .resp = HAL_NO_RESP,
+ },
+ {
+ .size = HFI_PERSIST_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS,
+ .buf_offset = HFI_PERSIST_BUFFERS_OFFSET,
+ .buf_num = HFI_PERSIST_BUF_NUM,
+ .resp = HAL_NO_RESP,
+ },
+ {
+ .size = HFI_DS_CMD_SIZE,
+ .type = HFI_CMD_SESSION_CVP_DS,
+ .buf_offset = HFI_DS_BUFFERS_OFFSET,
+ .buf_num = HFI_DS_BUF_NUM,
+ .resp = HAL_NO_RESP,
+ },
+};
+
static struct hal_device_data hal_ctxt;
#define TZBSP_MEM_PROTECT_VIDEO_VAR 0x8
@@ -72,7 +124,7 @@ const struct msm_cvp_gov_data CVP_DEFAULT_BUS_VOTE = {
.data_count = 0,
};
-const int cvp_max_packets = 1000;
+const int cvp_max_packets = 32;
static void venus_hfi_pm_handler(struct work_struct *work);
static DECLARE_DELAYED_WORK(venus_hfi_pm_work, venus_hfi_pm_handler);
@@ -147,6 +199,18 @@ static inline bool is_sys_cache_present(struct venus_hfi_device *device)
#define ROW_SIZE 32
+int get_pkt_index(struct cvp_hal_session_cmd_pkt *hdr)
+{
+ int i, pkt_num = ARRAY_SIZE(cvp_hfi_defs);
+
+ for (i = 0; i < pkt_num; i++)
+ if ((cvp_hfi_defs[i].size*sizeof(unsigned int) == hdr->size) &&
+ (cvp_hfi_defs[i].type == hdr->packet_type))
+ return i;
+
+ return -EINVAL;
+}
+
static void __dump_packet(u8 *packet, enum cvp_msg_prio log_level)
{
u32 c = 0, packet_size = *(u32 *)packet;
@@ -1292,7 +1356,7 @@ static int __set_clocks(struct venus_hfi_device *device, u32 freq)
}
trace_msm_cvp_perf_clock_scale(cl->name, freq);
- dprintk(CVP_PROF, "Scaling clock %s to %u\n",
+ dprintk(CVP_DBG, "Scaling clock %s to %u\n",
cl->name, freq);
}
}
@@ -2641,11 +2705,11 @@ static int venus_hfi_session_stop(void *session)
return rc;
}
-static int venus_hfi_session_cvp_dme_config(void *sess,
- struct msm_cvp_internal_dmeconfig *dme_config)
+static int venus_hfi_session_send(void *sess,
+ struct cvp_kmd_hfi_packet *in_pkt)
{
int rc = 0;
- struct hfi_cmd_session_cvp_dme_config_packet pkt;
+ struct cvp_kmd_hfi_packet pkt;
struct hal_session *session = sess;
struct venus_hfi_device *device;
@@ -2659,174 +2723,26 @@ static int venus_hfi_session_cvp_dme_config(void *sess,
if (!__is_session_valid(device, session, __func__)) {
rc = -EINVAL;
- goto dme_config_err;
+ goto err_send_pkt;
}
- rc = call_hfi_pkt_op(device, session_cvp_dme_config,
- &pkt, session, dme_config);
+ rc = call_hfi_pkt_op(device, session_cvp_hfi_packetize,
+ &pkt, session, in_pkt);
if (rc) {
dprintk(CVP_ERR,
- "Session get buf req: failed to create pkt\n");
- goto dme_config_err;
- }
-
- if (__iface_cmdq_write(session->device, &pkt))
- rc = -ENOTEMPTY;
- dprintk(CVP_DBG, "%s: calling __iface_cmdq_write\n", __func__);
-dme_config_err:
- mutex_unlock(&device->lock);
- return rc;
-}
-
-static int venus_hfi_session_cvp_dme_frame(void *sess,
- struct msm_cvp_internal_dmeframe *dme_frame)
-{
- int rc = 0;
- struct hfi_cmd_session_cvp_dme_frame_packet pkt;
- struct hal_session *session = sess;
- struct venus_hfi_device *device;
-
- if (!session || !session->device) {
- dprintk(CVP_ERR, "invalid session");
- return -ENODEV;
- }
-
- device = session->device;
- mutex_lock(&device->lock);
-
- if (!__is_session_valid(device, session, __func__)) {
- rc = -EINVAL;
- goto dme_frame_err;
- }
- rc = call_hfi_pkt_op(device, session_cvp_dme_frame,
- &pkt, session, dme_frame);
- if (rc) {
- dprintk(CVP_ERR,
- "Session get buf req: failed to create pkt\n");
- goto dme_frame_err;
- }
-
- if (__iface_cmdq_write(session->device, &pkt))
- rc = -ENOTEMPTY;
- dprintk(CVP_DBG, "%s: done calling __iface_cmdq_write\n", __func__);
-dme_frame_err:
- mutex_unlock(&device->lock);
- return rc;
-}
-
-
-static int venus_hfi_session_cvp_persist(void *sess,
- struct msm_cvp_internal_persist_cmd *pbuf_cmd)
-{
- int rc = 0;
- struct hfi_cmd_session_cvp_persist_packet pkt;
- struct hal_session *session = sess;
- struct venus_hfi_device *device;
-
- if (!session || !session->device) {
- dprintk(CVP_ERR, "invalid session");
- return -ENODEV;
- }
-
- device = session->device;
- mutex_lock(&device->lock);
-
- if (!__is_session_valid(device, session, __func__)) {
- rc = -EINVAL;
- goto persist_err;
- }
- rc = call_hfi_pkt_op(device, session_cvp_persist,
- &pkt, session, pbuf_cmd);
- if (rc) {
- dprintk(CVP_ERR,
- "Failed to create persist pkt\n");
- goto persist_err;
+ "failed to create pkt\n");
+ goto err_send_pkt;
}
if (__iface_cmdq_write(session->device, &pkt))
rc = -ENOTEMPTY;
- dprintk(CVP_DBG, "%s: done calling __iface_cmdq_write\n", __func__);
-persist_err:
- mutex_unlock(&device->lock);
- return rc;
-}
-
-static int venus_hfi_session_cvp_dfs_config(void *sess,
- struct msm_cvp_internal_dfsconfig *dfs_config)
-{
- int rc = 0;
- struct hfi_cmd_session_cvp_dfs_config_packet pkt;
- struct hal_session *session = sess;
- struct venus_hfi_device *device;
-
- if (!session || !session->device) {
- dprintk(CVP_ERR, "invalid session");
- return -ENODEV;
- }
-
- device = session->device;
- mutex_lock(&device->lock);
-
- if (!__is_session_valid(device, session, __func__)) {
- rc = -EINVAL;
- goto err_create_pkt;
- }
- rc = call_hfi_pkt_op(device, session_cvp_dfs_config,
- &pkt, session, dfs_config);
- if (rc) {
- dprintk(CVP_ERR,
- "Session get buf req: failed to create pkt\n");
- goto err_create_pkt;
- }
-
- dprintk(CVP_DBG, "%s: calling __iface_cmdq_write\n", __func__);
- if (__iface_cmdq_write(session->device, &pkt))
- rc = -ENOTEMPTY;
- dprintk(CVP_DBG, "%s: done calling __iface_cmdq_write\n", __func__);
-err_create_pkt:
+err_send_pkt:
mutex_unlock(&device->lock);
return rc;
return rc;
}
-static int venus_hfi_session_cvp_dfs_frame(void *sess,
- struct msm_cvp_internal_dfsframe *dfs_frame)
-{
- int rc = 0;
- struct hfi_cmd_session_cvp_dfs_frame_packet pkt;
- struct hal_session *session = sess;
- struct venus_hfi_device *device;
-
- if (!session || !session->device) {
- dprintk(CVP_ERR, "invalid session");
- return -ENODEV;
- }
-
- device = session->device;
- mutex_lock(&device->lock);
-
- if (!__is_session_valid(device, session, __func__)) {
- rc = -EINVAL;
- goto err_create_pkt;
- }
- rc = call_hfi_pkt_op(device, session_cvp_dfs_frame,
- &pkt, session, dfs_frame);
- if (rc) {
- dprintk(CVP_ERR,
- "Session get buf req: failed to create pkt\n");
- goto err_create_pkt;
- }
-
- dprintk(CVP_DBG, "%s: calling __iface_cmdq_write\n", __func__);
- if (__iface_cmdq_write(session->device, &pkt))
- rc = -ENOTEMPTY;
- dprintk(CVP_DBG, "%s: done calling __iface_cmdq_write\n", __func__);
-err_create_pkt:
- mutex_unlock(&device->lock);
- return rc;
-}
-
static int venus_hfi_session_get_buf_req(void *sess)
{
struct hfi_cmd_session_get_property_packet pkt;
@@ -3284,6 +3200,7 @@ static void **get_session_id(struct msm_cvp_cb_info *info)
case HAL_SESSION_UNREGISTER_BUFFER_DONE:
case HAL_SESSION_DFS_CONFIG_CMD_DONE:
case HAL_SESSION_DME_CONFIG_CMD_DONE:
+ case HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE:
case HAL_SESSION_DFS_FRAME_CMD_DONE:
case HAL_SESSION_DME_FRAME_CMD_DONE:
case HAL_SESSION_PERSIST_CMD_DONE:
@@ -3357,6 +3274,9 @@ static int __response_handler(struct venus_hfi_device *device)
"Corrupt/unknown packet found, discarding\n");
--packet_count;
continue;
+ } else if (info->response_type == HAL_NO_RESP) {
+ --packet_count;
+ continue;
}
/* Process the packet types that we're interested in */
@@ -4014,30 +3934,14 @@ static int __enable_regulators(struct venus_hfi_device *device)
dprintk(CVP_DBG, "Enabling regulators\n");
venus_hfi_for_each_regulator(device, rinfo) {
- if (rinfo->has_hw_power_collapse) {
- rc = regulator_set_mode(rinfo->regulator,
- REGULATOR_MODE_FAST);
- if (rc) {
- dprintk(CVP_ERR,
- "Failed to enable hwctrl%s: %d\n",
- rinfo->name, rc);
- goto err_reg_enable_failed;
- }
- dprintk(CVP_DBG, "Enabled regulator %s hw ctrl\n",
- rinfo->name);
-
- } else {
- rc = regulator_enable(rinfo->regulator);
- if (rc) {
- dprintk(CVP_ERR,
- "Failed to enable %s: %d\n",
- rinfo->name, rc);
- goto err_reg_enable_failed;
- }
-
- dprintk(CVP_DBG, "Enabled regulator %s\n",
- rinfo->name);
+ rc = regulator_enable(rinfo->regulator);
+ if (rc) {
+ dprintk(CVP_ERR, "Failed to enable %s: %d\n",
+ rinfo->name, rc);
+ goto err_reg_enable_failed;
}
+
+ dprintk(CVP_DBG, "Enabled regulator %s\n", rinfo->name);
c++;
}
@@ -4743,11 +4647,7 @@ static void venus_init_hfi_callbacks(struct hfi_device *hdev)
hdev->session_continue = venus_hfi_session_continue;
hdev->session_stop = venus_hfi_session_stop;
hdev->session_get_buf_req = venus_hfi_session_get_buf_req;
- hdev->session_cvp_dfs_config = venus_hfi_session_cvp_dfs_config;
- hdev->session_cvp_dfs_frame = venus_hfi_session_cvp_dfs_frame;
- hdev->session_cvp_dme_config = venus_hfi_session_cvp_dme_config;
- hdev->session_cvp_dme_frame = venus_hfi_session_cvp_dme_frame;
- hdev->session_cvp_persist = venus_hfi_session_cvp_persist;
+ hdev->session_cvp_hfi_send = venus_hfi_session_send;
hdev->session_flush = venus_hfi_session_flush;
hdev->session_set_property = venus_hfi_session_set_property;
hdev->session_get_property = venus_hfi_session_get_property;
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi.h b/drivers/media/platform/msm/cvp/cvp_hfi.h
index 8a3de61..0d9f5f3 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi.h
@@ -382,6 +382,8 @@ struct hfi_uncompressed_plane_actual_constraints_info {
#define HFI_CMD_SESSION_CVP_DME_CONFIG\
(HFI_CMD_SESSION_CVP_START + 0x039)
+#define HFI_CMD_SESSION_CVP_DME_BASIC_CONFIG\
+ (HFI_CMD_SESSION_CVP_START + 0x03B)
#define HFI_CMD_SESSION_CVP_DME_FRAME\
(HFI_CMD_SESSION_CVP_START + 0x03A)
@@ -581,42 +583,6 @@ struct HFI_CVP_COLOR_PLANE_INFO {
u32 buf_size[HFI_MAX_PLANES];
};
-struct hfi_cmd_session_hdr {
- u32 size;
- u32 packet_type;
- u32 session_id;
-};
-
-struct hfi_cmd_session_cvp_dfs_config_packet {
- u32 cvp_internal_dfs_config[CVP_DFS_CONFIG_CMD_SIZE];
-};
-
-struct hfi_cmd_session_cvp_dfs_frame_packet {
- u32 cvp_dfs_frame[CVP_DFS_FRAME_BUFFERS_OFFSET];
- u32 left_view_buffer_addr;
- u32 left_view_buffer_size;
- u32 right_view_buffer_addr;
- u32 right_view_buffer_size;
- u32 disparity_map_buffer_addr;
- u32 disparity_map_buffer_size;
- u32 occlusion_mask_buffer_addr;
- u32 occlusion_mask_buffer_size;
-};
-
-struct hfi_cmd_session_cvp_dme_config_packet {
- u32 cvp_internal_dme_config[CVP_DME_CONFIG_CMD_SIZE];
-};
-
-struct hfi_cmd_session_cvp_dme_frame_packet {
- u32 cvp_dme_frame[CVP_DME_FRAME_BUFFERS_OFFSET];
- struct buf_desc bufs[8];
-};
-
-struct hfi_cmd_session_cvp_persist_packet {
- u32 cvp_persist_frame[CVP_PERSIST_BUFFERS_OFFSET];
- struct buf_desc bufs[CVP_PSRSIST_BUF_NUM];
-};
-
struct hfi_cmd_session_release_buffer_packet {
u32 size;
u32 packet_type;
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_api.h b/drivers/media/platform/msm/cvp/cvp_hfi_api.h
index dae783a..00a188c 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_api.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_api.h
@@ -13,6 +13,7 @@
#include <linux/hash.h>
#include "msm_cvp_core.h"
#include "msm_cvp_resources.h"
+#include "cvp_hfi_helper.h"
#define CONTAINS(__a, __sz, __t) (\
(__t >= __a) && \
@@ -56,6 +57,26 @@
/* 16 encoder and 16 decoder sessions */
#define CVP_MAX_SESSIONS 32
+#define HFI_DFS_CONFIG_CMD_SIZE 38
+#define HFI_DFS_FRAME_CMD_SIZE 16
+#define HFI_DFS_FRAME_BUFFERS_OFFSET 8
+#define HFI_DFS_BUF_NUM 4
+
+#define HFI_DME_CONFIG_CMD_SIZE 194
+#define HFI_DME_BASIC_CONFIG_CMD_SIZE 51
+#define HFI_DME_FRAME_CMD_SIZE 28
+#define HFI_DME_FRAME_BUFFERS_OFFSET 12
+#define HFI_DME_BUF_NUM 8
+
+#define HFI_PERSIST_CMD_SIZE 11
+#define HFI_PERSIST_BUFFERS_OFFSET 7
+#define HFI_PERSIST_BUF_NUM 2
+
+#define HFI_DS_CMD_SIZE 54
+#define HFI_DS_BUFFERS_OFFSET 48
+#define HFI_DS_BUF_NUM 3
+
+
enum cvp_status {
CVP_ERR_NONE = 0x0,
CVP_ERR_FAIL = 0x80000000,
@@ -1082,7 +1103,7 @@ union hal_get_property {
#define IS_HAL_SESSION_CMD(cmd) ((cmd) >= HAL_SESSION_EVENT_CHANGE && \
(cmd) <= HAL_SESSION_ERROR)
enum hal_command_response {
- /* SYSTEM COMMANDS_DONE*/
+ HAL_NO_RESP,
HAL_SYS_INIT_DONE,
HAL_SYS_SET_RESOURCE_DONE,
HAL_SYS_RELEASE_RESOURCE_DONE,
@@ -1112,6 +1133,7 @@ enum hal_command_response {
HAL_SESSION_DFS_CONFIG_CMD_DONE,
HAL_SESSION_DFS_FRAME_CMD_DONE,
HAL_SESSION_DME_CONFIG_CMD_DONE,
+ HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE,
HAL_SESSION_DME_FRAME_CMD_DONE,
HAL_SESSION_PERSIST_CMD_DONE,
HAL_SESSION_PROPERTY_INFO,
@@ -1252,6 +1274,7 @@ struct msm_cvp_cb_cmd_done {
enum cvp_status status;
u32 size;
union {
+ struct hfi_msg_session_hdr msg_hdr;
struct cvp_resource_hdr resource_hdr;
struct cvp_buffer_addr_info buffer_addr_info;
struct cvp_frame_plane_config frame_plane_config;
@@ -1411,73 +1434,17 @@ struct hal_vbv_hdr_buf_size {
(((q) && (q)->op) ? ((q)->op(args)) : 0)
/* DFS related structures */
-struct msm_cvp_internal_dfsconfig {
- struct list_head list;
- struct msm_smem smem;
- struct msm_cvp_dfs_config dfs_config;
-};
-
struct buf_desc {
u32 fd;
u32 size;
};
-/**
- * struct msm_cvp_dfs_frame_kmd - argument passed with VIDIOC_CVP_CMD
- * @cvp_dfs_frame: parameters for DFS frame command
- * @left_view_buffer_fd: fd for left view buffer
- * @left_view_buffer_size: size for left view buffer
- * @right_view_buffer_fd: fd for right view buffer
- * @right_view_buffer_size: size for right view buffer
- * @disparity_map_buffer_fd: fd for disparity map buffer
- * @disparity_map_buffer_size: size for disparity map buffer
- * @occlusion_mask_buffer_fd: fd for occlusion mask buffer
- * @occlusion_mask_buffer_size: size for occlusion mask buffer
- */
-
-struct msm_cvp_dfs_frame_kmd {
- unsigned int cvp_dfs_frame[CVP_DFS_FRAME_BUFFERS_OFFSET];
- unsigned int left_view_buffer_fd;
- unsigned int left_view_buffer_size;
- unsigned int right_view_buffer_fd;
- unsigned int right_view_buffer_size;
- unsigned int disparity_map_buffer_fd;
- unsigned int disparity_map_buffer_size;
- unsigned int occlusion_mask_buffer_fd;
- unsigned int occlusion_mask_buffer_size;
-};
-
-
-struct msm_cvp_internal_dfsframe {
- struct list_head list;
- struct msm_cvp_dfs_frame_kmd dfs_frame;
-};
-
-/* DME related structures */
-struct msm_cvp_internal_dmeconfig {
- struct list_head list;
- struct msm_smem smem;
- struct msm_cvp_dme_config dme_config;
-};
-
-struct msm_cvp_dme_frame_kmd {
- unsigned int cvp_dme_frame[CVP_DME_FRAME_BUFFERS_OFFSET];
- struct buf_desc bufs[CVP_DME_BUF_NUM];
-};
-
-struct msm_cvp_internal_dmeframe {
- struct list_head list;
- struct msm_cvp_dme_frame_kmd dme_frame;
-};
-
-struct msm_cvp_persist_kmd {
- unsigned int cvp_pcmd[CVP_PERSIST_BUFFERS_OFFSET];
- struct buf_desc bufs[CVP_PSRSIST_BUF_NUM];
-};
-
-struct msm_cvp_internal_persist_cmd {
- struct list_head list;
- struct msm_cvp_persist_kmd persist_cmd;
+struct msm_cvp_hfi_defs {
+ unsigned int size;
+ unsigned int type;
+ unsigned int buf_offset;
+ unsigned int buf_num;
+ enum hal_command_response resp;
};
struct hfi_device {
@@ -1502,18 +1469,8 @@ struct hfi_device {
int (*session_start)(void *sess);
int (*session_continue)(void *sess);
int (*session_stop)(void *sess);
- int (*session_cvp_operation_config)(void *sess,
- struct cvp_frame_data *input_frame);
- int (*session_cvp_dfs_config)(void *sess,
- struct msm_cvp_internal_dfsconfig *dfs_config);
- int (*session_cvp_dfs_frame)(void *sess,
- struct msm_cvp_internal_dfsframe *dfs_frame);
- int (*session_cvp_dme_config)(void *sess,
- struct msm_cvp_internal_dmeconfig *dme_config);
- int (*session_cvp_dme_frame)(void *sess,
- struct msm_cvp_internal_dmeframe *dme_frame);
- int (*session_cvp_persist)(void *sess,
- struct msm_cvp_internal_persist_cmd *pbuf_cmd);
+ int (*session_cvp_hfi_send)(void *sess,
+ struct cvp_kmd_hfi_packet *in_pkt);
int (*session_get_buf_req)(void *sess);
int (*session_flush)(void *sess, enum hal_flush flush_mode);
int (*session_set_property)(void *sess, enum hal_property ptype,
@@ -1547,4 +1504,7 @@ u32 cvp_get_hfi_codec(enum hal_video_codec hal_codec);
enum hal_domain cvp_get_hal_domain(u32 hfi_domain);
enum hal_video_codec cvp_get_hal_codec(u32 hfi_codec);
+int get_pkt_index(struct cvp_hal_session_cmd_pkt *hdr);
+extern const struct msm_cvp_hfi_defs cvp_hfi_defs[];
+
#endif /*__CVP_HFI_API_H__ */
diff --git a/drivers/media/platform/msm/cvp/cvp_hfi_helper.h b/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
index e11a85b..4707ec1 100644
--- a/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
+++ b/drivers/media/platform/msm/cvp/cvp_hfi_helper.h
@@ -1020,7 +1020,7 @@ struct hfi_cmd_session_cvp_release_buffers_packet {
u32 buffer_idx;
};
-struct hfi_msg_session_cvp_release_buffers_done_packet {
+struct hfi_msg_session_hdr {
u32 size;
u32 packet_type;
u32 session_id;
diff --git a/drivers/media/platform/msm/cvp/hfi_packetization.c b/drivers/media/platform/msm/cvp/hfi_packetization.c
index 86da0579..4bd0ae0 100644
--- a/drivers/media/platform/msm/cvp/hfi_packetization.c
+++ b/drivers/media/platform/msm/cvp/hfi_packetization.c
@@ -465,6 +465,7 @@ static int create_pkt_cmd_sys_ubwc_config(
hfi->bank_spreading = ubwc_config->bank_spreading;
hfi->override_bit_info.bank_spreading_override =
ubwc_config->override_bit_info.bank_spreading_override;
+ hfi->size = sizeof(struct hfi_cmd_sys_set_ubwc_config_packet_type);
return rc;
}
@@ -852,171 +853,42 @@ int cvp_create_pkt_cmd_session_get_buf_req(
return rc;
}
-int cvp_create_pkt_cmd_session_cvp_dfs_config(
- struct hfi_cmd_session_cvp_dfs_config_packet *pkt,
+int cvp_create_pkt_cmd_hfi_packetize(
+ struct cvp_kmd_hfi_packet *out_pkt,
struct hal_session *session,
- struct msm_cvp_internal_dfsconfig *dfs_config)
+ struct cvp_kmd_hfi_packet *in_pkt)
{
- struct hfi_cmd_session_hdr *ptr =
- (struct hfi_cmd_session_hdr *)pkt;
+ int def_idx;
+ struct cvp_hal_session_cmd_pkt *ptr =
+ (struct cvp_hal_session_cmd_pkt *)in_pkt;
- if (!pkt || !session)
+ if (!out_pkt || !in_pkt || !session)
return -EINVAL;
- memcpy(pkt, &dfs_config->dfs_config,
- CVP_DFS_CONFIG_CMD_SIZE*sizeof(unsigned int));
+ def_idx = get_pkt_index(ptr);
+ if (def_idx < 0)
+ goto error_hfi_packet;
- if (ptr->size != CVP_DFS_CONFIG_CMD_SIZE*sizeof(unsigned int))
- goto error_dfs_config;
+ if (cvp_hfi_defs[def_idx].type != ptr->packet_type)
+ goto error_hfi_packet;
- if (ptr->packet_type != HFI_CMD_SESSION_CVP_DFS_CONFIG)
- goto error_dfs_config;
+ if ((cvp_hfi_defs[def_idx].size*sizeof(unsigned int)) != ptr->size)
+ goto error_hfi_packet;
if (ptr->session_id != hash32_ptr(session))
- goto error_dfs_config;
+ goto error_hfi_packet;
+
+ memcpy(out_pkt, in_pkt, ptr->size);
return 0;
-error_dfs_config:
- dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
+error_hfi_packet:
+ dprintk(CVP_ERR, "%s incorrect packet: size=%d type=%d sessionid=%d\n",
__func__, ptr->size, ptr->packet_type, ptr->session_id);
return -EINVAL;
}
-
-int cvp_create_pkt_cmd_session_cvp_dfs_frame(
- struct hfi_cmd_session_cvp_dfs_frame_packet *pkt,
- struct hal_session *session,
- struct msm_cvp_internal_dfsframe *dfs_frame)
-{
- struct hfi_cmd_session_hdr *ptr =
- (struct hfi_cmd_session_hdr *)pkt;
-
- if (!pkt || !session)
- return -EINVAL;
-
- memcpy(pkt, &dfs_frame->dfs_frame,
- CVP_DFS_FRAME_CMD_SIZE*sizeof(unsigned int));
-
- if (ptr->size != CVP_DFS_FRAME_CMD_SIZE*sizeof(unsigned int))
- goto error_dfs_frame;
-
- if (ptr->packet_type != HFI_CMD_SESSION_CVP_DFS_FRAME)
- goto error_dfs_frame;
-
- if (ptr->session_id != hash32_ptr(session))
- goto error_dfs_frame;
-
-
- return 0;
-
-error_dfs_frame:
- dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
- __func__, ptr->size, ptr->packet_type, ptr->session_id);
-
- return -EINVAL;
-}
-
-int cvp_create_pkt_cmd_session_cvp_dme_config(
- struct hfi_cmd_session_cvp_dme_config_packet *pkt,
- struct hal_session *session,
- struct msm_cvp_internal_dmeconfig *dme_config)
-{
- struct hfi_cmd_session_hdr *ptr =
- (struct hfi_cmd_session_hdr *)pkt;
-
- if (!pkt || !session)
- return -EINVAL;
-
- memcpy(pkt, &dme_config->dme_config,
- CVP_DME_CONFIG_CMD_SIZE*sizeof(unsigned int));
-
- if (ptr->size != CVP_DME_CONFIG_CMD_SIZE*sizeof(unsigned int))
- goto error_dme_config;
-
- if (ptr->packet_type != HFI_CMD_SESSION_CVP_DME_CONFIG)
- goto error_dme_config;
-
- if (ptr->session_id != hash32_ptr(session))
- goto error_dme_config;
-
- return 0;
-
-error_dme_config:
- dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
- __func__, ptr->size, ptr->packet_type, ptr->session_id);
-
- return -EINVAL;
-}
-
-
-int cvp_create_pkt_cmd_session_cvp_dme_frame(
- struct hfi_cmd_session_cvp_dme_frame_packet *pkt,
- struct hal_session *session,
- struct msm_cvp_internal_dmeframe *dme_frame)
-{
- struct hfi_cmd_session_hdr *ptr =
- (struct hfi_cmd_session_hdr *)pkt;
-
- if (!pkt || !session)
- return -EINVAL;
-
- memcpy(pkt, &dme_frame->dme_frame,
- CVP_DME_FRAME_CMD_SIZE*sizeof(unsigned int));
-
- if (ptr->size != CVP_DME_FRAME_CMD_SIZE*sizeof(unsigned int))
- goto error_dme_frame;
-
- if (ptr->packet_type != HFI_CMD_SESSION_CVP_DME_FRAME)
- goto error_dme_frame;
-
- if (ptr->session_id != hash32_ptr(session))
- goto error_dme_frame;
-
- return 0;
-
-error_dme_frame:
- dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
- __func__, ptr->size, ptr->packet_type, ptr->session_id);
-
- return -EINVAL;
-}
-
-int cvp_create_pckt_cmd_session_cvp_persist(
- struct hfi_cmd_session_cvp_persist_packet *pkt,
- struct hal_session *session,
- struct msm_cvp_internal_persist_cmd *pbuf_cmd)
-{
- struct hfi_cmd_session_hdr *ptr =
- (struct hfi_cmd_session_hdr *)pkt;
-
- if (!pkt || !session)
- return -EINVAL;
-
- memcpy(pkt, &pbuf_cmd->persist_cmd,
- CVP_PERSIST_CMD_SIZE*sizeof(unsigned int));
-
- if (ptr->size != CVP_PERSIST_CMD_SIZE*sizeof(unsigned int))
- goto error_persist;
-
- if (ptr->packet_type != HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS)
- goto error_persist;
-
- if (ptr->session_id != hash32_ptr(session))
- goto error_persist;
-
- return 0;
-
-error_persist:
- dprintk(CVP_ERR, "%s: size=%d type=%d sessionid=%d\n",
- __func__, ptr->size, ptr->packet_type, ptr->session_id);
-
- return -EINVAL;
-
-}
-
-
int cvp_create_pkt_cmd_session_flush(struct hfi_cmd_session_flush_packet *pkt,
struct hal_session *session, enum hal_flush flush_mode)
{
@@ -2221,16 +2093,7 @@ static struct hfi_packetization_ops hfi_default = {
.session_flush = cvp_create_pkt_cmd_session_flush,
.session_get_property = cvp_create_pkt_cmd_session_get_property,
.session_set_property = cvp_create_pkt_cmd_session_set_property,
- .session_cvp_dfs_config =
- cvp_create_pkt_cmd_session_cvp_dfs_config,
- .session_cvp_dfs_frame =
- cvp_create_pkt_cmd_session_cvp_dfs_frame,
- .session_cvp_dme_config =
- cvp_create_pkt_cmd_session_cvp_dme_config,
- .session_cvp_dme_frame =
- cvp_create_pkt_cmd_session_cvp_dme_frame,
- .session_cvp_persist =
- cvp_create_pckt_cmd_session_cvp_persist,
+ .session_cvp_hfi_packetize = cvp_create_pkt_cmd_hfi_packetize,
};
struct hfi_packetization_ops *cvp_hfi_get_pkt_ops_handle(
diff --git a/drivers/media/platform/msm/cvp/hfi_packetization.h b/drivers/media/platform/msm/cvp/hfi_packetization.h
index aac76dc..f8e5a1d 100644
--- a/drivers/media/platform/msm/cvp/hfi_packetization.h
+++ b/drivers/media/platform/msm/cvp/hfi_packetization.h
@@ -79,26 +79,10 @@ struct hfi_packetization_ops {
int (*session_sync_process)(
struct hfi_cmd_session_sync_process_packet *pkt,
struct hal_session *session);
- int (*session_cvp_dfs_config)(
- struct hfi_cmd_session_cvp_dfs_config_packet *pkt,
+ int (*session_cvp_hfi_packetize)(
+ struct cvp_kmd_hfi_packet *out_pkt,
struct hal_session *session,
- struct msm_cvp_internal_dfsconfig *dfs_config);
- int (*session_cvp_dfs_frame)(
- struct hfi_cmd_session_cvp_dfs_frame_packet *pkt,
- struct hal_session *session,
- struct msm_cvp_internal_dfsframe *dfs_frame);
- int (*session_cvp_dme_config)(
- struct hfi_cmd_session_cvp_dme_config_packet *pkt,
- struct hal_session *session,
- struct msm_cvp_internal_dmeconfig *dme_config);
- int (*session_cvp_dme_frame)(
- struct hfi_cmd_session_cvp_dme_frame_packet *pkt,
- struct hal_session *session,
- struct msm_cvp_internal_dmeframe *dme_frame);
- int (*session_cvp_persist)(
- struct hfi_cmd_session_cvp_persist_packet *pkt,
- struct hal_session *session,
- struct msm_cvp_internal_persist_cmd *pbuf_cmd);
+ struct cvp_kmd_hfi_packet *in_pkt);
};
struct hfi_packetization_ops *cvp_hfi_get_pkt_ops_handle(
diff --git a/drivers/media/platform/msm/cvp/hfi_response_handler.c b/drivers/media/platform/msm/cvp/hfi_response_handler.c
index d0ea374..1a1cb3c 100644
--- a/drivers/media/platform/msm/cvp/hfi_response_handler.c
+++ b/drivers/media/platform/msm/cvp/hfi_response_handler.c
@@ -14,6 +14,9 @@
#include "cvp_hfi_io.h"
#include "msm_cvp_debug.h"
#include "cvp_hfi.h"
+#include "msm_cvp_common.h"
+
+extern struct msm_cvp_drv *cvp_driver;
static enum cvp_status hfi_map_err_status(u32 hfi_err)
{
@@ -823,12 +826,12 @@ static int hfi_process_session_set_buf_done(u32 device_id,
static int hfi_process_session_rel_buf_done(u32 device_id,
- struct hfi_msg_session_cvp_release_buffers_done_packet *pkt,
+ struct hfi_msg_session_hdr *pkt,
struct msm_cvp_cb_info *info)
{
struct msm_cvp_cb_cmd_done cmd_done = {0};
unsigned int pkt_size =
- sizeof(struct hfi_msg_session_cvp_release_buffers_done_packet);
+ sizeof(struct hfi_msg_session_hdr);
if (!pkt || pkt->size < pkt_size) {
dprintk(CVP_ERR, "bad packet/packet size %d\n",
@@ -880,6 +883,9 @@ static int hfi_process_session_cvp_operation_config(u32 device_id,
case HFI_CMD_SESSION_CVP_DME_CONFIG:
info->response_type = HAL_SESSION_DME_CONFIG_CMD_DONE;
break;
+ case HFI_CMD_SESSION_CVP_DME_BASIC_CONFIG:
+ info->response_type = HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE;
+ break;
default:
dprintk(CVP_ERR, "%s Invalid op config id\n", __func__);
return -EINVAL;
@@ -918,6 +924,84 @@ static int hfi_process_session_cvp_dfs(u32 device_id,
return 0;
}
+static struct msm_cvp_inst *cvp_get_inst_from_id(struct msm_cvp_core *core,
+ void *session_id)
+{
+ struct msm_cvp_inst *inst = NULL;
+ bool match = false;
+
+ if (!core || !session_id)
+ return NULL;
+
+ mutex_lock(&core->lock);
+ list_for_each_entry(inst, &core->instances, list) {
+ if (hash32_ptr(inst->session) == (unsigned int)session_id) {
+ match = true;
+ break;
+ }
+ }
+
+ inst = match ? inst : NULL;
+ mutex_unlock(&core->lock);
+
+ return inst;
+
+}
+
+static int hfi_process_session_cvp_msg(u32 device_id,
+ struct hfi_msg_session_hdr *pkt,
+ struct msm_cvp_cb_info *info)
+{
+ struct session_msg *sess_msg;
+ struct msm_cvp_inst *inst = NULL;
+ struct msm_cvp_core *core;
+ void *session_id;
+
+ if (!pkt) {
+ dprintk(CVP_ERR, "%s: invalid param\n", __func__);
+ return -EINVAL;
+ } else if (pkt->size > MAX_HFI_PKT_SIZE * sizeof(unsigned int)) {
+ dprintk(CVP_ERR, "%s: bad_pkt_size %d\n", __func__, pkt->size);
+ return -E2BIG;
+ }
+ session_id = (void *)(uintptr_t)pkt->session_id;
+ core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
+ inst = cvp_get_inst_from_id(core, session_id);
+
+ if (!inst) {
+ dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+ return -EINVAL;
+ }
+
+ sess_msg = kmem_cache_alloc(inst->session_queue.msg_cache, GFP_KERNEL);
+ if (sess_msg == NULL) {
+ dprintk(CVP_ERR, "%s runs out msg cache memory\n", __func__);
+ return -ENOMEM;
+ }
+
+ memcpy(&sess_msg->pkt, pkt, sizeof(struct hfi_msg_session_hdr));
+
+ spin_lock(&inst->session_queue.lock);
+ if (inst->session_queue.msg_count >= MAX_NUM_MSGS_PER_SESSION) {
+ dprintk(CVP_ERR, "Reached session queue size limit\n");
+ goto error_handle_msg;
+ }
+ list_add_tail(&sess_msg->node, &inst->session_queue.msgs);
+ inst->session_queue.msg_count++;
+ spin_unlock(&inst->session_queue.lock);
+
+ wake_up_all(&inst->session_queue.wq);
+
+ info->response_type = HAL_NO_RESP;
+
+ return 0;
+
+error_handle_msg:
+ spin_unlock(&inst->session_queue.lock);
+ kmem_cache_free(inst->session_queue.msg_cache, sess_msg);
+ return -ENOMEM;
+}
+
static int hfi_process_session_cvp_dme(u32 device_id,
struct hfi_msg_session_cvp_dme_packet_type *pkt,
struct msm_cvp_cb_info *info)
@@ -927,9 +1011,8 @@ static int hfi_process_session_cvp_dme(u32 device_id,
if (!pkt) {
dprintk(CVP_ERR, "%s: invalid param\n", __func__);
return -EINVAL;
- } else if (pkt->size < sizeof(*pkt)) {
- dprintk(CVP_ERR,
- "%s: bad_pkt_size\n", __func__);
+ } else if (pkt->size > sizeof(*pkt)) {
+ dprintk(CVP_ERR, "%s: bad_pkt_size %d\n", __func__, pkt->size);
return -E2BIG;
}
@@ -1065,7 +1148,7 @@ int cvp_hfi_process_msg_packet(u32 device_id,
return -EINVAL;
}
- dprintk(CVP_DBG, "Parse response %#x\n", msg_hdr->packet);
+ dprintk(CVP_DBG, "Received HFI MSG with type %d\n", msg_hdr->packet);
switch (msg_hdr->packet) {
case HFI_MSG_EVENT_NOTIFY:
pkt_func = (pkt_func_def)hfi_process_event_notify;
@@ -1092,27 +1175,21 @@ int cvp_hfi_process_msg_packet(u32 device_id,
pkt_func = (pkt_func_def)hfi_process_session_abort_done;
break;
case HFI_MSG_SESSION_CVP_OPERATION_CONFIG:
- dprintk(CVP_DBG,
- "Received HFI_MSG_SESSION_CVP_OPERATION_CONFIG from firmware");
pkt_func =
(pkt_func_def)hfi_process_session_cvp_operation_config;
break;
case HFI_MSG_SESSION_CVP_DFS:
- dprintk(CVP_DBG,
- "Received HFI_MSG_SESSION_CVP_DFS from firmware");
pkt_func = (pkt_func_def)hfi_process_session_cvp_dfs;
break;
case HFI_MSG_SESSION_CVP_DME:
- dprintk(CVP_DBG,
- "Received HFI_MSG_SESSION_CVP_DME from firmware");
pkt_func = (pkt_func_def)hfi_process_session_cvp_dme;
break;
case HFI_MSG_SESSION_CVP_SET_PERSIST_BUFFERS:
- dprintk(CVP_DBG,
- "Received HFI_MSG_SESSION_CVP_PERSIST from firmware");
pkt_func = (pkt_func_def)hfi_process_session_cvp_persist;
break;
-
+ case HFI_MSG_SESSION_CVP_DS:
+ pkt_func = (pkt_func_def)hfi_process_session_cvp_msg;
+ break;
default:
dprintk(CVP_DBG, "Unable to parse message: %#x\n",
msg_hdr->packet);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.c b/drivers/media/platform/msm/cvp/msm_cvp.c
index 6aaef05..ea68328 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp.c
@@ -4,6 +4,7 @@
*/
#include "msm_cvp.h"
+#include <synx_api.h>
#define MSM_CVP_NOMINAL_CYCLES (444 * 1000 * 1000)
#define MSM_CVP_UHD60E_VPSS_CYCLES (111 * 1000 * 1000)
@@ -13,8 +14,17 @@
#define MAX_CVP_ISE_CYCLES (MSM_CVP_NOMINAL_CYCLES - \
MSM_CVP_UHD60E_ISE_CYCLES)
+struct msm_cvp_fence_thread_data {
+ struct msm_cvp_inst *inst;
+ unsigned int device_id;
+ struct cvp_kmd_hfi_fence_packet in_fence_pkt;
+ unsigned int arg_type;
+};
+
+static struct msm_cvp_fence_thread_data fence_thread_data;
+
static void print_client_buffer(u32 tag, const char *str,
- struct msm_cvp_inst *inst, struct msm_cvp_buffer *cbuf)
+ struct msm_cvp_inst *inst, struct cvp_kmd_buffer *cbuf)
{
if (!(tag & msm_cvp_debug) || !inst || !cbuf)
return;
@@ -42,13 +52,13 @@ static enum hal_buffer get_hal_buftype(const char *str, unsigned int type)
{
enum hal_buffer buftype = HAL_BUFFER_NONE;
- if (type == MSM_CVP_BUFTYPE_INPUT)
+ if (type == CVP_KMD_BUFTYPE_INPUT)
buftype = HAL_BUFFER_INPUT;
- else if (type == MSM_CVP_BUFTYPE_OUTPUT)
+ else if (type == CVP_KMD_BUFTYPE_OUTPUT)
buftype = HAL_BUFFER_OUTPUT;
- else if (type == MSM_CVP_BUFTYPE_INTERNAL_1)
+ else if (type == CVP_KMD_BUFTYPE_INTERNAL_1)
buftype = HAL_BUFFER_INTERNAL_SCRATCH_1;
- else if (type == MSM_CVP_BUFTYPE_INTERNAL_2)
+ else if (type == CVP_KMD_BUFTYPE_INTERNAL_2)
buftype = HAL_BUFFER_INTERNAL_SCRATCH_1;
else
dprintk(CVP_ERR, "%s: unknown buffer type %#x\n",
@@ -87,7 +97,7 @@ static int msm_cvp_scale_clocks_and_bus(struct msm_cvp_inst *inst)
}
static int msm_cvp_get_session_info(struct msm_cvp_inst *inst,
- struct msm_cvp_session_info *session)
+ struct cvp_kmd_session_info *session)
{
int rc = 0;
@@ -133,366 +143,11 @@ static int msm_cvp_session_get_iova_addr(
return 0;
}
-/* DFS feature system call handling */
-static int msm_cvp_session_cvp_dfs_config(
- struct msm_cvp_inst *inst,
- struct msm_cvp_dfs_config *dfs_config)
-{
- int rc = 0;
- struct hfi_device *hdev;
- struct msm_cvp_internal_dfsconfig internal_dfs_config;
-
- dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
-
- if (!inst || !inst->core || !dfs_config) {
- dprintk(CVP_ERR, "%s: invalid params\n", __func__);
- return -EINVAL;
- }
- hdev = inst->core->device;
- memcpy(&internal_dfs_config.dfs_config.cvp_dfs_config,
- dfs_config, sizeof(struct msm_cvp_dfs_config));
-
- rc = call_hfi_op(hdev, session_cvp_dfs_config,
- (void *)inst->session, &internal_dfs_config);
- if (!rc) {
- rc = wait_for_sess_signal_receipt(inst,
- HAL_SESSION_DFS_CONFIG_CMD_DONE);
- if (rc)
- dprintk(CVP_ERR,
- "%s: wait for signal failed, rc %d\n",
- __func__, rc);
- } else {
- dprintk(CVP_ERR,
- "%s: Failed in call_hfi_op for session_cvp_dfs_config\n",
- __func__);
- }
- return rc;
-}
-
-static int msm_cvp_session_cvp_dfs_frame(
- struct msm_cvp_inst *inst,
- struct msm_cvp_dfs_frame *dfs_frame)
-{
- int rc = 0;
- struct hfi_device *hdev;
- struct msm_cvp_internal_dfsframe internal_dfs_frame;
- struct msm_cvp_dfs_frame_kmd *dest_ptr = &internal_dfs_frame.dfs_frame;
- struct msm_cvp_dfs_frame_kmd src_frame;
- struct msm_cvp_internal_buffer *cbuf;
-
- dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
-
- if (!inst || !inst->core || !dfs_frame) {
- dprintk(CVP_ERR, "%s: invalid params\n", __func__);
- return -EINVAL;
- }
- src_frame = *(struct msm_cvp_dfs_frame_kmd *)dfs_frame;
- hdev = inst->core->device;
- memset(&internal_dfs_frame, 0,
- sizeof(struct msm_cvp_internal_dfsframe));
-
- memcpy(&internal_dfs_frame.dfs_frame, dfs_frame,
- CVP_DFS_FRAME_CMD_SIZE*sizeof(unsigned int));
-
- rc = msm_cvp_session_get_iova_addr(inst, cbuf,
- src_frame.left_view_buffer_fd,
- src_frame.left_view_buffer_size,
- &dest_ptr->left_view_buffer_fd,
- &dest_ptr->left_view_buffer_size);
- if (rc) {
- dprintk(CVP_ERR, "%s:: left buffer not registered. rc=%d\n",
- __func__, rc);
- return rc;
- }
-
- rc = msm_cvp_session_get_iova_addr(inst, cbuf,
- src_frame.right_view_buffer_fd,
- src_frame.right_view_buffer_size,
- &dest_ptr->right_view_buffer_fd,
- &dest_ptr->right_view_buffer_size);
- if (rc) {
- dprintk(CVP_ERR, "%s:: right buffer not registered. rc=%d\n",
- __func__, rc);
- return rc;
- }
-
- rc = msm_cvp_session_get_iova_addr(inst, cbuf,
- src_frame.disparity_map_buffer_fd,
- src_frame.disparity_map_buffer_size,
- &dest_ptr->disparity_map_buffer_fd,
- &dest_ptr->disparity_map_buffer_size);
- if (rc) {
- dprintk(CVP_ERR, "%s:: disparity map not registered. rc=%d\n",
- __func__, rc);
- return rc;
- }
-
- rc = msm_cvp_session_get_iova_addr(inst, cbuf,
- src_frame.occlusion_mask_buffer_fd,
- src_frame.occlusion_mask_buffer_size,
- &dest_ptr->occlusion_mask_buffer_fd,
- &dest_ptr->occlusion_mask_buffer_size);
- if (rc) {
- dprintk(CVP_ERR, "%s:: occlusion mask not registered. rc=%d\n",
- __func__, rc);
- return rc;
- }
-
- rc = call_hfi_op(hdev, session_cvp_dfs_frame,
- (void *)inst->session, &internal_dfs_frame);
-
- if (rc) {
- dprintk(CVP_ERR,
- "%s: Failed in call_hfi_op for session_cvp_dfs_frame\n",
- __func__);
- }
-
- return rc;
-}
-
-static int msm_cvp_session_cvp_dfs_frame_response(
- struct msm_cvp_inst *inst,
- struct msm_cvp_dfs_frame *dfs_frame)
-{
- int rc = 0;
-
- dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
-
- if (!inst || !inst->core || !dfs_frame) {
- dprintk(CVP_ERR, "%s: invalid params\n", __func__);
- return -EINVAL;
- }
- rc = wait_for_sess_signal_receipt(inst,
- HAL_SESSION_DFS_FRAME_CMD_DONE);
- if (rc)
- dprintk(CVP_ERR,
- "%s: wait for signal failed, rc %d\n",
- __func__, rc);
- return rc;
-}
-
-/* DME feature system call handling */
-static int msm_cvp_session_cvp_dme_config(
- struct msm_cvp_inst *inst,
- struct msm_cvp_dme_config *dme_config)
-{
- int rc = 0;
- struct hfi_device *hdev;
- struct msm_cvp_internal_dmeconfig internal_dme_config;
-
- dprintk(CVP_DBG, "%s:: Enter inst = %d", __func__, inst);
-
- if (!inst || !inst->core || !dme_config) {
- dprintk(CVP_ERR, "%s: invalid params\n", __func__);
- return -EINVAL;
- }
- hdev = inst->core->device;
- memcpy(&internal_dme_config.dme_config.cvp_dme_config,
- dme_config, sizeof(struct msm_cvp_dme_config));
-
- rc = call_hfi_op(hdev, session_cvp_dme_config,
- (void *)inst->session, &internal_dme_config);
- if (!rc) {
- rc = wait_for_sess_signal_receipt(inst,
- HAL_SESSION_DME_CONFIG_CMD_DONE);
- if (rc)
- dprintk(CVP_ERR,
- "%s: wait for signal failed, rc %d\n",
- __func__, rc);
- } else {
- dprintk(CVP_ERR, "%s Failed in call_hfi_op\n", __func__);
- }
- return rc;
-}
-
-static int msm_cvp_session_cvp_dme_frame(
- struct msm_cvp_inst *inst,
- struct msm_cvp_dme_frame *dme_frame)
-{
- int i, rc = 0;
- struct hfi_device *hdev;
- struct msm_cvp_internal_dmeframe internal_dme_frame;
- struct msm_cvp_dme_frame_kmd *dest_ptr = &internal_dme_frame.dme_frame;
- struct msm_cvp_dme_frame_kmd src_frame;
- struct msm_cvp_internal_buffer *cbuf;
-
- dprintk(CVP_DBG, "%s:: Enter inst = %d", __func__, inst);
-
- if (!inst || !inst->core || !dme_frame) {
- dprintk(CVP_ERR, "%s: invalid params\n", __func__);
- return -EINVAL;
- }
- src_frame = *(struct msm_cvp_dme_frame_kmd *)dme_frame;
- hdev = inst->core->device;
- memset(&internal_dme_frame, 0,
- sizeof(struct msm_cvp_internal_dmeframe));
-
- memcpy(&internal_dme_frame.dme_frame, dme_frame,
- CVP_DME_FRAME_CMD_SIZE*sizeof(unsigned int));
-
- for (i = 0; i < CVP_DME_BUF_NUM; i++) {
- if (!src_frame.bufs[i].fd) {
- dest_ptr->bufs[i].fd = src_frame.bufs[i].fd;
- dest_ptr->bufs[i].size = src_frame.bufs[i].size;
- continue;
- }
-
- rc = msm_cvp_session_get_iova_addr(inst, cbuf,
- src_frame.bufs[i].fd,
- src_frame.bufs[i].size,
- &dest_ptr->bufs[i].fd,
- &dest_ptr->bufs[i].size);
- if (rc) {
- dprintk(CVP_ERR,
- "%s: %d buffer not registered. rc=%d\n",
- __func__, i, rc);
- return rc;
- }
-
- }
-
- rc = call_hfi_op(hdev, session_cvp_dme_frame,
- (void *)inst->session, &internal_dme_frame);
-
- if (rc) {
- dprintk(CVP_ERR,
- "%s:: Failed in call_hfi_op\n",
- __func__);
- }
-
- return rc;
-}
-
-static int msm_cvp_session_cvp_persist(
- struct msm_cvp_inst *inst,
- struct msm_cvp_persist_buf *pbuf_cmd)
-{
- int i, rc = 0;
- struct hfi_device *hdev;
- struct msm_cvp_internal_persist_cmd internal_pcmd;
- struct msm_cvp_persist_kmd *dest_ptr = &internal_pcmd.persist_cmd;
- struct msm_cvp_persist_kmd src_frame;
- struct msm_cvp_internal_buffer *cbuf;
-
- dprintk(CVP_DBG, "%s:: Enter inst = %d", __func__, inst);
-
- if (!inst || !inst->core || !pbuf_cmd) {
- dprintk(CVP_ERR, "%s: invalid params\n", __func__);
- return -EINVAL;
- }
- src_frame = *(struct msm_cvp_persist_kmd *)pbuf_cmd;
- hdev = inst->core->device;
- memset(&internal_pcmd, 0,
- sizeof(struct msm_cvp_internal_persist_cmd));
-
- memcpy(&internal_pcmd.persist_cmd, pbuf_cmd,
- CVP_PERSIST_CMD_SIZE*sizeof(unsigned int));
-
- for (i = 0; i < CVP_PSRSIST_BUF_NUM; i++) {
- if (!src_frame.bufs[i].fd) {
- dest_ptr->bufs[i].fd = src_frame.bufs[i].fd;
- dest_ptr->bufs[i].size = src_frame.bufs[i].size;
- continue;
- }
-
- rc = msm_cvp_session_get_iova_addr(inst, cbuf,
- src_frame.bufs[i].fd,
- src_frame.bufs[i].size,
- &dest_ptr->bufs[i].fd,
- &dest_ptr->bufs[i].size);
- if (rc) {
- dprintk(CVP_ERR,
- "%s:: %d buffer not registered. rc=%d\n",
- __func__, i, rc);
- return rc;
- }
- }
-
- rc = call_hfi_op(hdev, session_cvp_persist,
- (void *)inst->session, &internal_pcmd);
-
- if (rc)
- dprintk(CVP_ERR, "%s: Failed in call_hfi_op\n", __func__);
-
- return rc;
-}
-
-static int msm_cvp_session_cvp_dme_frame_response(
- struct msm_cvp_inst *inst,
- struct msm_cvp_dme_frame *dme_frame)
-{
- int rc = 0;
-
- dprintk(CVP_DBG, "%s:: Enter inst = %d", __func__, inst);
-
- if (!inst || !inst->core || !dme_frame) {
- dprintk(CVP_ERR, "%s: invalid params\n", __func__);
- return -EINVAL;
- }
- rc = wait_for_sess_signal_receipt(inst,
- HAL_SESSION_DME_FRAME_CMD_DONE);
- if (rc)
- dprintk(CVP_ERR,
- "%s: wait for signal failed, rc %d\n",
- __func__, rc);
- return rc;
-}
-
-static int msm_cvp_session_cvp_persist_response(
- struct msm_cvp_inst *inst,
- struct msm_cvp_persist_buf *pbuf_cmd)
-{
- int rc = 0;
-
- dprintk(CVP_DBG, "%s:: Enter inst = %d", __func__, inst);
-
- if (!inst || !inst->core || !pbuf_cmd) {
- dprintk(CVP_ERR, "%s: invalid params\n", __func__);
- return -EINVAL;
- }
- rc = wait_for_sess_signal_receipt(inst,
- HAL_SESSION_PERSIST_CMD_DONE);
- if (rc)
- dprintk(CVP_ERR,
- "%s: wait for signal failed, rc %d\n",
- __func__, rc);
- return rc;
-}
-
-
-
-static int msm_cvp_send_cmd(struct msm_cvp_inst *inst,
- struct msm_cvp_send_cmd *send_cmd)
-{
- dprintk(CVP_ERR, "%s: UMD gave a deprecated cmd", __func__);
-
- return 0;
-}
-
-static int msm_cvp_request_power(struct msm_cvp_inst *inst,
- struct msm_cvp_request_power *power)
-{
- int rc = 0;
-
- if (!inst || !power) {
- dprintk(CVP_ERR, "%s: invalid params\n", __func__);
- return -EINVAL;
- }
-
- dprintk(CVP_DBG,
- "%s: clock_cycles_a %d, clock_cycles_b %d, ddr_bw %d sys_cache_bw %d\n",
- __func__, power->clock_cycles_a, power->clock_cycles_b,
- power->ddr_bw, power->sys_cache_bw);
-
- return rc;
-}
-
-static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
- struct msm_cvp_buffer *buf)
+static int msm_cvp_map_buf(struct msm_cvp_inst *inst,
+ struct cvp_kmd_buffer *buf)
{
int rc = 0;
bool found;
- struct hfi_device *hdev;
struct msm_cvp_internal_buffer *cbuf;
struct hal_session *session;
@@ -502,13 +157,6 @@ static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
}
session = (struct hal_session *)inst->session;
- if (!session) {
- dprintk(CVP_ERR, "%s: invalid session\n", __func__);
- return -EINVAL;
- }
- hdev = inst->core->device;
- print_client_buffer(CVP_DBG, "register", inst, buf);
-
mutex_lock(&inst->cvpbufs.lock);
found = false;
list_for_each_entry(cbuf, &inst->cvpbufs.list, list) {
@@ -533,7 +181,7 @@ static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
list_add_tail(&cbuf->list, &inst->cvpbufs.list);
mutex_unlock(&inst->cvpbufs.lock);
- memcpy(&cbuf->buf, buf, sizeof(struct msm_cvp_buffer));
+ memcpy(&cbuf->buf, buf, sizeof(struct cvp_kmd_buffer));
cbuf->smem.buffer_type = get_hal_buftype(__func__, buf->type);
cbuf->smem.fd = buf->fd;
cbuf->smem.offset = buf->offset;
@@ -555,7 +203,6 @@ static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
goto exit;
}
}
-
return rc;
exit:
@@ -570,8 +217,428 @@ static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
return rc;
}
+static bool _cvp_msg_pending(struct msm_cvp_inst *inst,
+ struct cvp_session_queue *sq,
+ struct session_msg **msg)
+{
+ struct session_msg *mptr = NULL;
+ bool result = false;
+
+ spin_lock(&sq->lock);
+ if (!kref_read(&inst->kref)) {
+ /* The session is being deleted */
+ spin_unlock(&sq->lock);
+ *msg = NULL;
+ return true;
+ }
+ result = list_empty(&sq->msgs);
+ if (!result) {
+ mptr = list_first_entry(&sq->msgs, struct session_msg, node);
+ list_del_init(&mptr->node);
+ sq->msg_count--;
+ }
+ spin_unlock(&sq->lock);
+ *msg = mptr;
+ return !result;
+}
+
+
+static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
+ struct cvp_kmd_hfi_packet *out_pkt)
+{
+ unsigned long wait_time;
+ struct session_msg *msg = NULL;
+ struct cvp_session_queue *sq;
+
+ if (!inst) {
+ dprintk(CVP_ERR, "%s invalid session\n", __func__);
+ return -EINVAL;
+ }
+
+ sq = &inst->session_queue;
+
+ wait_time = msecs_to_jiffies(CVP_MAX_WAIT_TIME);
+
+ if (wait_event_timeout(sq->wq,
+ _cvp_msg_pending(inst, sq, &msg), wait_time) == 0) {
+ dprintk(CVP_ERR, "session queue wait timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ if (msg == NULL) {
+ dprintk(CVP_ERR, "%s: session is deleted, no msg\n", __func__);
+ return -EINVAL;
+ }
+
+ memcpy(out_pkt, &msg->pkt, sizeof(struct hfi_msg_session_hdr));
+ kmem_cache_free(inst->session_queue.msg_cache, msg);
+
+ return 0;
+}
+
+static int msm_cvp_session_process_hfi(
+ struct msm_cvp_inst *inst,
+ struct cvp_kmd_hfi_packet *in_pkt)
+{
+ int i, pkt_idx, rc = 0;
+ struct hfi_device *hdev;
+ struct msm_cvp_internal_buffer *cbuf;
+ struct buf_desc *buf_ptr;
+ unsigned int offset, buf_num;
+
+ if (!inst || !inst->core || !in_pkt) {
+ dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+ return -EINVAL;
+ }
+ hdev = inst->core->device;
+
+ pkt_idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)in_pkt);
+ if (pkt_idx < 0) {
+ dprintk(CVP_ERR, "%s incorrect packet %d, %x\n", __func__,
+ in_pkt->pkt_data[0],
+ in_pkt->pkt_data[1]);
+ return pkt_idx;
+ }
+ offset = cvp_hfi_defs[pkt_idx].buf_offset;
+ buf_num = cvp_hfi_defs[pkt_idx].buf_num;
+
+ if (offset != 0 && buf_num != 0) {
+ buf_ptr = (struct buf_desc *)&in_pkt->pkt_data[offset];
+
+ for (i = 0; i < buf_num; i++) {
+ if (!buf_ptr[i].fd)
+ continue;
+
+ rc = msm_cvp_session_get_iova_addr(inst, cbuf,
+ buf_ptr[i].fd,
+ buf_ptr[i].size,
+ &buf_ptr[i].fd,
+ &buf_ptr[i].size);
+ if (rc) {
+ dprintk(CVP_ERR,
+ "%s: buf %d unregistered. rc=%d\n",
+ __func__, i, rc);
+ return rc;
+ }
+ }
+ }
+ rc = call_hfi_op(hdev, session_cvp_hfi_send,
+ (void *)inst->session, in_pkt);
+ if (rc) {
+ dprintk(CVP_ERR,
+ "%s: Failed in call_hfi_op %d, %x\n",
+ __func__, in_pkt->pkt_data[0], in_pkt->pkt_data[1]);
+ }
+
+ if (cvp_hfi_defs[pkt_idx].resp != HAL_NO_RESP) {
+ rc = wait_for_sess_signal_receipt(inst,
+ cvp_hfi_defs[pkt_idx].resp);
+ if (rc)
+ dprintk(CVP_ERR,
+ "%s: wait for signal failed, rc %d %d, %x %d\n",
+ __func__, rc,
+ in_pkt->pkt_data[0],
+ in_pkt->pkt_data[1],
+ cvp_hfi_defs[pkt_idx].resp);
+
+ }
+
+ return rc;
+}
+
+static int msm_cvp_thread_fence_run(void *data)
+{
+ int i, pkt_idx, rc = 0;
+ unsigned long timeout_ms = 1000;
+ int synx_obj;
+ struct hfi_device *hdev;
+ struct msm_cvp_fence_thread_data *fence_thread_data;
+ struct cvp_kmd_hfi_fence_packet *in_fence_pkt;
+ struct cvp_kmd_hfi_packet *in_pkt;
+ struct msm_cvp_inst *inst;
+ int *fence;
+ struct msm_cvp_internal_buffer *cbuf;
+ struct buf_desc *buf_ptr;
+ unsigned int offset, buf_num;
+
+ if (!data) {
+ dprintk(CVP_ERR, "%s Wrong input data %pK\n", __func__, data);
+ do_exit(-EINVAL);
+ }
+
+ fence_thread_data = data;
+ inst = cvp_get_inst(get_cvp_core(fence_thread_data->device_id),
+ (void *)fence_thread_data->inst);
+ if (!inst) {
+ dprintk(CVP_ERR, "%s Wrong inst %pK\n", __func__, inst);
+ do_exit(-EINVAL);
+ }
+ in_fence_pkt = (struct cvp_kmd_hfi_fence_packet *)
+ &fence_thread_data->in_fence_pkt;
+ in_pkt = (struct cvp_kmd_hfi_packet *)(in_fence_pkt);
+ fence = (int *)(in_fence_pkt->fence_data);
+ hdev = inst->core->device;
+
+ pkt_idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)in_pkt);
+ if (pkt_idx < 0) {
+ dprintk(CVP_ERR, "%s incorrect packet %d, %x\n", __func__,
+ in_pkt->pkt_data[0],
+ in_pkt->pkt_data[1]);
+ do_exit(pkt_idx);
+ }
+
+ offset = cvp_hfi_defs[pkt_idx].buf_offset;
+ buf_num = cvp_hfi_defs[pkt_idx].buf_num;
+
+ if (offset != 0 && buf_num != 0) {
+ buf_ptr = (struct buf_desc *)&in_pkt->pkt_data[offset];
+
+ for (i = 0; i < buf_num; i++) {
+ if (!buf_ptr[i].fd)
+ continue;
+
+ rc = msm_cvp_session_get_iova_addr(inst, cbuf,
+ buf_ptr[i].fd,
+ buf_ptr[i].size,
+ &buf_ptr[i].fd,
+ &buf_ptr[i].size);
+ if (rc) {
+ dprintk(CVP_ERR,
+ "%s: buf %d unregistered. rc=%d\n",
+ __func__, i, rc);
+ do_exit(rc);
+ }
+ }
+ }
+
+ //wait on synx before signaling HFI
+ switch (fence_thread_data->arg_type) {
+ case CVP_KMD_HFI_DME_FRAME_FENCE_CMD:
+ {
+ for (i = 0; i < HFI_DME_BUF_NUM-1; i++) {
+ if (fence[(i<<1)]) {
+ rc = synx_import(fence[(i<<1)],
+ fence[((i<<1)+1)], &synx_obj);
+ if (rc) {
+ dprintk(CVP_ERR,
+ "%s: synx_import failed\n",
+ __func__);
+ do_exit(rc);
+ }
+ rc = synx_wait(synx_obj, timeout_ms);
+ if (rc) {
+ dprintk(CVP_ERR,
+ "%s: synx_wait failed\n",
+ __func__);
+ do_exit(rc);
+ }
+ rc = synx_release(synx_obj);
+ if (rc) {
+ dprintk(CVP_ERR,
+ "%s: synx_release failed\n",
+ __func__);
+ do_exit(rc);
+ }
+ }
+ }
+
+ rc = call_hfi_op(hdev, session_cvp_hfi_send,
+ (void *)inst->session, in_pkt);
+ if (rc) {
+ dprintk(CVP_ERR,
+ "%s: Failed in call_hfi_op %d, %x\n",
+ __func__, in_pkt->pkt_data[0],
+ in_pkt->pkt_data[1]);
+ do_exit(rc);
+ }
+
+ rc = wait_for_sess_signal_receipt(inst,
+ HAL_SESSION_DME_FRAME_CMD_DONE);
+ if (rc) {
+ dprintk(CVP_ERR, "%s: wait for signal failed, rc %d\n",
+ __func__, rc);
+ do_exit(rc);
+ }
+ rc = synx_import(fence[((HFI_DME_BUF_NUM-1)<<1)],
+ fence[((HFI_DME_BUF_NUM-1)<<1)+1],
+ &synx_obj);
+ if (rc) {
+ dprintk(CVP_ERR, "%s: synx_import failed\n", __func__);
+ do_exit(rc);
+ }
+ rc = synx_signal(synx_obj, SYNX_STATE_SIGNALED_SUCCESS);
+ if (rc) {
+ dprintk(CVP_ERR, "%s: synx_signal failed\n", __func__);
+ do_exit(rc);
+ }
+ if (synx_get_status(synx_obj) != SYNX_STATE_SIGNALED_SUCCESS) {
+ dprintk(CVP_ERR, "%s: synx_get_status failed\n",
+ __func__);
+ do_exit(rc);
+ }
+ rc = synx_release(synx_obj);
+ if (rc) {
+ dprintk(CVP_ERR, "%s: synx_release failed\n", __func__);
+ do_exit(rc);
+ }
+ break;
+ }
+ default:
+ dprintk(CVP_ERR, "%s: unknown hfi cmd type 0x%x\n",
+ __func__, fence_thread_data->arg_type);
+ rc = -EINVAL;
+ do_exit(rc);
+ break;
+ }
+
+ do_exit(0);
+}
+
+static int msm_cvp_session_process_hfifence(
+ struct msm_cvp_inst *inst,
+ struct cvp_kmd_arg *arg)
+{
+ static int thread_num;
+ struct task_struct *thread;
+ int rc = 0;
+ char thread_fence_name[32];
+
+ dprintk(CVP_DBG, "%s:: Enter inst = %d", __func__, inst);
+ if (!inst || !inst->core || !arg) {
+ dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ thread_num = thread_num + 1;
+ fence_thread_data.inst = inst;
+ fence_thread_data.device_id = (unsigned int)inst->core->id;
+ memcpy(&fence_thread_data.in_fence_pkt, &arg->data.hfi_fence_pkt,
+ sizeof(struct cvp_kmd_hfi_fence_packet));
+ fence_thread_data.arg_type = arg->type;
+ snprintf(thread_fence_name, sizeof(thread_fence_name),
+ "thread_fence_%d", thread_num);
+ thread = kthread_run(msm_cvp_thread_fence_run,
+ &fence_thread_data, thread_fence_name);
+
+ return rc;
+}
+
+static int msm_cvp_session_cvp_dfs_frame_response(
+ struct msm_cvp_inst *inst,
+ struct cvp_kmd_hfi_packet *dfs_frame)
+{
+ int rc = 0;
+
+ dprintk(CVP_DBG, "%s:: Enter inst = %pK\n", __func__, inst);
+
+ if (!inst || !inst->core || !dfs_frame) {
+ dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+ return -EINVAL;
+ }
+ rc = wait_for_sess_signal_receipt(inst,
+ HAL_SESSION_DFS_FRAME_CMD_DONE);
+ if (rc)
+ dprintk(CVP_ERR,
+ "%s: wait for signal failed, rc %d\n",
+ __func__, rc);
+ return rc;
+}
+
+static int msm_cvp_session_cvp_dme_frame_response(
+ struct msm_cvp_inst *inst,
+ struct cvp_kmd_hfi_packet *dme_frame)
+{
+ int rc = 0;
+
+ dprintk(CVP_DBG, "%s:: Enter inst = %d", __func__, inst);
+
+ if (!inst || !inst->core || !dme_frame) {
+ dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+ return -EINVAL;
+ }
+ rc = wait_for_sess_signal_receipt(inst,
+ HAL_SESSION_DME_FRAME_CMD_DONE);
+ if (rc)
+ dprintk(CVP_ERR,
+ "%s: wait for signal failed, rc %d\n",
+ __func__, rc);
+ return rc;
+}
+
+static int msm_cvp_session_cvp_persist_response(
+ struct msm_cvp_inst *inst,
+ struct cvp_kmd_hfi_packet *pbuf_cmd)
+{
+ int rc = 0;
+
+ dprintk(CVP_DBG, "%s:: Enter inst = %d", __func__, inst);
+
+ if (!inst || !inst->core || !pbuf_cmd) {
+ dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+ return -EINVAL;
+ }
+ rc = wait_for_sess_signal_receipt(inst,
+ HAL_SESSION_PERSIST_CMD_DONE);
+ if (rc)
+ dprintk(CVP_ERR,
+ "%s: wait for signal failed, rc %d\n",
+ __func__, rc);
+ return rc;
+}
+
+
+
+static int msm_cvp_send_cmd(struct msm_cvp_inst *inst,
+ struct cvp_kmd_send_cmd *send_cmd)
+{
+ dprintk(CVP_ERR, "%s: UMD gave a deprecated cmd", __func__);
+
+ return 0;
+}
+
+static int msm_cvp_request_power(struct msm_cvp_inst *inst,
+ struct cvp_kmd_request_power *power)
+{
+ int rc = 0;
+
+ if (!inst || !power) {
+ dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ dprintk(CVP_DBG,
+ "%s: clock_cycles_a %d, clock_cycles_b %d, ddr_bw %d sys_cache_bw %d\n",
+ __func__, power->clock_cycles_a, power->clock_cycles_b,
+ power->ddr_bw, power->sys_cache_bw);
+
+ return rc;
+}
+
+static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
+ struct cvp_kmd_buffer *buf)
+{
+ struct hfi_device *hdev;
+ struct hal_session *session;
+
+ if (!inst || !inst->core || !buf) {
+ dprintk(CVP_ERR, "%s: invalid params\n", __func__);
+ return -EINVAL;
+ }
+
+ session = (struct hal_session *)inst->session;
+ if (!session) {
+ dprintk(CVP_ERR, "%s: invalid session\n", __func__);
+ return -EINVAL;
+ }
+ hdev = inst->core->device;
+ print_client_buffer(CVP_DBG, "register", inst, buf);
+
+ return msm_cvp_map_buf(inst, buf);
+
+}
+
static int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
- struct msm_cvp_buffer *buf)
+ struct cvp_kmd_buffer *buf)
{
int rc = 0;
bool found;
@@ -626,7 +693,7 @@ static int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
return rc;
}
-int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct msm_cvp_arg *arg)
+int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg)
{
int rc = 0;
@@ -637,112 +704,95 @@ int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct msm_cvp_arg *arg)
dprintk(CVP_DBG, "%s:: arg->type = %x", __func__, arg->type);
switch (arg->type) {
- case MSM_CVP_GET_SESSION_INFO:
+ case CVP_KMD_GET_SESSION_INFO:
{
- struct msm_cvp_session_info *session =
- (struct msm_cvp_session_info *)&arg->data.session;
+ struct cvp_kmd_session_info *session =
+ (struct cvp_kmd_session_info *)&arg->data.session;
rc = msm_cvp_get_session_info(inst, session);
break;
}
- case MSM_CVP_REQUEST_POWER:
+ case CVP_KMD_REQUEST_POWER:
{
- struct msm_cvp_request_power *power =
- (struct msm_cvp_request_power *)&arg->data.req_power;
+ struct cvp_kmd_request_power *power =
+ (struct cvp_kmd_request_power *)&arg->data.req_power;
rc = msm_cvp_request_power(inst, power);
break;
}
- case MSM_CVP_REGISTER_BUFFER:
+ case CVP_KMD_REGISTER_BUFFER:
{
- struct msm_cvp_buffer *buf =
- (struct msm_cvp_buffer *)&arg->data.regbuf;
+ struct cvp_kmd_buffer *buf =
+ (struct cvp_kmd_buffer *)&arg->data.regbuf;
rc = msm_cvp_register_buffer(inst, buf);
break;
}
- case MSM_CVP_UNREGISTER_BUFFER:
+ case CVP_KMD_UNREGISTER_BUFFER:
{
- struct msm_cvp_buffer *buf =
- (struct msm_cvp_buffer *)&arg->data.unregbuf;
+ struct cvp_kmd_buffer *buf =
+ (struct cvp_kmd_buffer *)&arg->data.unregbuf;
rc = msm_cvp_unregister_buffer(inst, buf);
break;
}
- case MSM_CVP_HFI_SEND_CMD:
+ case CVP_KMD_HFI_SEND_CMD:
{
- //struct msm_cvp_buffer *buf =
- //(struct msm_cvp_buffer *)&arg->data.unregbuf;
- struct msm_cvp_send_cmd *send_cmd =
- (struct msm_cvp_send_cmd *)&arg->data.send_cmd;
+ struct cvp_kmd_send_cmd *send_cmd =
+ (struct cvp_kmd_send_cmd *)&arg->data.send_cmd;
rc = msm_cvp_send_cmd(inst, send_cmd);
break;
}
- case MSM_CVP_HFI_DFS_CONFIG_CMD:
+ case CVP_KMD_RECEIVE_MSG_PKT:
{
- struct msm_cvp_dfs_config *dfs_config =
- (struct msm_cvp_dfs_config *)&arg->data.dfs_config;
-
- rc = msm_cvp_session_cvp_dfs_config(inst, dfs_config);
+ struct cvp_kmd_hfi_packet *out_pkt =
+ (struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
+ rc = msm_cvp_session_receive_hfi(inst, out_pkt);
break;
}
- case MSM_CVP_HFI_DFS_FRAME_CMD:
+ case CVP_KMD_SEND_CMD_PKT:
+ case CVP_KMD_HFI_DFS_CONFIG_CMD:
+ case CVP_KMD_HFI_DFS_FRAME_CMD:
+ case CVP_KMD_HFI_DME_CONFIG_CMD:
+ case CVP_KMD_HFI_DME_FRAME_CMD:
+ case CVP_KMD_HFI_PERSIST_CMD:
{
- struct msm_cvp_dfs_frame *dfs_frame =
- (struct msm_cvp_dfs_frame *)&arg->data.dfs_frame;
+ struct cvp_kmd_hfi_packet *in_pkt =
+ (struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
- rc = msm_cvp_session_cvp_dfs_frame(inst, dfs_frame);
+ rc = msm_cvp_session_process_hfi(inst, in_pkt);
break;
}
- case MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE:
+ case CVP_KMD_HFI_DFS_FRAME_CMD_RESPONSE:
{
- struct msm_cvp_dfs_frame *dfs_frame =
- (struct msm_cvp_dfs_frame *)&arg->data.dfs_frame;
+ struct cvp_kmd_hfi_packet *dfs_frame =
+ (struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
rc = msm_cvp_session_cvp_dfs_frame_response(inst, dfs_frame);
break;
}
- case MSM_CVP_HFI_DME_CONFIG_CMD:
+ case CVP_KMD_HFI_DME_FRAME_CMD_RESPONSE:
{
- struct msm_cvp_dme_config *dme_config =
- (struct msm_cvp_dme_config *)&arg->data.dme_config;
+ struct cvp_kmd_hfi_packet *dme_frame =
+ (struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
- rc = msm_cvp_session_cvp_dme_config(inst, dme_config);
+ rc = msm_cvp_session_cvp_dme_frame_response(inst, dme_frame);
break;
}
- case MSM_CVP_HFI_DME_FRAME_CMD:
+ case CVP_KMD_HFI_PERSIST_CMD_RESPONSE:
{
- struct msm_cvp_dme_frame *dme_frame =
- (struct msm_cvp_dme_frame *)&arg->data.dme_frame;
-
- rc = msm_cvp_session_cvp_dme_frame(inst, dme_frame);
- break;
- }
- case MSM_CVP_HFI_DME_FRAME_CMD_RESPONSE:
- {
- struct msm_cvp_dme_frame *dmeframe =
- (struct msm_cvp_dme_frame *)&arg->data.dme_frame;
-
- rc = msm_cvp_session_cvp_dme_frame_response(inst, dmeframe);
- break;
- }
- case MSM_CVP_HFI_PERSIST_CMD:
- {
- struct msm_cvp_persist_buf *pbuf_cmd =
- (struct msm_cvp_persist_buf *)&arg->data.pbuf_cmd;
-
- rc = msm_cvp_session_cvp_persist(inst, pbuf_cmd);
- break;
- }
- case MSM_CVP_HFI_PERSIST_CMD_RESPONSE:
- {
- struct msm_cvp_persist_buf *pbuf_cmd =
- (struct msm_cvp_persist_buf *)&arg->data.pbuf_cmd;
+ struct cvp_kmd_hfi_packet *pbuf_cmd =
+ (struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;
rc = msm_cvp_session_cvp_persist_response(inst, pbuf_cmd);
break;
}
+ case CVP_KMD_HFI_DME_FRAME_FENCE_CMD:
+ {
+ rc = msm_cvp_session_process_hfifence(inst, arg);
+ break;
+ }
default:
dprintk(CVP_ERR, "%s: unknown arg type 0x%x\n",
__func__, arg->type);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp.h b/drivers/media/platform/msm/cvp/msm_cvp.h
index 6bcb799..9443dab 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp.h
@@ -11,7 +11,7 @@
#include "msm_cvp_clocks.h"
#include "msm_cvp_debug.h"
#include "msm_cvp_dsp.h"
-int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct msm_cvp_arg *arg);
+int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg);
int msm_cvp_session_init(struct msm_cvp_inst *inst);
int msm_cvp_session_deinit(struct msm_cvp_inst *inst);
int msm_cvp_session_pause(struct msm_cvp_inst *inst);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_clocks.c b/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
index aba906b..2b9f902 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_clocks.c
@@ -717,7 +717,7 @@ int msm_cvp_set_clocks(struct msm_cvp_core *core)
* keep checking from lowest to highest rate until
* table rate >= requested rate
*/
- for (i = core->resources.allowed_clks_tbl_size - 1; i >= 0; i--) {
+ for (i = 0; i < core->resources.allowed_clks_tbl_size; i++) {
rate = allowed_clks_tbl[i].clock_rate;
if (rate >= freq_core_max)
break;
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_common.c b/drivers/media/platform/msm/cvp/msm_cvp_common.c
index 4f68dad..af485af 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_common.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_common.c
@@ -1729,6 +1729,7 @@ void cvp_handle_cmd_response(enum hal_command_response cmd, void *data)
case HAL_SESSION_DFS_CONFIG_CMD_DONE:
case HAL_SESSION_DFS_FRAME_CMD_DONE:
case HAL_SESSION_DME_CONFIG_CMD_DONE:
+ case HAL_SESSION_DME_BASIC_CONFIG_CMD_DONE:
case HAL_SESSION_DME_FRAME_CMD_DONE:
case HAL_SESSION_PERSIST_CMD_DONE:
cvp_handle_session_cmd_done(cmd, data);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_core.c b/drivers/media/platform/msm/cvp/msm_cvp_core.c
index 75de60d..07a3bcb 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_core.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_core.c
@@ -830,7 +830,7 @@ int msm_cvp_dqevent(void *inst, struct v4l2_event *event)
EXPORT_SYMBOL(msm_cvp_dqevent);
int msm_cvp_private(void *cvp_inst, unsigned int cmd,
- struct msm_cvp_arg *arg)
+ struct cvp_kmd_arg *arg)
{
int rc = 0;
struct msm_cvp_inst *inst = (struct msm_cvp_inst *)cvp_inst;
@@ -975,6 +975,38 @@ static const struct v4l2_ctrl_ops msm_cvp_ctrl_ops = {
.g_volatile_ctrl = msm_cvp_op_g_volatile_ctrl,
};
+static int _init_session_queue(struct msm_cvp_inst *inst)
+{
+ spin_lock_init(&inst->session_queue.lock);
+ INIT_LIST_HEAD(&inst->session_queue.msgs);
+ inst->session_queue.msg_count = 0;
+ init_waitqueue_head(&inst->session_queue.wq);
+ inst->session_queue.msg_cache = KMEM_CACHE(session_msg, 0);
+ if (!inst->session_queue.msg_cache) {
+ dprintk(CVP_ERR, "Failed to allocate msg quque\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void _deinit_session_queue(struct msm_cvp_inst *inst)
+{
+ struct session_msg *msg, *tmpmsg;
+
+ /* free all messages */
+ spin_lock(&inst->session_queue.lock);
+ list_for_each_entry_safe(msg, tmpmsg, &inst->session_queue.msgs, node) {
+ list_del_init(&msg->node);
+ kmem_cache_free(inst->session_queue.msg_cache, msg);
+ }
+ inst->session_queue.msg_count = 0;
+ spin_unlock(&inst->session_queue.lock);
+
+ wake_up_all(&inst->session_queue.wq);
+
+ kmem_cache_destroy(inst->session_queue.msg_cache);
+}
+
void *msm_cvp_open(int core_id, int session_type)
{
struct msm_cvp_inst *inst = NULL;
@@ -1077,6 +1109,11 @@ void *msm_cvp_open(int core_id, int session_type)
list_add_tail(&inst->list, &core->instances);
mutex_unlock(&core->lock);
+
+ rc = _init_session_queue(inst);
+ if (rc)
+ goto fail_init;
+
rc = msm_cvp_comm_try_state(inst, MSM_CVP_CORE_INIT_DONE);
if (rc) {
dprintk(CVP_ERR,
@@ -1115,6 +1152,7 @@ void *msm_cvp_open(int core_id, int session_type)
return inst;
fail_init:
+ _deinit_session_queue(inst);
mutex_lock(&core->lock);
list_del(&inst->list);
mutex_unlock(&core->lock);
@@ -1261,6 +1299,7 @@ int msm_cvp_destroy(struct msm_cvp_inst *inst)
mutex_destroy(&inst->flush_lock);
msm_cvp_debugfs_deinit_inst(inst);
+ _deinit_session_queue(inst);
pr_info(CVP_DBG_TAG "Closed cvp instance: %pK\n",
"info", inst);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_core.h b/drivers/media/platform/msm/cvp/msm_cvp_core.h
index 97339ed..a5692c0 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_core.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_core.h
@@ -124,5 +124,5 @@ int msm_cvp_dqevent(void *instance, struct v4l2_event *event);
int msm_cvp_g_crop(void *instance, struct v4l2_crop *a);
int msm_cvp_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize);
int msm_cvp_private(void *cvp_inst, unsigned int cmd,
- struct msm_cvp_arg *arg);
+ struct cvp_kmd_arg *arg);
#endif
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_internal.h b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
index a5ca2f2..360a9a9 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_internal.h
+++ b/drivers/media/platform/msm/cvp/msm_cvp_internal.h
@@ -403,6 +403,22 @@ struct msm_cvp_core_ops {
int (*decide_work_mode)(struct msm_cvp_inst *inst);
};
+#define MAX_NUM_MSGS_PER_SESSION 128
+#define CVP_MAX_WAIT_TIME 2000
+
+struct session_msg {
+ struct list_head node;
+ struct hfi_msg_session_hdr pkt;
+};
+
+struct cvp_session_queue {
+ spinlock_t lock;
+ unsigned int msg_count;
+ struct list_head msgs;
+ wait_queue_head_t wq;
+ struct kmem_cache *msg_cache;
+};
+
struct msm_cvp_core {
struct list_head list;
struct mutex lock;
@@ -436,6 +452,7 @@ struct msm_cvp_inst {
struct mutex sync_lock, lock, flush_lock;
struct msm_cvp_core *core;
enum session_type session_type;
+ struct cvp_session_queue session_queue;
void *session;
struct session_prop prop;
enum instance_state state;
@@ -535,13 +552,7 @@ struct msm_video_buffer {
struct msm_cvp_internal_buffer {
struct list_head list;
struct msm_smem smem;
- struct msm_cvp_buffer buf;
-};
-
-struct msm_cvp_internal_send_cmd {
- struct list_head list;
- struct msm_smem smem;
- struct msm_cvp_send_cmd send_cmd;
+ struct cvp_kmd_buffer buf;
};
void msm_cvp_comm_handle_thermal_event(void);
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c b/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
index c4962ff..d14e489 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_res_parse.c
@@ -19,7 +19,6 @@ enum clock_properties {
};
#define PERF_GOV "performance"
-#define DEFAULT_CVP_CLK_SVS2
static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
{
@@ -329,14 +328,8 @@ EXPORT_SYMBOL(msm_cvp_load_u32_table);
/* A comparator to compare loads (needed later on) */
static int cmp(const void *a, const void *b)
{
-#ifdef DEFAULT_CVP_CLK_SVS2
return ((struct allowed_clock_rates_table *)a)->clock_rate -
((struct allowed_clock_rates_table *)b)->clock_rate;
-#else
- /* want to sort in reverse so flip the comparison */
- return ((struct allowed_clock_rates_table *)b)->clock_rate -
- ((struct allowed_clock_rates_table *)a)->clock_rate;
-#endif
}
static int msm_cvp_load_allowed_clocks_table(
diff --git a/drivers/media/platform/msm/cvp/msm_v4l2_private.c b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
index 35196a3..a578d8f55 100644
--- a/drivers/media/platform/msm/cvp/msm_v4l2_private.c
+++ b/drivers/media/platform/msm/cvp/msm_v4l2_private.c
@@ -4,12 +4,138 @@
*/
#include "msm_v4l2_private.h"
+#include "cvp_hfi_api.h"
-static int convert_from_user(struct msm_cvp_arg *kp, unsigned long arg)
+static int _get_pkt_hdr_from_user(struct cvp_kmd_arg __user *up,
+ struct cvp_hal_session_cmd_pkt *pkt_hdr)
+{
+ struct cvp_kmd_hfi_packet *u;
+
+ u = &up->data.hfi_pkt;
+
+ if (get_user(pkt_hdr->size, &u->pkt_data[0]))
+ return -EFAULT;
+
+ if (get_user(pkt_hdr->packet_type, &u->pkt_data[1]))
+ return -EFAULT;
+
+ if (get_pkt_index(pkt_hdr) < 0) {
+ dprintk(CVP_DBG, "user mode provides incorrect hfi\n");
+ goto set_default_pkt_hdr;
+ }
+
+ if (pkt_hdr->size > MAX_HFI_PKT_SIZE*sizeof(unsigned int)) {
+ dprintk(CVP_ERR, "user HFI packet too large %x\n",
+ pkt_hdr->size);
+ return -EINVAL;
+ }
+
+ return 0;
+
+set_default_pkt_hdr:
+ pkt_hdr->size = sizeof(struct hfi_msg_session_hdr);
+ return 0;
+}
+
+static int _get_fence_pkt_hdr_from_user(struct cvp_kmd_arg __user *up,
+ struct cvp_hal_session_cmd_pkt *pkt_hdr)
+{
+ struct cvp_kmd_hfi_fence_packet *u;
+
+ u = &up->data.hfi_fence_pkt;
+
+ if (get_user(pkt_hdr->packet_type, &u->pkt_data[1]))
+ return -EFAULT;
+
+ pkt_hdr->size = (MAX_HFI_FENCE_OFFSET + MAX_HFI_FENCE_SIZE)
+ * sizeof(unsigned int);
+
+ if (pkt_hdr->size > (MAX_HFI_PKT_SIZE*sizeof(unsigned int)))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* Size is in unit of u32 */
+static int _copy_pkt_from_user(struct cvp_kmd_arg *kp,
+ struct cvp_kmd_arg __user *up,
+ unsigned int size)
+{
+ struct cvp_kmd_hfi_packet *k, *u;
+ int i;
+
+ k = &kp->data.hfi_pkt;
+ u = &up->data.hfi_pkt;
+ for (i = 0; i < size; i++)
+ if (get_user(k->pkt_data[i], &u->pkt_data[i]))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* Size is in unit of u32 */
+static int _copy_fence_pkt_from_user(struct cvp_kmd_arg *kp,
+ struct cvp_kmd_arg __user *up,
+ unsigned int size)
+{
+ struct cvp_kmd_hfi_fence_packet *k, *u;
+ int i;
+
+ k = &kp->data.hfi_fence_pkt;
+ u = &up->data.hfi_fence_pkt;
+ for (i = 0; i < MAX_HFI_FENCE_OFFSET; i++) {
+ if (get_user(k->pkt_data[i], &u->pkt_data[i]))
+ return -EFAULT;
+ }
+ for (i = 0; i < MAX_HFI_FENCE_SIZE; i++) {
+ if (get_user(k->fence_data[i], &u->fence_data[i]))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int _copy_pkt_to_user(struct cvp_kmd_arg *kp,
+ struct cvp_kmd_arg __user *up,
+ unsigned int size)
+{
+ struct cvp_kmd_hfi_packet *k, *u;
+ int i;
+
+ k = &kp->data.hfi_pkt;
+ u = &up->data.hfi_pkt;
+ for (i = 0; i < size; i++)
+ if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int _copy_fence_pkt_to_user(struct cvp_kmd_arg *kp,
+ struct cvp_kmd_arg __user *up,
+ unsigned int size)
+{
+ struct cvp_kmd_hfi_fence_packet *k, *u;
+ int i;
+
+ k = &kp->data.hfi_fence_pkt;
+ u = &up->data.hfi_fence_pkt;
+ for (i = 0; i < MAX_HFI_FENCE_OFFSET; i++) {
+ if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+ return -EFAULT;
+ }
+ for (i = 0; i < MAX_HFI_FENCE_SIZE; i++) {
+ if (put_user(k->fence_data[i], &u->fence_data[i]))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static int convert_from_user(struct cvp_kmd_arg *kp, unsigned long arg)
{
int rc = 0;
int i;
- struct msm_cvp_arg __user *up = compat_ptr(arg);
+ struct cvp_kmd_arg __user *up = compat_ptr(arg);
+ struct cvp_hal_session_cmd_pkt pkt_hdr;
if (!kp || !up) {
dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -20,9 +146,9 @@ static int convert_from_user(struct msm_cvp_arg *kp, unsigned long arg)
return -EFAULT;
switch (kp->type) {
- case MSM_CVP_GET_SESSION_INFO:
+ case CVP_KMD_GET_SESSION_INFO:
{
- struct msm_cvp_session_info *k, *u;
+ struct cvp_kmd_session_info *k, *u;
k = &kp->data.session;
u = &up->data.session;
@@ -33,9 +159,9 @@ static int convert_from_user(struct msm_cvp_arg *kp, unsigned long arg)
return -EFAULT;
break;
}
- case MSM_CVP_REQUEST_POWER:
+ case CVP_KMD_REQUEST_POWER:
{
- struct msm_cvp_request_power *k, *u;
+ struct cvp_kmd_request_power *k, *u;
k = &kp->data.req_power;
u = &up->data.req_power;
@@ -49,9 +175,9 @@ static int convert_from_user(struct msm_cvp_arg *kp, unsigned long arg)
return -EFAULT;
break;
}
- case MSM_CVP_REGISTER_BUFFER:
+ case CVP_KMD_REGISTER_BUFFER:
{
- struct msm_cvp_buffer *k, *u;
+ struct cvp_kmd_buffer *k, *u;
k = &kp->data.regbuf;
u = &up->data.regbuf;
@@ -68,9 +194,9 @@ static int convert_from_user(struct msm_cvp_arg *kp, unsigned long arg)
return -EFAULT;
break;
}
- case MSM_CVP_UNREGISTER_BUFFER:
+ case CVP_KMD_UNREGISTER_BUFFER:
{
- struct msm_cvp_buffer *k, *u;
+ struct cvp_kmd_buffer *k, *u;
k = &kp->data.unregbuf;
u = &up->data.unregbuf;
@@ -87,12 +213,10 @@ static int convert_from_user(struct msm_cvp_arg *kp, unsigned long arg)
return -EFAULT;
break;
}
- case MSM_CVP_HFI_SEND_CMD:
+ case CVP_KMD_HFI_SEND_CMD:
{
- struct msm_cvp_send_cmd *k, *u;
+ struct cvp_kmd_send_cmd *k, *u;
- dprintk(CVP_DBG, "%s: MSM_CVP_HFI_SEND_CMD\n",
- __func__);
k = &kp->data.send_cmd;
u = &up->data.send_cmd;
if (get_user(k->cmd_address_fd, &u->cmd_address_fd) ||
@@ -103,79 +227,42 @@ static int convert_from_user(struct msm_cvp_arg *kp, unsigned long arg)
return -EFAULT;
break;
}
- case MSM_CVP_HFI_DFS_CONFIG_CMD:
+ case CVP_KMD_SEND_CMD_PKT:
+ case CVP_KMD_HFI_DFS_CONFIG_CMD:
+ case CVP_KMD_HFI_DFS_FRAME_CMD:
+ case CVP_KMD_HFI_DME_CONFIG_CMD:
+ case CVP_KMD_HFI_DME_FRAME_CMD:
+ case CVP_KMD_HFI_PERSIST_CMD:
{
- struct msm_cvp_dfs_config *k, *u;
+ if (_get_pkt_hdr_from_user(up, &pkt_hdr)) {
+ dprintk(CVP_ERR, "Invalid syscall: %x, %x, %x\n",
+ kp->type, pkt_hdr.size, pkt_hdr.packet_type);
+ return -EFAULT;
+ }
- dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DFS_CONFIG_CMD\n", __func__);
- k = &kp->data.dfs_config;
- u = &up->data.dfs_config;
- for (i = 0; i < CVP_DFS_CONFIG_CMD_SIZE; i++)
- if (get_user(k->cvp_dfs_config[i],
- &u->cvp_dfs_config[i]))
- return -EFAULT;
+ dprintk(CVP_DBG, "system call cmd pkt: %d 0x%x\n",
+ pkt_hdr.size, pkt_hdr.packet_type);
+ rc = _copy_pkt_from_user(kp, up, (pkt_hdr.size >> 2));
break;
}
- case MSM_CVP_HFI_DFS_FRAME_CMD:
- case MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE:
+ case CVP_KMD_HFI_DME_FRAME_FENCE_CMD:
{
- struct msm_cvp_dfs_frame *k, *u;
+ if (_get_fence_pkt_hdr_from_user(up, &pkt_hdr)) {
+ dprintk(CVP_ERR, "Invalid syscall: %x, %x, %x\n",
+ kp->type, pkt_hdr.size, pkt_hdr.packet_type);
+ return -EFAULT;
+ }
- dprintk(CVP_DBG, "%s: Type =%d\n", __func__, kp->type);
- k = &kp->data.dfs_frame;
- u = &up->data.dfs_frame;
- for (i = 0; i < CVP_DFS_FRAME_CMD_SIZE; i++)
- if (get_user(k->frame_data[i], &u->frame_data[i]))
- return -EFAULT;
-
+ dprintk(CVP_DBG, "system call cmd pkt: %d 0x%x\n",
+ pkt_hdr.size, pkt_hdr.packet_type);
+ rc = _copy_fence_pkt_from_user(kp, up, (pkt_hdr.size >> 2));
break;
}
- case MSM_CVP_HFI_DME_CONFIG_CMD:
- {
- struct msm_cvp_dme_config *k, *u;
-
- dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DFS_CONFIG_CMD\n", __func__);
- k = &kp->data.dme_config;
- u = &up->data.dme_config;
- for (i = 0; i < CVP_DME_CONFIG_CMD_SIZE; i++)
- if (get_user(k->cvp_dme_config[i],
- &u->cvp_dme_config[i]))
- return -EFAULT;
+ case CVP_KMD_HFI_DFS_FRAME_CMD_RESPONSE:
+ case CVP_KMD_HFI_DME_FRAME_CMD_RESPONSE:
+ case CVP_KMD_HFI_PERSIST_CMD_RESPONSE:
+ case CVP_KMD_RECEIVE_MSG_PKT:
break;
- }
- case MSM_CVP_HFI_DME_FRAME_CMD:
- case MSM_CVP_HFI_DME_FRAME_CMD_RESPONSE:
- {
- struct msm_cvp_dme_frame *k, *u;
-
- dprintk(CVP_DBG, "%s: type = %d\n",
- __func__, kp->type);
- k = &kp->data.dme_frame;
- u = &up->data.dme_frame;
-
- for (i = 0; i < CVP_DME_FRAME_CMD_SIZE; i++)
- if (get_user(k->frame_data[i], &u->frame_data[i]))
- return -EFAULT;
-
- break;
- }
- case MSM_CVP_HFI_PERSIST_CMD:
- case MSM_CVP_HFI_PERSIST_CMD_RESPONSE:
- {
- struct msm_cvp_persist_buf *k, *u;
-
- dprintk(CVP_DBG, "%s: type = %d\n",
- __func__, kp->type);
- k = &kp->data.pbuf_cmd;
- u = &up->data.pbuf_cmd;
-
- for (i = 0; i < CVP_PERSIST_CMD_SIZE; i++)
- if (get_user(k->persist_data[i], &u->persist_data[i]))
- return -EFAULT;
-
- break;
- }
-
default:
dprintk(CVP_ERR, "%s: unknown cmd type 0x%x\n",
__func__, kp->type);
@@ -186,11 +273,12 @@ static int convert_from_user(struct msm_cvp_arg *kp, unsigned long arg)
return rc;
}
-static int convert_to_user(struct msm_cvp_arg *kp, unsigned long arg)
+static int convert_to_user(struct cvp_kmd_arg *kp, unsigned long arg)
{
int rc = 0;
- int i;
- struct msm_cvp_arg __user *up = compat_ptr(arg);
+ int i, size = sizeof(struct hfi_msg_session_hdr) >> 2;
+ struct cvp_kmd_arg __user *up = compat_ptr(arg);
+ struct cvp_hal_session_cmd_pkt pkt_hdr;
if (!kp || !up) {
dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -201,9 +289,20 @@ static int convert_to_user(struct msm_cvp_arg *kp, unsigned long arg)
return -EFAULT;
switch (kp->type) {
- case MSM_CVP_GET_SESSION_INFO:
+ case CVP_KMD_RECEIVE_MSG_PKT:
{
- struct msm_cvp_session_info *k, *u;
+ struct cvp_kmd_hfi_packet *k, *u;
+
+ k = &kp->data.hfi_pkt;
+ u = &up->data.hfi_pkt;
+ for (i = 0; i < size; i++)
+ if (put_user(k->pkt_data[i], &u->pkt_data[i]))
+ return -EFAULT;
+ break;
+ }
+ case CVP_KMD_GET_SESSION_INFO:
+ {
+ struct cvp_kmd_session_info *k, *u;
k = &kp->data.session;
u = &up->data.session;
@@ -214,9 +313,9 @@ static int convert_to_user(struct msm_cvp_arg *kp, unsigned long arg)
return -EFAULT;
break;
}
- case MSM_CVP_REQUEST_POWER:
+ case CVP_KMD_REQUEST_POWER:
{
- struct msm_cvp_request_power *k, *u;
+ struct cvp_kmd_request_power *k, *u;
k = &kp->data.req_power;
u = &up->data.req_power;
@@ -230,9 +329,9 @@ static int convert_to_user(struct msm_cvp_arg *kp, unsigned long arg)
return -EFAULT;
break;
}
- case MSM_CVP_REGISTER_BUFFER:
+ case CVP_KMD_REGISTER_BUFFER:
{
- struct msm_cvp_buffer *k, *u;
+ struct cvp_kmd_buffer *k, *u;
k = &kp->data.regbuf;
u = &up->data.regbuf;
@@ -249,9 +348,9 @@ static int convert_to_user(struct msm_cvp_arg *kp, unsigned long arg)
return -EFAULT;
break;
}
- case MSM_CVP_UNREGISTER_BUFFER:
+ case CVP_KMD_UNREGISTER_BUFFER:
{
- struct msm_cvp_buffer *k, *u;
+ struct cvp_kmd_buffer *k, *u;
k = &kp->data.unregbuf;
u = &up->data.unregbuf;
@@ -268,11 +367,11 @@ static int convert_to_user(struct msm_cvp_arg *kp, unsigned long arg)
return -EFAULT;
break;
}
- case MSM_CVP_HFI_SEND_CMD:
+ case CVP_KMD_HFI_SEND_CMD:
{
- struct msm_cvp_send_cmd *k, *u;
+ struct cvp_kmd_send_cmd *k, *u;
- dprintk(CVP_DBG, "%s: MSM_CVP_HFI_SEND_CMD\n",
+ dprintk(CVP_DBG, "%s: CVP_KMD_HFI_SEND_CMD\n",
__func__);
k = &kp->data.send_cmd;
@@ -285,80 +384,32 @@ static int convert_to_user(struct msm_cvp_arg *kp, unsigned long arg)
return -EFAULT;
break;
}
- case MSM_CVP_HFI_DFS_CONFIG_CMD:
+ case CVP_KMD_SEND_CMD_PKT:
+ case CVP_KMD_HFI_DFS_CONFIG_CMD:
+ case CVP_KMD_HFI_DFS_FRAME_CMD:
+ case CVP_KMD_HFI_DFS_FRAME_CMD_RESPONSE:
+ case CVP_KMD_HFI_DME_CONFIG_CMD:
+ case CVP_KMD_HFI_DME_FRAME_CMD:
+ case CVP_KMD_HFI_DME_FRAME_CMD_RESPONSE:
+ case CVP_KMD_HFI_PERSIST_CMD:
+ case CVP_KMD_HFI_PERSIST_CMD_RESPONSE:
{
- struct msm_cvp_dfs_config *k, *u;
+ if (_get_pkt_hdr_from_user(up, &pkt_hdr))
+ return -EFAULT;
- dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DFS_CONFIG_CMD\n",
- __func__);
-
- k = &kp->data.dfs_config;
- u = &up->data.dfs_config;
- for (i = 0; i < CVP_DFS_CONFIG_CMD_SIZE; i++)
- if (put_user(k->cvp_dfs_config[i],
- &u->cvp_dfs_config[i]))
- return -EFAULT;
+ dprintk(CVP_DBG, "Send user cmd pkt: %d %d\n",
+ pkt_hdr.size, pkt_hdr.packet_type);
+ rc = _copy_pkt_to_user(kp, up, (pkt_hdr.size >> 2));
break;
}
- case MSM_CVP_HFI_DFS_FRAME_CMD:
- case MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE:
+ case CVP_KMD_HFI_DME_FRAME_FENCE_CMD:
{
- struct msm_cvp_dfs_frame *k, *u;
+ if (_get_fence_pkt_hdr_from_user(up, &pkt_hdr))
+ return -EFAULT;
- dprintk(CVP_DBG, "%s: type = %d\n",
- __func__, kp->type);
- k = &kp->data.dfs_frame;
- u = &up->data.dfs_frame;
-
- for (i = 0; i < CVP_DFS_FRAME_CMD_SIZE; i++)
- if (put_user(k->frame_data[i], &u->frame_data[i]))
- return -EFAULT;
-
- break;
- }
- case MSM_CVP_HFI_DME_CONFIG_CMD:
- {
- struct msm_cvp_dme_config *k, *u;
-
- dprintk(CVP_DBG, "%s: MSM_CVP_HFI_DME_CONFIG_CMD\n", __func__);
- k = &kp->data.dme_config;
- u = &up->data.dme_config;
- for (i = 0; i < CVP_DME_CONFIG_CMD_SIZE; i++)
- if (put_user(k->cvp_dme_config[i],
- &u->cvp_dme_config[i]))
- return -EFAULT;
- break;
- }
- case MSM_CVP_HFI_DME_FRAME_CMD:
- case MSM_CVP_HFI_DME_FRAME_CMD_RESPONSE:
- {
- struct msm_cvp_dme_frame *k, *u;
-
- dprintk(CVP_DBG, "%s: type = %d\n",
- __func__, kp->type);
- k = &kp->data.dme_frame;
- u = &up->data.dme_frame;
-
- for (i = 0; i < CVP_DME_FRAME_CMD_SIZE; i++)
- if (put_user(k->frame_data[i], &u->frame_data[i]))
- return -EFAULT;
-
- break;
- }
- case MSM_CVP_HFI_PERSIST_CMD:
- case MSM_CVP_HFI_PERSIST_CMD_RESPONSE:
- {
- struct msm_cvp_persist_buf *k, *u;
-
- dprintk(CVP_DBG, "%s: type = %d\n",
- __func__, kp->type);
- k = &kp->data.pbuf_cmd;
- u = &up->data.pbuf_cmd;
-
- for (i = 0; i < CVP_PERSIST_CMD_SIZE; i++)
- if (put_user(k->persist_data[i], &u->persist_data[i]))
- return -EFAULT;
-
+ dprintk(CVP_DBG, "Send user cmd pkt: %d %d\n",
+ pkt_hdr.size, pkt_hdr.packet_type);
+ rc = _copy_fence_pkt_to_user(kp, up, (pkt_hdr.size >> 2));
break;
}
default:
@@ -376,7 +427,7 @@ long msm_cvp_v4l2_private(struct file *filp,
{
int rc;
struct msm_cvp_inst *inst;
- struct msm_cvp_arg karg;
+ struct cvp_kmd_arg karg;
if (!filp || !filp->private_data) {
dprintk(CVP_ERR, "%s: invalid params\n", __func__);
@@ -385,15 +436,18 @@ long msm_cvp_v4l2_private(struct file *filp,
inst = container_of(filp->private_data, struct msm_cvp_inst,
event_handler);
- memset(&karg, 0, sizeof(struct msm_cvp_arg));
+ memset(&karg, 0, sizeof(struct cvp_kmd_arg));
/*
* the arg points to user space memory and needs
* to be converted to kernel space before using it.
* Check do_video_ioctl() for more details.
*/
- if (convert_from_user(&karg, arg))
+ if (convert_from_user(&karg, arg)) {
+ dprintk(CVP_ERR, "%s: failed to get from user cmd %x\n",
+ __func__, karg.type);
return -EFAULT;
+ }
rc = msm_cvp_private((void *)inst, cmd, &karg);
if (rc) {
@@ -402,8 +456,11 @@ long msm_cvp_v4l2_private(struct file *filp,
return -EINVAL;
}
- if (convert_to_user(&karg, arg))
+ if (convert_to_user(&karg, arg)) {
+ dprintk(CVP_ERR, "%s: failed to copy to user cmd %x\n",
+ __func__, karg.type);
return -EFAULT;
+ }
return rc;
}
diff --git a/drivers/media/platform/msm/npu/npu_common.h b/drivers/media/platform/msm/npu/npu_common.h
index ac122e1..672b207 100644
--- a/drivers/media/platform/msm/npu/npu_common.h
+++ b/drivers/media/platform/msm/npu/npu_common.h
@@ -55,6 +55,15 @@ enum npu_power_level {
NPU_PWRLEVEL_OFF = 0xFFFFFFFF,
};
+#define NPU_ERR(fmt, args...) \
+ pr_err("NPU_ERR: %s: %d " fmt "\n", __func__, __LINE__, ##args)
+#define NPU_WARN(fmt, args...) \
+ pr_warn("NPU_WARN: %s: %d " fmt "\n", __func__, __LINE__, ##args)
+#define NPU_INFO(fmt, args...) \
+ pr_info("NPU_INFO: %s: %d " fmt "\n", __func__, __LINE__, ##args)
+#define NPU_DBG(fmt, args...) \
+ pr_debug("NPU_DBG: %s: %d " fmt "\n", __func__, __LINE__, ##args)
+
/* -------------------------------------------------------------------------
* Data Structures
* -------------------------------------------------------------------------
@@ -159,7 +168,6 @@ struct npu_pwrctrl {
uint32_t num_pwrlevels;
struct device *devbw;
- uint32_t bwmon_enabled;
uint32_t uc_pwrlevel;
uint32_t cdsprm_pwrlevel;
uint32_t fmax_pwrlevel;
@@ -206,7 +214,6 @@ struct npu_device {
struct npu_io_data tcm_io;
struct npu_io_data qdsp_io;
struct npu_io_data apss_shared_io;
- struct npu_io_data bwmon_io;
struct npu_io_data qfprom_io;
uint32_t core_clk_num;
diff --git a/drivers/media/platform/msm/npu/npu_dbg.c b/drivers/media/platform/msm/npu/npu_dbg.c
index f1486a2..b0dfc85 100644
--- a/drivers/media/platform/msm/npu/npu_dbg.c
+++ b/drivers/media/platform/msm/npu/npu_dbg.c
@@ -3,8 +3,6 @@
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
/* -------------------------------------------------------------------------
* Includes
* -------------------------------------------------------------------------
@@ -24,9 +22,9 @@ void npu_dump_debug_timeout_stats(struct npu_device *npu_dev)
uint32_t reg_val;
reg_val = REGR(npu_dev, REG_FW_JOB_CNT_START);
- pr_info("fw jobs execute started count = %d\n", reg_val);
+ NPU_INFO("fw jobs execute started count = %d\n", reg_val);
reg_val = REGR(npu_dev, REG_FW_JOB_CNT_END);
- pr_info("fw jobs execute finished count = %d\n", reg_val);
+ NPU_INFO("fw jobs execute finished count = %d\n", reg_val);
reg_val = REGR(npu_dev, REG_NPU_FW_DEBUG_DATA);
- pr_info("fw jobs aco parser debug = %d\n", reg_val);
+ NPU_INFO("fw jobs aco parser debug = %d\n", reg_val);
}
diff --git a/drivers/media/platform/msm/npu/npu_debugfs.c b/drivers/media/platform/msm/npu/npu_debugfs.c
index df83e2a..987e182 100644
--- a/drivers/media/platform/msm/npu/npu_debugfs.c
+++ b/drivers/media/platform/msm/npu/npu_debugfs.c
@@ -3,8 +3,6 @@
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
/* -------------------------------------------------------------------------
* Includes
* -------------------------------------------------------------------------
@@ -120,7 +118,7 @@ static ssize_t npu_debug_reg_write(struct file *file,
buf[count] = 0; /* end of string */
cnt = sscanf(buf, "%zx %x", &off, &data);
- pr_debug("%s %s 0x%zx, 0x%08x\n", __func__, buf, off, data);
+ NPU_DBG("%s 0x%zx, 0x%08x\n", buf, off, data);
return count;
if (cnt < 2)
@@ -133,7 +131,7 @@ static ssize_t npu_debug_reg_write(struct file *file,
npu_disable_core_power(npu_dev);
- pr_debug("write: addr=%zx data=%x\n", off, data);
+ NPU_DBG("write: addr=%zx data=%x\n", off, data);
return count;
}
@@ -193,9 +191,9 @@ static ssize_t npu_debug_reg_read(struct file *file,
return 0; /* done reading */
len = min(count, debugfs->buf_len - (size_t) *ppos);
- pr_debug("read %zi %zi\n", count, debugfs->buf_len - (size_t) *ppos);
+ NPU_DBG("read %zi %zi\n", count, debugfs->buf_len - (size_t) *ppos);
if (copy_to_user(user_buf, debugfs->buf + *ppos, len)) {
- pr_err("failed to copy to user\n");
+ NPU_ERR("failed to copy to user\n");
return -EFAULT;
}
@@ -216,7 +214,7 @@ static ssize_t npu_debug_off_write(struct file *file,
struct npu_device *npu_dev = file->private_data;
struct npu_debugfs_ctx *debugfs;
- pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
+ NPU_DBG("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
npu_dev = g_npu_dev;
debugfs = &npu_dev->debugfs_ctx;
@@ -231,7 +229,7 @@ static ssize_t npu_debug_off_write(struct file *file,
cnt = sscanf(buf, "%zx %x", &off, ®_cnt);
if (cnt == 1)
reg_cnt = DEFAULT_REG_DUMP_NUM;
- pr_debug("reg off = %zx, %d cnt=%d\n", off, reg_cnt, cnt);
+ NPU_DBG("reg off = %zx, %d cnt=%d\n", off, reg_cnt, cnt);
if (cnt >= 1) {
debugfs->reg_off = off;
debugfs->reg_cnt = reg_cnt;
@@ -248,7 +246,7 @@ static ssize_t npu_debug_off_read(struct file *file,
struct npu_device *npu_dev = file->private_data;
struct npu_debugfs_ctx *debugfs;
- pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
+ NPU_DBG("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
npu_dev = g_npu_dev;
debugfs = &npu_dev->debugfs_ctx;
@@ -259,7 +257,7 @@ static ssize_t npu_debug_off_read(struct file *file,
debugfs->reg_off, debugfs->reg_cnt);
if (copy_to_user(user_buf, buf, len)) {
- pr_err("failed to copy to user\n");
+ NPU_ERR("failed to copy to user\n");
return -EFAULT;
}
@@ -278,7 +276,7 @@ static ssize_t npu_debug_log_read(struct file *file,
struct npu_device *npu_dev = file->private_data;
struct npu_debugfs_ctx *debugfs;
- pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
+ NPU_DBG("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
npu_dev = g_npu_dev;
debugfs = &npu_dev->debugfs_ctx;
@@ -298,7 +296,7 @@ static ssize_t npu_debug_log_read(struct file *file,
if (copy_to_user(dst_addr, src_addr,
remaining_to_end)) {
- pr_err("%s failed to copy to user\n", __func__);
+ NPU_ERR("failed to copy to user\n");
mutex_unlock(&debugfs->log_lock);
return -EFAULT;
}
@@ -307,7 +305,7 @@ static ssize_t npu_debug_log_read(struct file *file,
if (copy_to_user(dst_addr, src_addr,
debugfs->log_num_bytes_buffered -
remaining_to_end)) {
- pr_err("%s failed to copy to user\n", __func__);
+ NPU_ERR("failed to copy to user\n");
mutex_unlock(&debugfs->log_lock);
return -EFAULT;
}
@@ -318,7 +316,7 @@ static ssize_t npu_debug_log_read(struct file *file,
if (copy_to_user(user_buf, (debugfs->log_buf +
debugfs->log_read_index),
debugfs->log_num_bytes_buffered)) {
- pr_err("%s failed to copy to user\n", __func__);
+ NPU_ERR("failed to copy to user\n");
mutex_unlock(&debugfs->log_lock);
return -EFAULT;
}
@@ -350,7 +348,7 @@ static ssize_t npu_debug_ctrl_write(struct file *file,
int32_t rc = 0;
uint32_t val;
- pr_debug("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
+ NPU_DBG("npu_dev %pK %pK\n", npu_dev, g_npu_dev);
npu_dev = g_npu_dev;
debugfs = &npu_dev->debugfs_ctx;
@@ -366,14 +364,14 @@ static ssize_t npu_debug_ctrl_write(struct file *file,
buf[count-1] = 0;/* remove line feed */
if (strcmp(buf, "on") == 0) {
- pr_info("triggering fw_init\n");
+ NPU_INFO("triggering fw_init\n");
if (fw_init(npu_dev) != 0)
- pr_info("error in fw_init\n");
+ NPU_INFO("error in fw_init\n");
} else if (strcmp(buf, "off") == 0) {
- pr_info("triggering fw_deinit\n");
+ NPU_INFO("triggering fw_deinit\n");
fw_deinit(npu_dev, false, true);
} else if (strcmp(buf, "ssr") == 0) {
- pr_info("trigger error irq\n");
+ NPU_INFO("trigger error irq\n");
if (npu_enable_core_power(npu_dev))
return -EPERM;
@@ -381,20 +379,20 @@ static ssize_t npu_debug_ctrl_write(struct file *file,
REGW(npu_dev, NPU_MASTERn_ERROR_IRQ_SET(0), 2);
npu_disable_core_power(npu_dev);
} else if (strcmp(buf, "ssr_wdt") == 0) {
- pr_info("trigger wdt irq\n");
+ NPU_INFO("trigger wdt irq\n");
npu_disable_post_pil_clocks(npu_dev);
} else if (strcmp(buf, "loopback") == 0) {
- pr_debug("loopback test\n");
+ NPU_DBG("loopback test\n");
rc = npu_host_loopback_test(npu_dev);
- pr_debug("loopback test end: %d\n", rc);
+ NPU_DBG("loopback test end: %d\n", rc);
} else {
rc = kstrtou32(buf, 10, &val);
if (rc) {
- pr_err("Invalid input for power level settings\n");
+ NPU_ERR("Invalid input for power level settings\n");
} else {
val = min(val, npu_dev->pwrctrl.max_pwrlevel);
npu_dev->pwrctrl.active_pwrlevel = val;
- pr_info("setting power state to %d\n", val);
+ NPU_INFO("setting power state to %d\n", val);
}
}
@@ -414,62 +412,62 @@ int npu_debugfs_init(struct npu_device *npu_dev)
debugfs->root = debugfs_create_dir("npu", NULL);
if (IS_ERR_OR_NULL(debugfs->root)) {
- pr_err("debugfs_create_dir for npu failed, error %ld\n",
+ NPU_ERR("debugfs_create_dir for npu failed, error %ld\n",
PTR_ERR(debugfs->root));
return -ENODEV;
}
if (!debugfs_create_file("reg", 0644, debugfs->root,
npu_dev, &npu_reg_fops)) {
- pr_err("debugfs_create_file reg fail\n");
+ NPU_ERR("debugfs_create_file reg fail\n");
goto err;
}
if (!debugfs_create_file("off", 0644, debugfs->root,
npu_dev, &npu_off_fops)) {
- pr_err("debugfs_create_file off fail\n");
+ NPU_ERR("debugfs_create_file off fail\n");
goto err;
}
if (!debugfs_create_file("log", 0644, debugfs->root,
npu_dev, &npu_log_fops)) {
- pr_err("debugfs_create_file log fail\n");
+ NPU_ERR("debugfs_create_file log fail\n");
goto err;
}
if (!debugfs_create_file("ctrl", 0644, debugfs->root,
npu_dev, &npu_ctrl_fops)) {
- pr_err("debugfs_create_file ctrl fail\n");
+ NPU_ERR("debugfs_create_file ctrl fail\n");
goto err;
}
if (!debugfs_create_bool("sys_cache_disable", 0644,
debugfs->root, &(host_ctx->sys_cache_disable))) {
- pr_err("debugfs_creat_bool fail for sys cache\n");
+ NPU_ERR("debugfs_creat_bool fail for sys cache\n");
goto err;
}
if (!debugfs_create_u32("fw_dbg_mode", 0644,
debugfs->root, &(host_ctx->fw_dbg_mode))) {
- pr_err("debugfs_create_u32 fail for fw_dbg_mode\n");
+ NPU_ERR("debugfs_create_u32 fail for fw_dbg_mode\n");
goto err;
}
if (!debugfs_create_u32("fw_state", 0444,
debugfs->root, &(host_ctx->fw_state))) {
- pr_err("debugfs_create_u32 fail for fw_state\n");
+ NPU_ERR("debugfs_create_u32 fail for fw_state\n");
goto err;
}
if (!debugfs_create_u32("pwr_level", 0444,
debugfs->root, &(pwr->active_pwrlevel))) {
- pr_err("debugfs_create_u32 fail for pwr_level\n");
+ NPU_ERR("debugfs_create_u32 fail for pwr_level\n");
goto err;
}
if (!debugfs_create_u32("exec_flags", 0644,
debugfs->root, &(host_ctx->exec_flags_override))) {
- pr_err("debugfs_create_u32 fail for exec_flags\n");
+ NPU_ERR("debugfs_create_u32 fail for exec_flags\n");
goto err;
}
diff --git a/drivers/media/platform/msm/npu/npu_dev.c b/drivers/media/platform/msm/npu/npu_dev.c
index f8f0d51..196b51a 100644
--- a/drivers/media/platform/msm/npu/npu_dev.c
+++ b/drivers/media/platform/msm/npu/npu_dev.c
@@ -3,8 +3,6 @@
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
/* -------------------------------------------------------------------------
* Includes
* -------------------------------------------------------------------------
@@ -106,23 +104,10 @@ static int npu_set_power_level(struct npu_device *npu_dev, bool notify_cxlimit);
* -------------------------------------------------------------------------
*/
static const char * const npu_post_clocks[] = {
- "npu_cpc_clk",
- "npu_cpc_timer_clk"
};
static const char * const npu_exclude_rate_clocks[] = {
- "qdss_clk",
- "at_clk",
- "trig_clk",
- "sleep_clk",
"xo_clk",
- "conf_noc_ahb_clk",
- "comp_noc_axi_clk",
- "npu_core_cti_clk",
- "npu_core_apb_clk",
- "npu_core_atb_clk",
- "npu_cpc_timer_clk",
- "qtimer_core_clk",
"bwmon_clk",
"bto_core_clk",
"llm_xo_clk",
@@ -134,7 +119,14 @@ static const char * const npu_exclude_rate_clocks[] = {
"dsp_bwmon_ahb_clk",
"cal_hm0_perf_cnt_clk",
"cal_hm1_perf_cnt_clk",
- "dsp_ahbs_clk"
+ "dsp_ahbs_clk",
+ "axi_clk",
+ "ahb_clk",
+ "dma_clk",
+ "llm_temp_clk",
+ "llm_curr_clk",
+ "atb_clk",
+ "s2p_clk",
};
static const struct npu_irq npu_irq_info[NPU_MAX_IRQ] = {
@@ -281,13 +273,13 @@ static ssize_t perf_mode_override_store(struct device *dev,
rc = kstrtou32(buf, 10, &val);
if (rc) {
- pr_err("Invalid input for perf mode setting\n");
+ NPU_ERR("Invalid input for perf mode setting\n");
return -EINVAL;
}
val = min(val, npu_dev->pwrctrl.num_pwrlevels);
npu_dev->pwrctrl.perf_mode_override = val;
- pr_info("setting uc_pwrlevel_override to %d\n", val);
+ NPU_INFO("setting uc_pwrlevel_override to %d\n", val);
npu_set_power_level(npu_dev, true);
return count;
@@ -333,7 +325,7 @@ void npu_disable_core_power(struct npu_device *npu_dev)
pwr->active_pwrlevel = thermalctrl->pwr_level;
pwr->uc_pwrlevel = pwr->max_pwrlevel;
pwr->cdsprm_pwrlevel = pwr->max_pwrlevel;
- pr_debug("setting back to power level=%d\n",
+ NPU_DBG("setting back to power level=%d\n",
pwr->active_pwrlevel);
}
}
@@ -404,8 +396,8 @@ static uint32_t npu_calc_power_level(struct npu_device *npu_dev)
* settings
*/
ret_level = min(therm_pwr_level, uc_pwr_level);
- pr_debug("%s therm=%d active=%d uc=%d set level=%d\n",
- __func__, therm_pwr_level, active_pwr_level, uc_pwr_level,
+ NPU_DBG("therm=%d active=%d uc=%d set level=%d\n",
+ therm_pwr_level, active_pwr_level, uc_pwr_level,
ret_level);
return ret_level;
@@ -423,7 +415,7 @@ static int npu_set_power_level(struct npu_device *npu_dev, bool notify_cxlimit)
pwr_level_to_cdsprm = pwr_level_to_set;
if (!pwr->pwr_vote_num) {
- pr_debug("power is not enabled during set request\n");
+ NPU_DBG("power is not enabled during set request\n");
pwr->active_pwrlevel = min(pwr_level_to_set,
npu_dev->pwrctrl.cdsprm_pwrlevel);
return 0;
@@ -434,11 +426,11 @@ static int npu_set_power_level(struct npu_device *npu_dev, bool notify_cxlimit)
/* if the same as current, dont do anything */
if (pwr_level_to_set == pwr->active_pwrlevel) {
- pr_debug("power level %d doesn't change\n", pwr_level_to_set);
+ NPU_DBG("power level %d doesn't change\n", pwr_level_to_set);
return 0;
}
- pr_debug("setting power level to [%d]\n", pwr_level_to_set);
+ NPU_DBG("setting power level to [%d]\n", pwr_level_to_set);
pwr_level_idx = npu_power_level_to_index(npu_dev, pwr_level_to_set);
pwrlevel = &npu_dev->pwrctrl.pwrlevels[pwr_level_idx];
@@ -453,13 +445,13 @@ static int npu_set_power_level(struct npu_device *npu_dev, bool notify_cxlimit)
continue;
}
- pr_debug("requested rate of clock [%s] to [%ld]\n",
+ NPU_DBG("requested rate of clock [%s] to [%ld]\n",
npu_dev->core_clks[i].clk_name, pwrlevel->clk_freq[i]);
ret = clk_set_rate(npu_dev->core_clks[i].clk,
pwrlevel->clk_freq[i]);
if (ret) {
- pr_debug("clk_set_rate %s to %ld failed with %d\n",
+ NPU_DBG("clk_set_rate %s to %ld failed with %d\n",
npu_dev->core_clks[i].clk_name,
pwrlevel->clk_freq[i], ret);
break;
@@ -541,11 +533,11 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
continue;
}
- pr_debug("enabling clock %s\n", core_clks[i].clk_name);
+ NPU_DBG("enabling clock %s\n", core_clks[i].clk_name);
rc = clk_prepare_enable(core_clks[i].clk);
if (rc) {
- pr_err("%s enable failed\n",
+ NPU_ERR("%s enable failed\n",
core_clks[i].clk_name);
break;
}
@@ -553,14 +545,14 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
if (npu_is_exclude_rate_clock(core_clks[i].clk_name))
continue;
- pr_debug("setting rate of clock %s to %ld\n",
+ NPU_DBG("setting rate of clock %s to %ld\n",
core_clks[i].clk_name, pwrlevel->clk_freq[i]);
rc = clk_set_rate(core_clks[i].clk,
pwrlevel->clk_freq[i]);
/* not fatal error, keep using previous clk rate */
if (rc) {
- pr_err("clk_set_rate %s to %ld failed\n",
+ NPU_ERR("clk_set_rate %s to %ld failed\n",
core_clks[i].clk_name,
pwrlevel->clk_freq[i]);
rc = 0;
@@ -576,7 +568,7 @@ static int npu_enable_clocks(struct npu_device *npu_dev, bool post_pil)
if (npu_is_post_clock(core_clks[i].clk_name))
continue;
}
- pr_debug("disabling clock %s\n", core_clks[i].clk_name);
+ NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
clk_disable_unprepare(core_clks[i].clk);
}
}
@@ -600,17 +592,17 @@ static void npu_disable_clocks(struct npu_device *npu_dev, bool post_pil)
/* set clock rate to 0 before disabling it */
if (!npu_is_exclude_rate_clock(core_clks[i].clk_name)) {
- pr_debug("setting rate of clock %s to 0\n",
+ NPU_DBG("setting rate of clock %s to 0\n",
core_clks[i].clk_name);
rc = clk_set_rate(core_clks[i].clk, 0);
if (rc) {
- pr_err("clk_set_rate %s to 0 failed\n",
+ NPU_ERR("clk_set_rate %s to 0 failed\n",
core_clks[i].clk_name);
}
}
- pr_debug("disabling clock %s\n", core_clks[i].clk_name);
+ NPU_DBG("disabling clock %s\n", core_clks[i].clk_name);
clk_disable_unprepare(core_clks[i].clk);
}
}
@@ -625,8 +617,7 @@ static int npu_get_max_state(struct thermal_cooling_device *cdev,
struct npu_device *npu_dev = cdev->devdata;
struct npu_thermalctrl *thermalctrl = &npu_dev->thermalctrl;
- pr_debug("enter %s thermal max state=%lu\n", __func__,
- thermalctrl->max_state);
+ NPU_DBG("thermal max state=%lu\n", thermalctrl->max_state);
*state = thermalctrl->max_state;
@@ -639,8 +630,7 @@ static int npu_get_cur_state(struct thermal_cooling_device *cdev,
struct npu_device *npu_dev = cdev->devdata;
struct npu_thermalctrl *thermal = &npu_dev->thermalctrl;
- pr_debug("enter %s thermal current state=%lu\n", __func__,
- thermal->current_state);
+ NPU_DBG("thermal current state=%lu\n", thermal->current_state);
*state = thermal->current_state;
@@ -653,7 +643,7 @@ npu_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
struct npu_device *npu_dev = cdev->devdata;
struct npu_thermalctrl *thermal = &npu_dev->thermalctrl;
- pr_debug("enter %s request state=%lu\n", __func__, state);
+ NPU_DBG("request state=%lu\n", state);
if (state > thermal->max_state)
return -EINVAL;
@@ -679,11 +669,11 @@ static int npu_enable_regulators(struct npu_device *npu_dev)
for (i = 0; i < npu_dev->regulator_num; i++) {
rc = regulator_enable(regulators[i].regulator);
if (rc < 0) {
- pr_err("%s enable failed\n",
+ NPU_ERR("%s enable failed\n",
regulators[i].regulator_name);
break;
}
- pr_debug("regulator %s enabled\n",
+ NPU_DBG("regulator %s enabled\n",
regulators[i].regulator_name);
}
}
@@ -700,7 +690,7 @@ static void npu_disable_regulators(struct npu_device *npu_dev)
if (host_ctx->power_vote_num > 0) {
for (i = 0; i < npu_dev->regulator_num; i++) {
regulator_disable(regulators[i].regulator);
- pr_debug("regulator %s disabled\n",
+ NPU_DBG("regulator %s disabled\n",
regulators[i].regulator_name);
}
host_ctx->power_vote_num--;
@@ -718,7 +708,7 @@ int npu_enable_irq(struct npu_device *npu_dev)
for (i = 0; i < NPU_MAX_IRQ; i++) {
if (npu_dev->irq[i].irq != 0) {
enable_irq(npu_dev->irq[i].irq);
- pr_debug("enable irq %d\n", npu_dev->irq[i].irq);
+ NPU_DBG("enable irq %d\n", npu_dev->irq[i].irq);
}
}
@@ -732,7 +722,7 @@ void npu_disable_irq(struct npu_device *npu_dev)
for (i = 0; i < NPU_MAX_IRQ; i++) {
if (npu_dev->irq[i].irq != 0) {
disable_irq(npu_dev->irq[i].irq);
- pr_debug("disable irq %d\n", npu_dev->irq[i].irq);
+ NPU_DBG("disable irq %d\n", npu_dev->irq[i].irq);
}
}
}
@@ -749,7 +739,7 @@ int npu_enable_sys_cache(struct npu_device *npu_dev)
if (!npu_dev->host_ctx.sys_cache_disable) {
npu_dev->sys_cache = llcc_slice_getd(LLCC_NPU);
if (IS_ERR_OR_NULL(npu_dev->sys_cache)) {
- pr_warn("unable to init sys cache\n");
+ NPU_WARN("unable to init sys cache\n");
npu_dev->sys_cache = NULL;
npu_dev->host_ctx.sys_cache_disable = true;
return 0;
@@ -780,12 +770,12 @@ int npu_enable_sys_cache(struct npu_device *npu_dev)
REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(3), reg_val);
REGW(npu_dev, NPU_CACHEMAP1_ATTR_METADATA_IDn(4), reg_val);
- pr_debug("prior to activate sys cache\n");
+ NPU_DBG("prior to activate sys cache\n");
rc = llcc_slice_activate(npu_dev->sys_cache);
if (rc)
- pr_err("failed to activate sys cache\n");
+ NPU_ERR("failed to activate sys cache\n");
else
- pr_debug("sys cache activated\n");
+ NPU_DBG("sys cache activated\n");
}
return rc;
@@ -799,10 +789,10 @@ void npu_disable_sys_cache(struct npu_device *npu_dev)
if (npu_dev->sys_cache) {
rc = llcc_slice_deactivate(npu_dev->sys_cache);
if (rc) {
- pr_err("failed to deactivate sys cache\n");
+ NPU_ERR("failed to deactivate sys cache\n");
return;
}
- pr_debug("sys cache deactivated\n");
+ NPU_DBG("sys cache deactivated\n");
llcc_slice_putd(npu_dev->sys_cache);
npu_dev->sys_cache = NULL;
}
@@ -866,21 +856,21 @@ static int npu_get_info(struct npu_client *client, unsigned long arg)
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
- pr_err("fail to copy from user\n");
+ NPU_ERR("fail to copy from user\n");
return -EFAULT;
}
ret = npu_host_get_info(npu_dev, &req);
if (ret) {
- pr_err("npu_host_get_info failed\n");
+ NPU_ERR("npu_host_get_info failed\n");
return ret;
}
ret = copy_to_user(argp, &req, sizeof(req));
if (ret) {
- pr_err("fail to copy to user\n");
+ NPU_ERR("fail to copy to user\n");
return -EFAULT;
}
return 0;
@@ -895,21 +885,21 @@ static int npu_map_buf(struct npu_client *client, unsigned long arg)
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
- pr_err("fail to copy from user\n");
+ NPU_ERR("fail to copy from user\n");
return -EFAULT;
}
ret = npu_host_map_buf(client, &req);
if (ret) {
- pr_err("npu_host_map_buf failed\n");
+ NPU_ERR("npu_host_map_buf failed\n");
return ret;
}
ret = copy_to_user(argp, &req, sizeof(req));
if (ret) {
- pr_err("fail to copy to user\n");
+ NPU_ERR("fail to copy to user\n");
return -EFAULT;
}
return 0;
@@ -924,21 +914,21 @@ static int npu_unmap_buf(struct npu_client *client, unsigned long arg)
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
- pr_err("fail to copy from user\n");
+ NPU_ERR("fail to copy from user\n");
return -EFAULT;
}
ret = npu_host_unmap_buf(client, &req);
if (ret) {
- pr_err("npu_host_unmap_buf failed\n");
+ NPU_ERR("npu_host_unmap_buf failed\n");
return ret;
}
ret = copy_to_user(argp, &req, sizeof(req));
if (ret) {
- pr_err("fail to copy to user\n");
+ NPU_ERR("fail to copy to user\n");
return -EFAULT;
}
return 0;
@@ -955,21 +945,21 @@ static int npu_load_network(struct npu_client *client,
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
- pr_err("fail to copy from user\n");
+ NPU_ERR("fail to copy from user\n");
return -EFAULT;
}
- pr_debug("network load with perf request %d\n", req.perf_mode);
+ NPU_DBG("network load with perf request %d\n", req.perf_mode);
ret = npu_host_load_network(client, &req);
if (ret) {
- pr_err("npu_host_load_network failed %d\n", ret);
+ NPU_ERR("npu_host_load_network failed %d\n", ret);
return ret;
}
ret = copy_to_user(argp, &req, sizeof(req));
if (ret) {
- pr_err("fail to copy to user\n");
+ NPU_ERR("fail to copy to user\n");
ret = -EFAULT;
unload_req.network_hdl = req.network_hdl;
npu_host_unload_network(client, &unload_req);
@@ -988,12 +978,12 @@ static int npu_load_network_v2(struct npu_client *client,
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
- pr_err("fail to copy from user\n");
+ NPU_ERR("fail to copy from user\n");
return -EFAULT;
}
if (req.patch_info_num > MSM_NPU_MAX_PATCH_LAYER_NUM) {
- pr_err("Invalid patch info num %d[max:%d]\n",
+ NPU_ERR("Invalid patch info num %d[max:%d]\n",
req.patch_info_num, MSM_NPU_MAX_PATCH_LAYER_NUM);
return -EINVAL;
}
@@ -1008,25 +998,25 @@ static int npu_load_network_v2(struct npu_client *client,
(void __user *)req.patch_info,
req.patch_info_num * sizeof(*patch_info));
if (ret) {
- pr_err("fail to copy patch info\n");
+ NPU_ERR("fail to copy patch info\n");
kfree(patch_info);
return -EFAULT;
}
}
- pr_debug("network load with perf request %d\n", req.perf_mode);
+ NPU_DBG("network load with perf request %d\n", req.perf_mode);
ret = npu_host_load_network_v2(client, &req, patch_info);
kfree(patch_info);
if (ret) {
- pr_err("npu_host_load_network_v2 failed %d\n", ret);
+ NPU_ERR("npu_host_load_network_v2 failed %d\n", ret);
return ret;
}
ret = copy_to_user(argp, &req, sizeof(req));
if (ret) {
- pr_err("fail to copy to user\n");
+ NPU_ERR("fail to copy to user\n");
ret = -EFAULT;
unload_req.network_hdl = req.network_hdl;
npu_host_unload_network(client, &unload_req);
@@ -1045,21 +1035,21 @@ static int npu_unload_network(struct npu_client *client,
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
- pr_err("fail to copy from user\n");
+ NPU_ERR("fail to copy from user\n");
return -EFAULT;
}
ret = npu_host_unload_network(client, &req);
if (ret) {
- pr_err("npu_host_unload_network failed %d\n", ret);
+ NPU_ERR("npu_host_unload_network failed %d\n", ret);
return ret;
}
ret = copy_to_user(argp, &req, sizeof(req));
if (ret) {
- pr_err("fail to copy to user\n");
+ NPU_ERR("fail to copy to user\n");
return -EFAULT;
}
return 0;
@@ -1075,13 +1065,13 @@ static int npu_exec_network(struct npu_client *client,
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
- pr_err("fail to copy from user\n");
+ NPU_ERR("fail to copy from user\n");
return -EFAULT;
}
if ((req.input_layer_num > MSM_NPU_MAX_INPUT_LAYER_NUM) ||
(req.output_layer_num > MSM_NPU_MAX_OUTPUT_LAYER_NUM)) {
- pr_err("Invalid input/out layer num %d[max:%d] %d[max:%d]\n",
+ NPU_ERR("Invalid input/out layer num %d[max:%d] %d[max:%d]\n",
req.input_layer_num, MSM_NPU_MAX_INPUT_LAYER_NUM,
req.output_layer_num, MSM_NPU_MAX_OUTPUT_LAYER_NUM);
return -EINVAL;
@@ -1090,14 +1080,14 @@ static int npu_exec_network(struct npu_client *client,
ret = npu_host_exec_network(client, &req);
if (ret) {
- pr_err("npu_host_exec_network failed %d\n", ret);
+ NPU_ERR("npu_host_exec_network failed %d\n", ret);
return ret;
}
ret = copy_to_user(argp, &req, sizeof(req));
if (ret) {
- pr_err("fail to copy to user\n");
+ NPU_ERR("fail to copy to user\n");
return -EFAULT;
}
return 0;
@@ -1113,18 +1103,18 @@ static int npu_exec_network_v2(struct npu_client *client,
ret = copy_from_user(&req, argp, sizeof(req));
if (ret) {
- pr_err("fail to copy from user\n");
+ NPU_ERR("fail to copy from user\n");
return -EFAULT;
}
if (req.patch_buf_info_num > MSM_NPU_MAX_PATCH_LAYER_NUM) {
- pr_err("Invalid patch buf info num %d[max:%d]\n",
+ NPU_ERR("Invalid patch buf info num %d[max:%d]\n",
req.patch_buf_info_num, MSM_NPU_MAX_PATCH_LAYER_NUM);
return -EINVAL;
}
if (req.stats_buf_size > NPU_MAX_STATS_BUF_SIZE) {
- pr_err("Invalid stats buffer size %d max %d\n",
+ NPU_ERR("Invalid stats buffer size %d max %d\n",
req.stats_buf_size, NPU_MAX_STATS_BUF_SIZE);
return -EINVAL;
}
@@ -1139,7 +1129,7 @@ static int npu_exec_network_v2(struct npu_client *client,
(void __user *)req.patch_buf_info,
req.patch_buf_info_num * sizeof(*patch_buf_info));
if (ret) {
- pr_err("fail to copy patch buf info\n");
+ NPU_ERR("fail to copy patch buf info\n");
kfree(patch_buf_info);
return -EFAULT;
}
@@ -1149,13 +1139,13 @@ static int npu_exec_network_v2(struct npu_client *client,
kfree(patch_buf_info);
if (ret) {
- pr_err("npu_host_exec_network_v2 failed %d\n", ret);
+ NPU_ERR("npu_host_exec_network_v2 failed %d\n", ret);
return ret;
}
ret = copy_to_user(argp, &req, sizeof(req));
if (ret) {
- pr_err("fail to copy to user\n");
+ NPU_ERR("fail to copy to user\n");
ret = -EFAULT;
}
@@ -1172,7 +1162,7 @@ static int npu_process_kevent(struct npu_kevent *kevt)
(void *)&kevt->reserved[0],
kevt->evt.u.exec_v2_done.stats_buf_size);
if (ret) {
- pr_err("fail to copy to user\n");
+ NPU_ERR("fail to copy to user\n");
kevt->evt.u.exec_v2_done.stats_buf_size = 0;
ret = -EFAULT;
}
@@ -1193,7 +1183,7 @@ static int npu_receive_event(struct npu_client *client,
mutex_lock(&client->list_lock);
if (list_empty(&client->evt_list)) {
- pr_err("event list is empty\n");
+ NPU_ERR("event list is empty\n");
ret = -EINVAL;
} else {
kevt = list_first_entry(&client->evt_list,
@@ -1203,7 +1193,7 @@ static int npu_receive_event(struct npu_client *client,
ret = copy_to_user(argp, &kevt->evt,
sizeof(struct msm_npu_event));
if (ret) {
- pr_err("fail to copy to user\n");
+ NPU_ERR("fail to copy to user\n");
ret = -EFAULT;
}
kfree(kevt);
@@ -1248,7 +1238,7 @@ static long npu_ioctl(struct file *file, unsigned int cmd,
ret = npu_receive_event(client, arg);
break;
default:
- pr_err("unexpected IOCTL %x\n", cmd);
+ NPU_ERR("unexpected IOCTL %x\n", cmd);
}
return ret;
@@ -1263,7 +1253,7 @@ static unsigned int npu_poll(struct file *filp, struct poll_table_struct *p)
mutex_lock(&client->list_lock);
if (!list_empty(&client->evt_list)) {
- pr_debug("poll cmd done\n");
+ NPU_DBG("poll cmd done\n");
rc = POLLIN | POLLRDNORM;
}
mutex_unlock(&client->list_lock);
@@ -1287,11 +1277,11 @@ static int npu_parse_dt_clock(struct npu_device *npu_dev)
num_clk = of_property_count_strings(pdev->dev.of_node,
"clock-names");
if (num_clk <= 0) {
- pr_err("clocks are not defined\n");
+ NPU_ERR("clocks are not defined\n");
rc = -EINVAL;
goto clk_err;
} else if (num_clk > NUM_MAX_CLK_NUM) {
- pr_err("number of clocks %d exceeds limit\n", num_clk);
+ NPU_ERR("number of clocks %d exceeds limit\n", num_clk);
rc = -EINVAL;
goto clk_err;
}
@@ -1304,7 +1294,7 @@ static int npu_parse_dt_clock(struct npu_device *npu_dev)
sizeof(core_clks[i].clk_name));
core_clks[i].clk = devm_clk_get(&pdev->dev, clock_name);
if (IS_ERR(core_clks[i].clk)) {
- pr_err("unable to get clk: %s\n", clock_name);
+ NPU_ERR("unable to get clk: %s\n", clock_name);
rc = -EINVAL;
break;
}
@@ -1327,12 +1317,12 @@ static int npu_parse_dt_regulator(struct npu_device *npu_dev)
"qcom,proxy-reg-names");
if (num <= 0) {
rc = -EINVAL;
- pr_err("regulator not defined\n");
+ NPU_ERR("regulator not defined\n");
goto regulator_err;
}
if (num > NPU_MAX_REGULATOR_NUM) {
rc = -EINVAL;
- pr_err("regulator number %d is over the limit %d\n", num,
+ NPU_ERR("regulator number %d is over the limit %d\n", num,
NPU_MAX_REGULATOR_NUM);
num = NPU_MAX_REGULATOR_NUM;
}
@@ -1345,7 +1335,7 @@ static int npu_parse_dt_regulator(struct npu_device *npu_dev)
sizeof(regulators[i].regulator_name));
regulators[i].regulator = devm_regulator_get(&pdev->dev, name);
if (IS_ERR(regulators[i].regulator)) {
- pr_err("unable to get regulator: %s\n", name);
+ NPU_ERR("unable to get regulator: %s\n", name);
rc = -EINVAL;
break;
}
@@ -1376,17 +1366,17 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
struct npu_pwrlevel *level;
if (of_property_read_u32(child, "reg", &index)) {
- pr_err("Can't find reg property\n");
+ NPU_ERR("Can't find reg property\n");
return -EINVAL;
}
if (of_property_read_u32(child, "vreg", &pwr_level)) {
- pr_err("Can't find vreg property\n");
+ NPU_ERR("Can't find vreg property\n");
return -EINVAL;
}
if (index >= NPU_MAX_PWRLEVELS) {
- pr_err("pwrlevel index %d is out of range\n",
+ NPU_ERR("pwrlevel index %d is out of range\n",
index);
continue;
}
@@ -1396,7 +1386,7 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
if (of_property_read_u32_array(child, "clk-freq",
clk_array_values, npu_dev->core_clk_num)) {
- pr_err("pwrlevel index %d read clk-freq failed %d\n",
+ NPU_ERR("pwrlevel index %d read clk-freq failed %d\n",
index, npu_dev->core_clk_num);
return -EINVAL;
}
@@ -1415,7 +1405,7 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
clk_rate = clk_round_rate(npu_dev->core_clks[i].clk,
clk_array_values[i]);
- pr_debug("clk %s rate [%u]:[%u]\n",
+ NPU_DBG("clk %s rate [%u]:[%u]\n",
npu_dev->core_clks[i].clk_name,
clk_array_values[i], clk_rate);
level->clk_freq[i] = clk_rate;
@@ -1427,7 +1417,7 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
fmax = (npu_qfprom_reg_read(npu_dev,
QFPROM_FMAX_REG_OFFSET) & QFPROM_FMAX_BITS_MASK) >>
QFPROM_FMAX_BITS_SHIFT;
- pr_debug("fmax %x\n", fmax);
+ NPU_DBG("fmax %x\n", fmax);
switch (fmax) {
case 1:
@@ -1447,7 +1437,7 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
}
of_property_read_u32(node, "initial-pwrlevel", &init_level_index);
- pr_debug("initial-pwrlevel %d\n", init_level_index);
+ NPU_DBG("initial-pwrlevel %d\n", init_level_index);
if (init_level_index >= pwr->num_pwrlevels)
init_level_index = pwr->num_pwrlevels - 1;
@@ -1456,10 +1446,10 @@ static int npu_of_parse_pwrlevels(struct npu_device *npu_dev,
init_level_index);
if (init_power_level > pwr->max_pwrlevel) {
init_power_level = pwr->max_pwrlevel;
- pr_debug("Adjust init power level to %d\n", init_power_level);
+ NPU_DBG("Adjust init power level to %d\n", init_power_level);
}
- pr_debug("init power level %d max %d min %d\n", init_power_level,
+ NPU_DBG("init power level %d max %d min %d\n", init_power_level,
pwr->max_pwrlevel, pwr->min_pwrlevel);
pwr->active_pwrlevel = pwr->default_pwrlevel = init_power_level;
pwr->uc_pwrlevel = pwr->max_pwrlevel;
@@ -1474,14 +1464,12 @@ static int npu_pwrctrl_init(struct npu_device *npu_dev)
struct platform_device *pdev = npu_dev->pdev;
struct device_node *node;
int ret = 0;
- struct platform_device *p2dev;
- struct npu_pwrctrl *pwr = &npu_dev->pwrctrl;
/* Power levels */
node = of_find_node_by_name(pdev->dev.of_node, "qcom,npu-pwrlevels");
if (!node) {
- pr_err("unable to find 'qcom,npu-pwrlevels'\n");
+ NPU_ERR("unable to find 'qcom,npu-pwrlevels'\n");
return -EINVAL;
}
@@ -1489,26 +1477,6 @@ static int npu_pwrctrl_init(struct npu_device *npu_dev)
if (ret)
return ret;
- /* Parse Bandwidth */
- node = of_parse_phandle(pdev->dev.of_node,
- "qcom,npubw-dev", 0);
-
- if (node) {
- /* Set to 1 initially - we assume bwmon is on */
- pwr->bwmon_enabled = 1;
- p2dev = of_find_device_by_node(node);
- if (p2dev) {
- pwr->devbw = &p2dev->dev;
- } else {
- pr_err("parser power level failed\n");
- ret = -EINVAL;
- return ret;
- }
- } else {
- pr_warn("bwdev is not defined in dts\n");
- pwr->devbw = NULL;
- }
-
return ret;
}
@@ -1533,13 +1501,13 @@ static int npu_irq_init(struct npu_device *npu_dev)
npu_dev->irq[i].irq = platform_get_irq_byname(
npu_dev->pdev, npu_dev->irq[i].name);
if (npu_dev->irq[i].irq < 0) {
- pr_err("get_irq for %s failed\n\n",
+ NPU_ERR("get_irq for %s failed\n\n",
npu_dev->irq[i].name);
ret = -EINVAL;
break;
}
- pr_debug("irq %s: %d\n", npu_dev->irq[i].name,
+ NPU_DBG("irq %s: %d\n", npu_dev->irq[i].name,
npu_dev->irq[i].irq);
irq_set_status_flags(npu_dev->irq[i].irq,
IRQ_NOAUTOEN);
@@ -1548,7 +1516,7 @@ static int npu_irq_init(struct npu_device *npu_dev)
irq_type, npu_dev->irq[i].name,
npu_dev);
if (ret) {
- pr_err("devm_request_irq(%s:%d) failed\n",
+ NPU_ERR("devm_request_irq(%s:%d) failed\n",
npu_dev->irq[i].name,
npu_dev->irq[i].irq);
break;
@@ -1571,7 +1539,7 @@ static int npu_mbox_init(struct npu_device *npu_dev)
mbox_aop->chan = mbox_request_channel(&mbox_aop->client, 0);
if (IS_ERR(mbox_aop->chan)) {
- pr_warn("aop mailbox is not available\n");
+ NPU_WARN("aop mailbox is not available\n");
mbox_aop->chan = NULL;
}
}
@@ -1609,7 +1577,7 @@ static int npu_probe(struct platform_device *pdev)
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "core");
if (!res) {
- pr_err("unable to get core resource\n");
+ NPU_ERR("unable to get core resource\n");
rc = -ENODEV;
goto error_get_dev_num;
}
@@ -1618,17 +1586,17 @@ static int npu_probe(struct platform_device *pdev)
npu_dev->core_io.base = devm_ioremap(&pdev->dev, res->start,
npu_dev->core_io.size);
if (unlikely(!npu_dev->core_io.base)) {
- pr_err("unable to map core\n");
+ NPU_ERR("unable to map core\n");
rc = -ENOMEM;
goto error_get_dev_num;
}
- pr_debug("core phy address=0x%llx virt=%pK\n",
+ NPU_DBG("core phy address=0x%llx virt=%pK\n",
res->start, npu_dev->core_io.base);
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "tcm");
if (!res) {
- pr_err("unable to get tcm resource\n");
+ NPU_ERR("unable to get tcm resource\n");
rc = -ENODEV;
goto error_get_dev_num;
}
@@ -1637,17 +1605,17 @@ static int npu_probe(struct platform_device *pdev)
npu_dev->tcm_io.base = devm_ioremap(&pdev->dev, res->start,
npu_dev->tcm_io.size);
if (unlikely(!npu_dev->tcm_io.base)) {
- pr_err("unable to map tcm\n");
+ NPU_ERR("unable to map tcm\n");
rc = -ENOMEM;
goto error_get_dev_num;
}
- pr_debug("tcm phy address=0x%llx virt=%pK\n",
+ NPU_DBG("tcm phy address=0x%llx virt=%pK\n",
res->start, npu_dev->tcm_io.base);
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "qdsp");
if (!res) {
- pr_err("unable to get qdsp resource\n");
+ NPU_ERR("unable to get qdsp resource\n");
rc = -ENODEV;
goto error_get_dev_num;
}
@@ -1656,17 +1624,17 @@ static int npu_probe(struct platform_device *pdev)
npu_dev->qdsp_io.base = devm_ioremap(&pdev->dev, res->start,
npu_dev->qdsp_io.size);
if (unlikely(!npu_dev->qdsp_io.base)) {
- pr_err("unable to map qdsp\n");
+ NPU_ERR("unable to map qdsp\n");
rc = -ENOMEM;
goto error_get_dev_num;
}
- pr_debug("qdsp phy address=0x%llx virt=%pK\n",
+ NPU_DBG("qdsp phy address=0x%llx virt=%pK\n",
res->start, npu_dev->qdsp_io.base);
res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "apss_shared");
if (!res) {
- pr_err("unable to get apss_shared resource\n");
+ NPU_ERR("unable to get apss_shared resource\n");
rc = -ENODEV;
goto error_get_dev_num;
}
@@ -1675,46 +1643,28 @@ static int npu_probe(struct platform_device *pdev)
npu_dev->apss_shared_io.base = devm_ioremap(&pdev->dev, res->start,
npu_dev->apss_shared_io.size);
if (unlikely(!npu_dev->apss_shared_io.base)) {
- pr_err("unable to map apss_shared\n");
+ NPU_ERR("unable to map apss_shared\n");
rc = -ENOMEM;
goto error_get_dev_num;
}
- pr_debug("apss_shared phy address=0x%llx virt=%pK\n",
+ NPU_DBG("apss_shared phy address=0x%llx virt=%pK\n",
res->start, npu_dev->apss_shared_io.base);
res = platform_get_resource_byname(pdev,
- IORESOURCE_MEM, "bwmon");
- if (!res) {
- pr_info("unable to get bwmon resource\n");
- } else {
- npu_dev->bwmon_io.size = resource_size(res);
- npu_dev->bwmon_io.phy_addr = res->start;
- npu_dev->bwmon_io.base = devm_ioremap(&pdev->dev, res->start,
- npu_dev->bwmon_io.size);
- if (unlikely(!npu_dev->bwmon_io.base)) {
- pr_err("unable to map bwmon\n");
- rc = -ENOMEM;
- goto error_get_dev_num;
- }
- pr_debug("bwmon phy address=0x%llx virt=%pK\n",
- res->start, npu_dev->bwmon_io.base);
- }
-
- res = platform_get_resource_byname(pdev,
IORESOURCE_MEM, "qfprom_physical");
if (!res) {
- pr_info("unable to get qfprom_physical resource\n");
+ NPU_INFO("unable to get qfprom_physical resource\n");
} else {
npu_dev->qfprom_io.size = resource_size(res);
npu_dev->qfprom_io.phy_addr = res->start;
npu_dev->qfprom_io.base = devm_ioremap(&pdev->dev, res->start,
npu_dev->qfprom_io.size);
if (unlikely(!npu_dev->qfprom_io.base)) {
- pr_err("unable to map qfprom_physical\n");
+ NPU_ERR("unable to map qfprom_physical\n");
rc = -ENOMEM;
goto error_get_dev_num;
}
- pr_debug("qfprom_physical phy address=0x%llx virt=%pK\n",
+ NPU_DBG("qfprom_physical phy address=0x%llx virt=%pK\n",
res->start, npu_dev->qfprom_io.base);
}
@@ -1745,14 +1695,14 @@ static int npu_probe(struct platform_device *pdev)
/* character device might be optional */
rc = alloc_chrdev_region(&npu_dev->dev_num, 0, 1, DRIVER_NAME);
if (rc < 0) {
- pr_err("alloc_chrdev_region failed: %d\n", rc);
+ NPU_ERR("alloc_chrdev_region failed: %d\n", rc);
goto error_get_dev_num;
}
npu_dev->class = class_create(THIS_MODULE, CLASS_NAME);
if (IS_ERR(npu_dev->class)) {
rc = PTR_ERR(npu_dev->class);
- pr_err("class_create failed: %d\n", rc);
+ NPU_ERR("class_create failed: %d\n", rc);
goto error_class_create;
}
@@ -1760,7 +1710,7 @@ static int npu_probe(struct platform_device *pdev)
npu_dev->dev_num, NULL, DRIVER_NAME);
if (IS_ERR(npu_dev->device)) {
rc = PTR_ERR(npu_dev->device);
- pr_err("device_create failed: %d\n", rc);
+ NPU_ERR("device_create failed: %d\n", rc);
goto error_class_device_create;
}
@@ -1768,15 +1718,15 @@ static int npu_probe(struct platform_device *pdev)
rc = cdev_add(&npu_dev->cdev,
MKDEV(MAJOR(npu_dev->dev_num), 0), 1);
if (rc < 0) {
- pr_err("cdev_add failed %d\n", rc);
+ NPU_ERR("cdev_add failed %d\n", rc);
goto error_cdev_add;
}
dev_set_drvdata(npu_dev->device, npu_dev);
- pr_debug("drvdata %pK %pK\n", dev_get_drvdata(&pdev->dev),
+ NPU_DBG("drvdata %pK %pK\n", dev_get_drvdata(&pdev->dev),
dev_get_drvdata(npu_dev->device));
rc = sysfs_create_group(&npu_dev->device->kobj, &npu_fs_attr_group);
if (rc) {
- pr_err("unable to register npu sysfs nodes\n");
+ NPU_ERR("unable to register npu sysfs nodes\n");
goto error_res_init;
}
@@ -1802,7 +1752,7 @@ static int npu_probe(struct platform_device *pdev)
rc = npu_host_init(npu_dev);
if (rc) {
- pr_err("unable to init host\n");
+ NPU_ERR("unable to init host\n");
goto error_driver_init;
}
@@ -1874,7 +1824,7 @@ static int __init npu_init(void)
rc = platform_driver_register(&npu_driver);
if (rc)
- pr_err("register failed %d\n", rc);
+ NPU_ERR("register failed %d\n", rc);
return rc;
}
diff --git a/drivers/media/platform/msm/npu/npu_host_ipc.c b/drivers/media/platform/msm/npu/npu_host_ipc.c
index b26d221..85e8187 100644
--- a/drivers/media/platform/msm/npu/npu_host_ipc.c
+++ b/drivers/media/platform/msm/npu/npu_host_ipc.c
@@ -3,8 +3,6 @@
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
/* -------------------------------------------------------------------------
* Includes
* -------------------------------------------------------------------------
@@ -169,9 +167,9 @@ static int npu_host_ipc_send_cmd_hfi(struct npu_device *npu_dev,
}
if (status == 0)
- pr_debug("Cmd Msg put on Command Queue - SUCCESSS\n");
+ NPU_DBG("Cmd Msg put on Command Queue - SUCCESSS\n");
else
- pr_err("Cmd Msg put on Command Queue - FAILURE\n");
+ NPU_ERR("Cmd Msg put on Command Queue - FAILURE\n");
return status;
}
@@ -232,7 +230,7 @@ static int ipc_queue_read(struct npu_device *npu_dev,
MEMR(npu_dev, (void *)((size_t)read_ptr), packet, 4);
packet_size = *((uint32_t *)packet);
- pr_debug("target_que: %d, packet_size: %d\n",
+ NPU_DBG("target_que: %d, packet_size: %d\n",
target_que,
packet_size);
diff --git a/drivers/media/platform/msm/npu/npu_hw.h b/drivers/media/platform/msm/npu/npu_hw.h
index 9894a28..7a884dc 100644
--- a/drivers/media/platform/msm/npu/npu_hw.h
+++ b/drivers/media/platform/msm/npu/npu_hw.h
@@ -47,13 +47,6 @@
#define NPU_GPR14 (0x00000138)
#define NPU_GPR15 (0x0000013C)
-#define BWMON2_SAMPLING_WINDOW (0x000003A8)
-#define BWMON2_BYTE_COUNT_THRESHOLD_HIGH (0x000003AC)
-#define BWMON2_BYTE_COUNT_THRESHOLD_MEDIUM (0x000003B0)
-#define BWMON2_BYTE_COUNT_THRESHOLD_LOW (0x000003B4)
-#define BWMON2_ZONE_ACTIONS (0x000003B8)
-#define BWMON2_ZONE_COUNT_THRESHOLD (0x000003BC)
-
#define NPU_QDSP6SS_IPC 0x00088000
#define NPU_QDSP6SS_IPC1 0x00088004
diff --git a/drivers/media/platform/msm/npu/npu_hw_access.c b/drivers/media/platform/msm/npu/npu_hw_access.c
index 43d3189..f2862ab 100644
--- a/drivers/media/platform/msm/npu/npu_hw_access.c
+++ b/drivers/media/platform/msm/npu/npu_hw_access.c
@@ -3,8 +3,6 @@
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
/* -------------------------------------------------------------------------
* Includes
* -------------------------------------------------------------------------
@@ -65,21 +63,6 @@ void npu_apss_shared_reg_write(struct npu_device *npu_dev, uint32_t off,
__iowmb();
}
-uint32_t npu_bwmon_reg_read(struct npu_device *npu_dev, uint32_t off)
-{
- uint32_t ret = 0;
-
- ret = readl(npu_dev->bwmon_io.base + off);
- return ret;
-}
-
-void npu_bwmon_reg_write(struct npu_device *npu_dev, uint32_t off,
- uint32_t val)
-{
- writel_relaxed(val, npu_dev->bwmon_io.base + off);
- __iowmb();
-}
-
uint32_t npu_qfprom_reg_read(struct npu_device *npu_dev, uint32_t off)
{
uint32_t ret = 0;
@@ -103,7 +86,7 @@ void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src,
uint32_t i = 0;
uint32_t num = 0;
- pr_debug("write dst_off %zx size %x\n", dst_off, size);
+ NPU_DBG("write dst_off %zx size %x\n", dst_off, size);
num = size/4;
for (i = 0; i < num; i++) {
writel_relaxed(src_ptr32[i], npu_dev->tcm_io.base + dst_off);
@@ -130,7 +113,7 @@ int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst,
uint32_t i = 0;
uint32_t num = 0;
- pr_debug("read src_off %zx size %x\n", src_off, size);
+ NPU_DBG("read src_off %zx size %x\n", src_off, size);
num = size/4;
for (i = 0; i < num; i++) {
@@ -195,7 +178,7 @@ static struct npu_ion_buf *npu_alloc_npu_ion_buffer(struct npu_client
if (ret_val) {
/* mapped already, treat as invalid request */
- pr_err("ion buf has been mapped\n");
+ NPU_ERR("ion buf has been mapped\n");
ret_val = NULL;
} else {
ret_val = kzalloc(sizeof(*ret_val), GFP_KERNEL);
@@ -262,7 +245,7 @@ int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size,
ion_buf = npu_alloc_npu_ion_buffer(client, buf_hdl, size);
if (!ion_buf) {
- pr_err("%s fail to alloc npu_ion_buffer\n", __func__);
+ NPU_ERR("fail to alloc npu_ion_buffer\n");
ret = -ENOMEM;
return ret;
}
@@ -271,7 +254,7 @@ int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size,
ion_buf->dma_buf = dma_buf_get(ion_buf->fd);
if (IS_ERR_OR_NULL(ion_buf->dma_buf)) {
- pr_err("dma_buf_get failed %d\n", ion_buf->fd);
+ NPU_ERR("dma_buf_get failed %d\n", ion_buf->fd);
ret = -ENOMEM;
ion_buf->dma_buf = NULL;
goto map_end;
@@ -290,7 +273,7 @@ int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size,
ion_buf->table = dma_buf_map_attachment(ion_buf->attachment,
DMA_BIDIRECTIONAL);
if (IS_ERR(ion_buf->table)) {
- pr_err("npu dma_buf_map_attachment failed\n");
+ NPU_ERR("npu dma_buf_map_attachment failed\n");
ret = -ENOMEM;
ion_buf->table = NULL;
goto map_end;
@@ -301,9 +284,9 @@ int npu_mem_map(struct npu_client *client, int buf_hdl, uint32_t size,
ion_buf->iova = ion_buf->table->sgl->dma_address;
ion_buf->size = ion_buf->dma_buf->size;
*addr = ion_buf->iova;
- pr_debug("mapped mem addr:0x%llx size:0x%x\n", ion_buf->iova,
+ NPU_DBG("mapped mem addr:0x%llx size:0x%x\n", ion_buf->iova,
ion_buf->size);
- pr_debug("physical address 0x%llx\n", sg_phys(ion_buf->table->sgl));
+ NPU_DBG("physical address 0x%llx\n", sg_phys(ion_buf->table->sgl));
map_end:
if (ret)
npu_mem_unmap(client, buf_hdl, 0);
@@ -318,7 +301,7 @@ void npu_mem_invalidate(struct npu_client *client, int buf_hdl)
buf_hdl);
if (!ion_buf)
- pr_err("%s cant find ion buf\n", __func__);
+ NPU_ERR("cant find ion buf\n");
else
dma_sync_sg_for_cpu(&(npu_dev->pdev->dev), ion_buf->table->sgl,
ion_buf->table->nents, DMA_BIDIRECTIONAL);
@@ -351,12 +334,12 @@ void npu_mem_unmap(struct npu_client *client, int buf_hdl, uint64_t addr)
/* clear entry and retrieve the corresponding buffer */
ion_buf = npu_get_npu_ion_buffer(client, buf_hdl);
if (!ion_buf) {
- pr_err("%s could not find buffer\n", __func__);
+ NPU_ERR("could not find buffer\n");
return;
}
if (ion_buf->iova != addr)
- pr_warn("unmap address %llu doesn't match %llu\n", addr,
+ NPU_WARN("unmap address %llu doesn't match %llu\n", addr,
ion_buf->iova);
if (ion_buf->table)
@@ -368,7 +351,7 @@ void npu_mem_unmap(struct npu_client *client, int buf_hdl, uint64_t addr)
dma_buf_put(ion_buf->dma_buf);
npu_dev->smmu_ctx.attach_cnt--;
- pr_debug("unmapped mem addr:0x%llx size:0x%x\n", ion_buf->iova,
+ NPU_DBG("unmapped mem addr:0x%llx size:0x%x\n", ion_buf->iova,
ion_buf->size);
npu_free_npu_ion_buffer(client, buf_hdl);
}
diff --git a/drivers/media/platform/msm/npu/npu_hw_access.h b/drivers/media/platform/msm/npu/npu_hw_access.h
index d893faa..24da853 100644
--- a/drivers/media/platform/msm/npu/npu_hw_access.h
+++ b/drivers/media/platform/msm/npu/npu_hw_access.h
@@ -56,9 +56,6 @@ void npu_qdsp_reg_write(struct npu_device *npu_dev, uint32_t off, uint32_t val);
uint32_t npu_apss_shared_reg_read(struct npu_device *npu_dev, uint32_t off);
void npu_apss_shared_reg_write(struct npu_device *npu_dev, uint32_t off,
uint32_t val);
-uint32_t npu_bwmon_reg_read(struct npu_device *npu_dev, uint32_t off);
-void npu_bwmon_reg_write(struct npu_device *npu_dev, uint32_t off,
- uint32_t val);
void npu_mem_write(struct npu_device *npu_dev, void *dst, void *src,
uint32_t size);
int32_t npu_mem_read(struct npu_device *npu_dev, void *src, void *dst,
diff --git a/drivers/media/platform/msm/npu/npu_mgr.c b/drivers/media/platform/msm/npu/npu_mgr.c
index 4cbea60..3c716da 100644
--- a/drivers/media/platform/msm/npu/npu_mgr.c
+++ b/drivers/media/platform/msm/npu/npu_mgr.c
@@ -3,8 +3,6 @@
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
/* -------------------------------------------------------------------------
* Includes
* -------------------------------------------------------------------------
@@ -93,7 +91,7 @@ int fw_init(struct npu_device *npu_dev)
REGW(npu_dev, REG_NPU_HOST_CTRL_VALUE, 0x0);
REGW(npu_dev, REG_FW_TO_HOST_EVENT, 0x0);
- pr_debug("fw_dbg_mode %x\n", host_ctx->fw_dbg_mode);
+ NPU_DBG("fw_dbg_mode %x\n", host_ctx->fw_dbg_mode);
reg_val = 0;
if (host_ctx->fw_dbg_mode & FW_DBG_MODE_PAUSE)
reg_val |= HOST_CTRL_STATUS_FW_PAUSE_VAL;
@@ -115,7 +113,7 @@ int fw_init(struct npu_device *npu_dev)
/* Boot the NPU subsystem */
host_ctx->subsystem_handle = subsystem_get_local("npu");
if (IS_ERR(host_ctx->subsystem_handle)) {
- pr_err("pil load npu fw failed\n");
+ NPU_ERR("pil load npu fw failed\n");
ret = -ENODEV;
goto subsystem_get_fail;
}
@@ -127,7 +125,7 @@ int fw_init(struct npu_device *npu_dev)
}
/* Keep reading ctrl status until NPU is ready */
- pr_debug("waiting for status ready from fw\n");
+ NPU_DBG("waiting for status ready from fw\n");
if (wait_for_status_ready(npu_dev, REG_NPU_FW_CTRL_STATUS,
FW_CTRL_STATUS_MAIN_THREAD_READY_VAL)) {
@@ -149,11 +147,11 @@ int fw_init(struct npu_device *npu_dev)
reinit_completion(&host_ctx->fw_deinit_done);
mutex_unlock(&host_ctx->lock);
- pr_debug("firmware init complete\n");
+ NPU_DBG("firmware init complete\n");
/* Set logging state */
if (!npu_hw_log_enabled()) {
- pr_debug("fw logging disabled\n");
+ NPU_DBG("fw logging disabled\n");
turn_off_fw_logging(npu_dev);
}
@@ -185,10 +183,10 @@ void fw_deinit(struct npu_device *npu_dev, bool ssr, bool fw_alive)
if (!ssr && (host_ctx->fw_ref_cnt > 0))
host_ctx->fw_ref_cnt--;
- pr_debug("fw_ref_cnt %d\n", host_ctx->fw_ref_cnt);
+ NPU_DBG("fw_ref_cnt %d\n", host_ctx->fw_ref_cnt);
if (host_ctx->fw_state != FW_ENABLED) {
- pr_err("fw is not enabled\n");
+ NPU_ERR("fw is not enabled\n");
mutex_unlock(&host_ctx->lock);
return;
}
@@ -211,17 +209,17 @@ void fw_deinit(struct npu_device *npu_dev, bool ssr, bool fw_alive)
ret = npu_host_ipc_send_cmd(npu_dev,
IPC_QUEUE_CMD_HIGH_PRIORITY, &cmd_shutdown_pkt);
- pr_debug("NPU_IPC_CMD_SHUTDOWN sent status: %d\n", ret);
+ NPU_DBG("NPU_IPC_CMD_SHUTDOWN sent status: %d\n", ret);
if (ret) {
- pr_err("npu_host_ipc_send_cmd failed\n");
+ NPU_ERR("npu_host_ipc_send_cmd failed\n");
} else {
/* Keep reading ctrl status until NPU shuts down */
- pr_debug("waiting for shutdown status from fw\n");
+ NPU_DBG("waiting for shutdown status from fw\n");
if (wait_for_status_ready(npu_dev,
REG_NPU_FW_CTRL_STATUS,
FW_CTRL_STATUS_SHUTDOWN_DONE_VAL)) {
- pr_err("wait for fw shutdown timedout\n");
+ NPU_ERR("wait for fw shutdown timedout\n");
ret = -ETIMEDOUT;
}
}
@@ -256,7 +254,7 @@ void fw_deinit(struct npu_device *npu_dev, bool ssr, bool fw_alive)
complete(&host_ctx->fw_deinit_done);
mutex_unlock(&host_ctx->lock);
- pr_debug("firmware deinit complete\n");
+ NPU_DBG("firmware deinit complete\n");
npu_notify_aop(npu_dev, false);
}
@@ -298,7 +296,7 @@ irqreturn_t npu_intr_hdler(int irq, void *ptr)
struct npu_device *npu_dev = (struct npu_device *)ptr;
struct npu_host_ctx *host_ctx = &npu_dev->host_ctx;
- pr_debug("NPU irq %d\n", irq);
+ NPU_DBG("NPU irq %d\n", irq);
INTERRUPT_ACK(npu_dev, irq);
/* Check that the event thread currently is running */
@@ -324,7 +322,7 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
return 0;
if (host_ctx->wdg_irq_sts)
- pr_info("watchdog irq triggered\n");
+ NPU_INFO("watchdog irq triggered\n");
fw_deinit(npu_dev, true, force);
host_ctx->wdg_irq_sts = 0;
@@ -337,14 +335,14 @@ static int host_error_hdlr(struct npu_device *npu_dev, bool force)
if (network->is_valid && network->cmd_pending &&
network->fw_error) {
if (network->cmd_async) {
- pr_debug("async cmd, queue ssr event\n");
+ NPU_DBG("async cmd, queue ssr event\n");
kevt.evt.type = MSM_NPU_EVENT_TYPE_SSR;
kevt.evt.u.ssr.network_hdl =
network->network_hdl;
if (npu_queue_event(network->client, &kevt))
- pr_err("queue npu event failed\n");
+ NPU_ERR("queue npu event failed\n");
} else {
- pr_debug("complete network %llx\n",
+ NPU_DBG("complete network %llx\n",
network->id);
complete(&network->cmd_done);
}
@@ -387,10 +385,10 @@ static void turn_off_fw_logging(struct npu_device *npu_dev)
ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_CMD_HIGH_PRIORITY,
&log_packet);
- pr_debug("NPU_IPC_CMD_CONFIG_LOG sent status: %d\n", ret);
+ NPU_DBG("NPU_IPC_CMD_CONFIG_LOG sent status: %d\n", ret);
if (ret)
- pr_err("npu_host_ipc_send_cmd failed\n");
+ NPU_ERR("npu_host_ipc_send_cmd failed\n");
}
static int wait_for_status_ready(struct npu_device *npu_dev,
@@ -409,12 +407,12 @@ static int wait_for_status_ready(struct npu_device *npu_dev,
msleep(NPU_FW_TIMEOUT_POLL_INTERVAL_MS);
wait_cnt += NPU_FW_TIMEOUT_POLL_INTERVAL_MS;
if (wait_cnt >= max_wait_ms) {
- pr_err("timeout wait for status %x[%x] in reg %x\n",
+ NPU_ERR("timeout wait for status %x[%x] in reg %x\n",
status_bits, ctrl_sts, status_reg);
return -EPERM;
}
}
- pr_debug("status %x[reg %x] ready received\n", status_bits, status_reg);
+ NPU_DBG("status %x[reg %x] ready received\n", status_bits, status_reg);
return 0;
}
@@ -432,25 +430,25 @@ static int npu_notify_aop(struct npu_device *npu_dev, bool on)
int buf_size, rc = 0;
if (!npu_dev->mbox_aop.chan) {
- pr_warn("aop mailbox channel is not available\n");
+ NPU_WARN("aop mailbox channel is not available\n");
return 0;
}
buf_size = scnprintf(buf, MAX_LEN, "{class: bcm, res: npu_on, val: %d}",
on ? 1 : 0);
if (buf_size < 0) {
- pr_err("prepare qmp notify buf failed\n");
+ NPU_ERR("prepare qmp notify buf failed\n");
return -EINVAL;
}
- pr_debug("send msg %s to aop\n", buf);
+ NPU_DBG("send msg %s to aop\n", buf);
memset(&pkt, 0, sizeof(pkt));
pkt.size = (buf_size + 3) & ~0x3;
pkt.data = buf;
rc = mbox_send_message(npu_dev->mbox_aop.chan, &pkt);
if (rc < 0)
- pr_err("qmp message send failed, ret=%d\n", rc);
+ NPU_ERR("qmp message send failed, ret=%d\n", rc);
return rc;
}
@@ -491,7 +489,7 @@ static struct npu_network *alloc_network(struct npu_host_ctx *ctx,
}
if (i == MAX_LOADED_NETWORK) {
- pr_err("No free network\n");
+ NPU_ERR("No free network\n");
return NULL;
}
@@ -527,12 +525,12 @@ static struct npu_network *get_network_by_hdl(struct npu_host_ctx *ctx,
}
if ((i == MAX_LOADED_NETWORK) || !network->is_valid) {
- pr_err("network hdl invalid %d\n", hdl);
+ NPU_ERR("network hdl invalid %d\n", hdl);
return NULL;
}
if (client && (client != network->client)) {
- pr_err("network %lld doesn't belong to this client\n",
+ NPU_ERR("network %lld doesn't belong to this client\n",
network->id);
return NULL;
}
@@ -550,13 +548,13 @@ static struct npu_network *get_network_by_id(struct npu_host_ctx *ctx,
if (id < 1 || id > MAX_LOADED_NETWORK ||
!ctx->networks[id - 1].is_valid) {
- pr_err("Invalid network id %d\n", (int32_t)id);
+ NPU_ERR("Invalid network id %d\n", (int32_t)id);
return NULL;
}
network = &ctx->networks[id - 1];
if (client && (client != network->client)) {
- pr_err("network %lld doesn't belong to this client\n", id);
+ NPU_ERR("network %lld doesn't belong to this client\n", id);
return NULL;
}
@@ -579,7 +577,7 @@ static void free_network(struct npu_host_ctx *ctx, struct npu_client *client,
memset(network, 0, sizeof(struct npu_network));
ctx->network_num--;
} else {
- pr_warn("network %lld:%d is in use\n", network->id,
+ NPU_WARN("network %lld:%d is in use\n", network->id,
atomic_read(&network->ref_cnt));
}
}
@@ -619,28 +617,28 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
struct ipc_msg_execute_pkt *exe_rsp_pkt =
(struct ipc_msg_execute_pkt *)msg;
- pr_debug("NPU_IPC_MSG_EXECUTE_DONE status: %d\n",
+ NPU_DBG("NPU_IPC_MSG_EXECUTE_DONE status: %d\n",
exe_rsp_pkt->header.status);
- pr_debug("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
- pr_debug("e2e_IPC_time: %d (in tick count)\n",
+ NPU_DBG("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
+ NPU_DBG("e2e_IPC_time: %d (in tick count)\n",
exe_rsp_pkt->stats.e2e_ipc_tick_count);
- pr_debug("aco_load_time: %d (in tick count)\n",
+ NPU_DBG("aco_load_time: %d (in tick count)\n",
exe_rsp_pkt->stats.aco_load_tick_count);
- pr_debug("aco_execute_time: %d (in tick count)\n",
+ NPU_DBG("aco_execute_time: %d (in tick count)\n",
exe_rsp_pkt->stats.aco_execution_tick_count);
- pr_debug("total_num_layers: %d\n",
+ NPU_DBG("total_num_layers: %d\n",
exe_rsp_pkt->stats.exe_stats.total_num_layers);
network = get_network_by_hdl(host_ctx, NULL,
exe_rsp_pkt->network_hdl);
if (!network) {
- pr_err("can't find network %x\n",
+ NPU_ERR("can't find network %x\n",
exe_rsp_pkt->network_hdl);
break;
}
if (network->trans_id != exe_rsp_pkt->header.trans_id) {
- pr_err("execute_pkt trans_id is not match %d:%d\n",
+ NPU_ERR("execute_pkt trans_id is not match %d:%d\n",
network->trans_id,
exe_rsp_pkt->header.trans_id);
network_put(network);
@@ -653,14 +651,14 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
if (!network->cmd_async) {
complete(&network->cmd_done);
} else {
- pr_debug("async cmd, queue event\n");
+ NPU_DBG("async cmd, queue event\n");
kevt.evt.type = MSM_NPU_EVENT_TYPE_EXEC_DONE;
kevt.evt.u.exec_done.network_hdl =
exe_rsp_pkt->network_hdl;
kevt.evt.u.exec_done.exec_result =
exe_rsp_pkt->header.status;
if (npu_queue_event(network->client, &kevt))
- pr_err("queue npu event failed\n");
+ NPU_ERR("queue npu event failed\n");
}
network_put(network);
@@ -672,29 +670,29 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
(struct ipc_msg_execute_pkt_v2 *)msg;
uint32_t stats_size = 0;
- pr_debug("NPU_IPC_MSG_EXECUTE_V2_DONE status: %d\n",
+ NPU_DBG("NPU_IPC_MSG_EXECUTE_V2_DONE status: %d\n",
exe_rsp_pkt->header.status);
- pr_debug("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
+ NPU_DBG("trans_id : %d\n", exe_rsp_pkt->header.trans_id);
network = get_network_by_hdl(host_ctx, NULL,
exe_rsp_pkt->network_hdl);
if (!network) {
- pr_err("can't find network %x\n",
+ NPU_ERR("can't find network %x\n",
exe_rsp_pkt->network_hdl);
break;
}
if (network->trans_id != exe_rsp_pkt->header.trans_id) {
- pr_err("execute_pkt_v2 trans_id is not match %d:%d\n",
+ NPU_ERR("execute_pkt_v2 trans_id is not match %d:%d\n",
network->trans_id,
exe_rsp_pkt->header.trans_id);
network_put(network);
break;
}
- pr_debug("network id : %lld\n", network->id);
+ NPU_DBG("network id : %lld\n", network->id);
stats_size = exe_rsp_pkt->header.size - sizeof(*exe_rsp_pkt);
- pr_debug("stats_size %d:%d\n", exe_rsp_pkt->header.size,
+ NPU_DBG("stats_size %d:%d\n", exe_rsp_pkt->header.size,
stats_size);
stats_size = stats_size < network->stats_buf_size ?
stats_size : network->stats_buf_size;
@@ -707,7 +705,7 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
network->cmd_ret_status = exe_rsp_pkt->header.status;
if (network->cmd_async) {
- pr_debug("async cmd, queue event\n");
+ NPU_DBG("async cmd, queue event\n");
kevt.evt.type = MSM_NPU_EVENT_TYPE_EXEC_V2_DONE;
kevt.evt.u.exec_v2_done.network_hdl =
exe_rsp_pkt->network_hdl;
@@ -717,7 +715,7 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
kevt.reserved[0] = (uint64_t)network->stats_buf;
kevt.reserved[1] = (uint64_t)network->stats_buf_u;
if (npu_queue_event(network->client, &kevt))
- pr_err("queue npu event failed\n");
+ NPU_ERR("queue npu event failed\n");
} else {
complete(&network->cmd_done);
}
@@ -730,7 +728,7 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
struct ipc_msg_load_pkt *load_rsp_pkt =
(struct ipc_msg_load_pkt *)msg;
- pr_debug("NPU_IPC_MSG_LOAD_DONE status: %d, trans_id: %d\n",
+ NPU_DBG("NPU_IPC_MSG_LOAD_DONE status: %d, trans_id: %d\n",
load_rsp_pkt->header.status,
load_rsp_pkt->header.trans_id);
@@ -738,16 +736,16 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
* the upper 16 bits in returned network_hdl is
* the network ID
*/
- pr_debug("network_hdl: %x\n", load_rsp_pkt->network_hdl);
+ NPU_DBG("network_hdl: %x\n", load_rsp_pkt->network_hdl);
network_id = load_rsp_pkt->network_hdl >> 16;
network = get_network_by_id(host_ctx, NULL, network_id);
if (!network) {
- pr_err("can't find network %d\n", network_id);
+ NPU_ERR("can't find network %d\n", network_id);
break;
}
if (network->trans_id != load_rsp_pkt->header.trans_id) {
- pr_err("load_rsp_pkt trans_id is not match %d:%d\n",
+ NPU_ERR("load_rsp_pkt trans_id is not match %d:%d\n",
network->trans_id,
load_rsp_pkt->header.trans_id);
network_put(network);
@@ -767,20 +765,20 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
struct ipc_msg_unload_pkt *unload_rsp_pkt =
(struct ipc_msg_unload_pkt *)msg;
- pr_debug("NPU_IPC_MSG_UNLOAD_DONE status: %d, trans_id: %d\n",
+ NPU_DBG("NPU_IPC_MSG_UNLOAD_DONE status: %d, trans_id: %d\n",
unload_rsp_pkt->header.status,
unload_rsp_pkt->header.trans_id);
network = get_network_by_hdl(host_ctx, NULL,
unload_rsp_pkt->network_hdl);
if (!network) {
- pr_err("can't find network %x\n",
+ NPU_ERR("can't find network %x\n",
unload_rsp_pkt->network_hdl);
break;
}
if (network->trans_id != unload_rsp_pkt->header.trans_id) {
- pr_err("unload_rsp_pkt trans_id is not match %d:%d\n",
+ NPU_ERR("unload_rsp_pkt trans_id is not match %d:%d\n",
network->trans_id,
unload_rsp_pkt->header.trans_id);
network_put(network);
@@ -799,13 +797,13 @@ static void app_msg_proc(struct npu_host_ctx *host_ctx, uint32_t *msg)
struct ipc_msg_loopback_pkt *lb_rsp_pkt =
(struct ipc_msg_loopback_pkt *)msg;
- pr_debug("NPU_IPC_MSG_LOOPBACK_DONE loopbackParams: 0x%x\n",
+ NPU_DBG("NPU_IPC_MSG_LOOPBACK_DONE loopbackParams: 0x%x\n",
lb_rsp_pkt->loopbackParams);
complete_all(&host_ctx->loopback_done);
break;
}
default:
- pr_err("Not supported apps response received %d\n",
+ NPU_ERR("Not supported apps response received %d\n",
msg_id);
break;
}
@@ -822,12 +820,12 @@ static void host_session_msg_hdlr(struct npu_device *npu_dev)
mutex_lock(&host_ctx->lock);
if (host_ctx->fw_state == FW_DISABLED) {
- pr_warn("handle npu session msg when FW is disabled\n");
+ NPU_WARN("handle npu session msg when FW is disabled\n");
goto skip_read_msg;
}
while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_APPS_RSP, msg) == 0) {
- pr_debug("received from msg queue\n");
+ NPU_DBG("received from msg queue\n");
app_msg_proc(host_ctx, msg);
}
@@ -852,7 +850,7 @@ static void log_msg_proc(struct npu_device *npu_dev, uint32_t *msg)
npu_process_log_message(npu_dev, log_msg, size);
break;
default:
- pr_err("unsupported log response received %d\n", msg_id);
+ NPU_ERR("unsupported log response received %d\n", msg_id);
break;
}
}
@@ -869,12 +867,12 @@ static void host_session_log_hdlr(struct npu_device *npu_dev)
mutex_lock(&host_ctx->lock);
if (host_ctx->fw_state == FW_DISABLED) {
- pr_warn("handle npu session msg when FW is disabled\n");
+ NPU_WARN("handle npu session msg when FW is disabled\n");
goto skip_read_msg;
}
while (npu_host_ipc_read_msg(npu_dev, IPC_QUEUE_LOG, msg) == 0) {
- pr_debug("received from log queue\n");
+ NPU_DBG("received from log queue\n");
log_msg_proc(npu_dev, msg);
}
@@ -915,7 +913,7 @@ int32_t npu_host_unmap_buf(struct npu_client *client,
if (host_ctx->fw_error && (host_ctx->fw_state == FW_ENABLED) &&
!wait_for_completion_interruptible_timeout(
&host_ctx->fw_deinit_done, NW_CMD_TIMEOUT))
- pr_warn("npu: wait for fw_deinit_done time out\n");
+ NPU_WARN("npu: wait for fw_deinit_done time out\n");
npu_mem_unmap(client, unmap_ioctl->buf_ion_hdl,
unmap_ioctl->npu_phys_addr);
@@ -930,13 +928,13 @@ static int npu_send_network_cmd(struct npu_device *npu_dev,
if (network->fw_error || host_ctx->fw_error ||
(host_ctx->fw_state == FW_DISABLED)) {
- pr_err("fw is in error state or disabled, can't send network cmd\n");
+ NPU_ERR("fw is in error state or disabled\n");
ret = -EIO;
} else if (network->cmd_pending) {
- pr_err("Another cmd is pending\n");
+ NPU_ERR("Another cmd is pending\n");
ret = -EBUSY;
} else {
- pr_debug("Send cmd %d network id %lld\n",
+ NPU_DBG("Send cmd %d network id %lld\n",
((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type,
network->id);
network->cmd_async = async;
@@ -960,10 +958,10 @@ static int npu_send_misc_cmd(struct npu_device *npu_dev, uint32_t q_idx,
mutex_lock(&host_ctx->lock);
if (host_ctx->fw_error || (host_ctx->fw_state == FW_DISABLED)) {
- pr_err("fw is in error state or disabled, can't send misc cmd\n");
+ NPU_ERR("fw is in error state or disabled\n");
ret = -EIO;
} else {
- pr_debug("Send cmd %d\n",
+ NPU_DBG("Send cmd %d\n",
((struct ipc_cmd_header_pkt *)cmd_ptr)->cmd_type);
ret = npu_host_ipc_send_cmd(npu_dev, q_idx, cmd_ptr);
}
@@ -985,7 +983,7 @@ static void host_copy_patch_data(struct npu_patch_tuple *param, uint32_t value,
param->variable_size_in_bits =
layer_info->patch_info.variable_size_in_bits;
- pr_debug("copy_patch_data: %x %d %x %x %x %x\n",
+ NPU_DBG("copy_patch_data: %x %d %x %x %x %x\n",
param->value,
param->chunk_id,
param->loc_offset,
@@ -1004,7 +1002,7 @@ static void host_copy_patch_data_v2(struct npu_patch_tuple_v2 *param,
patch_info->instruction_size_in_bytes;
param->shift_value_in_bits = patch_info->shift_value_in_bits;
param->variable_size_in_bits = patch_info->variable_size_in_bits;
- pr_debug("copy_patch_data_v2: %x %d %x %x %x %x\n",
+ NPU_DBG("copy_patch_data_v2: %x %d %x %x %x %x\n",
param->value,
param->chunk_id,
param->loc_offset,
@@ -1028,7 +1026,7 @@ static uint32_t find_networks_perf_mode(struct npu_host_ctx *host_ctx)
max_perf_mode = network->perf_mode;
network++;
}
- pr_debug("max perf mode for networks: %d\n", max_perf_mode);
+ NPU_DBG("max perf mode for networks: %d\n", max_perf_mode);
return max_perf_mode;
}
@@ -1072,7 +1070,7 @@ int32_t npu_host_load_network(struct npu_client *client,
ret = npu_set_uc_power_level(npu_dev, networks_perf_mode);
if (ret) {
- pr_err("network load failed due to power level set\n");
+ NPU_ERR("network load failed due to power level set\n");
goto error_free_network;
}
@@ -1091,7 +1089,7 @@ int32_t npu_host_load_network(struct npu_client *client,
reinit_completion(&network->cmd_done);
ret = npu_send_network_cmd(npu_dev, network, &load_packet, false);
if (ret) {
- pr_err("NPU_IPC_CMD_LOAD sent failed: %d\n", ret);
+ NPU_ERR("NPU_IPC_CMD_LOAD sent failed: %d\n", ret);
goto error_free_network;
}
@@ -1104,17 +1102,17 @@ int32_t npu_host_load_network(struct npu_client *client,
mutex_lock(&host_ctx->lock);
if (!ret) {
- pr_err_ratelimited("NPU_IPC_CMD_LOAD time out\n");
+ NPU_ERR("NPU_IPC_CMD_LOAD time out\n");
ret = -ETIMEDOUT;
goto error_free_network;
} else if (ret < 0) {
- pr_err("NPU_IPC_CMD_LOAD is interrupted by signal\n");
+ NPU_ERR("NPU_IPC_CMD_LOAD is interrupted by signal\n");
goto error_free_network;
}
if (network->fw_error) {
ret = -EIO;
- pr_err("fw is in error state during load network\n");
+ NPU_ERR("fw is in error state during load network\n");
goto error_free_network;
}
@@ -1186,17 +1184,17 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
/* verify mapped physical address */
if (!npu_mem_verify_addr(client, network->phy_add)) {
- pr_err("Invalid network address %llx\n", network->phy_add);
+ NPU_ERR("Invalid network address %llx\n", network->phy_add);
ret = -EINVAL;
goto error_free_network;
}
- pr_debug("network address %llx\n", network->phy_add);
+ NPU_DBG("network address %llx\n", network->phy_add);
networks_perf_mode = find_networks_perf_mode(host_ctx);
ret = npu_set_uc_power_level(npu_dev, networks_perf_mode);
if (ret) {
- pr_err("network load failed due to power level set\n");
+ NPU_ERR("network load failed due to power level set\n");
goto error_free_network;
}
@@ -1217,7 +1215,7 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
reinit_completion(&network->cmd_done);
ret = npu_send_network_cmd(npu_dev, network, load_packet, false);
if (ret) {
- pr_debug("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
+ NPU_DBG("NPU_IPC_CMD_LOAD_V2 sent failed: %d\n", ret);
goto error_free_network;
}
@@ -1231,17 +1229,17 @@ int32_t npu_host_load_network_v2(struct npu_client *client,
mutex_lock(&host_ctx->lock);
if (!ret) {
- pr_err_ratelimited("npu: NPU_IPC_CMD_LOAD time out\n");
+ NPU_ERR("npu: NPU_IPC_CMD_LOAD time out\n");
ret = -ETIMEDOUT;
goto error_free_network;
} else if (ret < 0) {
- pr_err("NPU_IPC_CMD_LOAD_V2 is interrupted by signal\n");
+ NPU_ERR("NPU_IPC_CMD_LOAD_V2 is interrupted by signal\n");
goto error_free_network;
}
if (network->fw_error) {
ret = -EIO;
- pr_err("fw is in error state during load_v2 network\n");
+ NPU_ERR("fw is in error state during load_v2 network\n");
goto error_free_network;
}
@@ -1287,18 +1285,18 @@ int32_t npu_host_unload_network(struct npu_client *client,
}
if (!network->is_active) {
- pr_err("network is not active\n");
+ NPU_ERR("network is not active\n");
network_put(network);
mutex_unlock(&host_ctx->lock);
return -EINVAL;
}
if (network->fw_error) {
- pr_err("fw in error state, skip unload network in fw\n");
+ NPU_ERR("fw in error state, skip unload network in fw\n");
goto free_network;
}
- pr_debug("Unload network %lld\n", network->id);
+ NPU_DBG("Unload network %lld\n", network->id);
/* prepare IPC packet for UNLOAD */
unload_packet.header.cmd_type = NPU_IPC_CMD_UNLOAD;
unload_packet.header.size = sizeof(struct ipc_cmd_unload_pkt);
@@ -1312,13 +1310,13 @@ int32_t npu_host_unload_network(struct npu_client *client,
ret = npu_send_network_cmd(npu_dev, network, &unload_packet, false);
if (ret) {
- pr_err("NPU_IPC_CMD_UNLOAD sent failed: %d\n", ret);
+ NPU_ERR("NPU_IPC_CMD_UNLOAD sent failed: %d\n", ret);
/*
* If another command is running on this network,
* don't free_network now.
*/
if (ret == -EBUSY) {
- pr_err("Network is running, retry later\n");
+ NPU_ERR("Network is running, retry later\n");
network_put(network);
mutex_unlock(&host_ctx->lock);
return ret;
@@ -1336,22 +1334,22 @@ int32_t npu_host_unload_network(struct npu_client *client,
mutex_lock(&host_ctx->lock);
if (!ret) {
- pr_err_ratelimited("npu: NPU_IPC_CMD_UNLOAD time out\n");
+ NPU_ERR("npu: NPU_IPC_CMD_UNLOAD time out\n");
network->cmd_pending = false;
ret = -ETIMEDOUT;
goto free_network;
} else if (ret < 0) {
- pr_err("Wait for unload done interrupted by signal\n");
+ NPU_ERR("Wait for unload done interrupted by signal\n");
network->cmd_pending = false;
goto free_network;
}
if (network->fw_error) {
ret = -EIO;
- pr_err("fw is in error state during unload network\n");
+ NPU_ERR("fw is in error state during unload network\n");
} else {
ret = network->cmd_ret_status;
- pr_debug("unload network status %d\n", ret);
+ NPU_DBG("unload network status %d\n", ret);
}
free_network:
@@ -1366,7 +1364,7 @@ int32_t npu_host_unload_network(struct npu_client *client,
if (networks_perf_mode > 0) {
ret = npu_set_uc_power_level(npu_dev, networks_perf_mode);
if (ret)
- pr_warn("restore uc power level failed\n");
+ NPU_WARN("restore uc power level failed\n");
}
mutex_unlock(&host_ctx->lock);
fw_deinit(npu_dev, false, true);
@@ -1395,23 +1393,23 @@ int32_t npu_host_exec_network(struct npu_client *client,
}
if (!network->is_active) {
- pr_err("network is not active\n");
+ NPU_ERR("network is not active\n");
ret = -EINVAL;
goto exec_done;
}
if (network->fw_error) {
- pr_err("fw is in error state\n");
+ NPU_ERR("fw is in error state\n");
ret = -EIO;
goto exec_done;
}
- pr_debug("execute network %lld\n", network->id);
+ NPU_DBG("execute network %lld\n", network->id);
memset(&exec_packet, 0, sizeof(exec_packet));
if (exec_ioctl->patching_required) {
if ((exec_ioctl->input_layer_num != 1) ||
(exec_ioctl->output_layer_num != 1)) {
- pr_err("Invalid input/output layer num\n");
+ NPU_ERR("Invalid input/output layer num\n");
ret = -EINVAL;
goto exec_done;
}
@@ -1421,7 +1419,7 @@ int32_t npu_host_exec_network(struct npu_client *client,
/* verify mapped physical address */
if (!npu_mem_verify_addr(client, input_off) ||
!npu_mem_verify_addr(client, output_off)) {
- pr_err("Invalid patch buf address\n");
+ NPU_ERR("Invalid patch buf address\n");
ret = -EINVAL;
goto exec_done;
}
@@ -1447,12 +1445,12 @@ int32_t npu_host_exec_network(struct npu_client *client,
ret = npu_send_network_cmd(npu_dev, network, &exec_packet, async_ioctl);
if (ret) {
- pr_err("NPU_IPC_CMD_EXECUTE sent failed: %d\n", ret);
+ NPU_ERR("NPU_IPC_CMD_EXECUTE sent failed: %d\n", ret);
goto exec_done;
}
if (async_ioctl) {
- pr_debug("Async ioctl, return now\n");
+ NPU_DBG("Async ioctl, return now\n");
goto exec_done;
}
@@ -1465,24 +1463,24 @@ int32_t npu_host_exec_network(struct npu_client *client,
mutex_lock(&host_ctx->lock);
if (!ret) {
- pr_err_ratelimited("npu: NPU_IPC_CMD_EXECUTE time out\n");
+ NPU_ERR("npu: NPU_IPC_CMD_EXECUTE time out\n");
/* dump debug stats */
npu_dump_debug_timeout_stats(npu_dev);
network->cmd_pending = false;
ret = -ETIMEDOUT;
goto exec_done;
} else if (ret == -ERESTARTSYS) {
- pr_err("Wait for execution done interrupted by signal\n");
+ NPU_ERR("Wait for execution done interrupted by signal\n");
network->cmd_pending = false;
goto exec_done;
}
if (network->fw_error) {
ret = -EIO;
- pr_err("fw is in error state during execute network\n");
+ NPU_ERR("fw is in error state during execute network\n");
} else {
ret = network->cmd_ret_status;
- pr_debug("execution status %d\n", ret);
+ NPU_DBG("execution status %d\n", ret);
}
exec_done:
@@ -1494,7 +1492,7 @@ int32_t npu_host_exec_network(struct npu_client *client,
* as error in order to force npu fw to stop execution
*/
if ((ret == -ETIMEDOUT) || (ret == -ERESTARTSYS)) {
- pr_err("Error handling after execution failure\n");
+ NPU_ERR("Error handling after execution failure\n");
host_error_hdlr(npu_dev, true);
}
@@ -1524,18 +1522,18 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
}
if (!network->is_active) {
- pr_err("network is not active\n");
+ NPU_ERR("network is not active\n");
ret = -EINVAL;
goto exec_v2_done;
}
if (network->fw_error) {
- pr_err("fw is in error state\n");
+ NPU_ERR("fw is in error state\n");
ret = -EIO;
goto exec_v2_done;
}
- pr_debug("execute_v2 network %lld\n", network->id);
+ NPU_DBG("execute_v2 network %lld\n", network->id);
num_patch_params = exec_ioctl->patch_buf_info_num;
pkt_size = num_patch_params * sizeof(struct npu_patch_params_v2) +
sizeof(*exec_packet);
@@ -1548,17 +1546,17 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
for (i = 0; i < num_patch_params; i++) {
exec_packet->patch_params[i].id = patch_buf_info[i].buf_id;
- pr_debug("%d: patch_id: %x\n", i,
+ NPU_DBG("%d: patch_id: %x\n", i,
exec_packet->patch_params[i].id);
exec_packet->patch_params[i].value =
patch_buf_info[i].buf_phys_addr;
- pr_debug("%d: patch value: %x\n", i,
+ NPU_DBG("%d: patch value: %x\n", i,
exec_packet->patch_params[i].value);
/* verify mapped physical address */
if (!npu_mem_verify_addr(client,
patch_buf_info[i].buf_phys_addr)) {
- pr_err("Invalid patch value\n");
+ NPU_ERR("Invalid patch value\n");
ret = -EINVAL;
goto free_exec_packet;
}
@@ -1576,7 +1574,7 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
network->stats_buf_u = (void __user *)exec_ioctl->stats_buf_addr;
network->stats_buf_size = exec_ioctl->stats_buf_size;
- pr_debug("Execute_v2 flags %x stats_buf_size %d\n",
+ NPU_DBG("Execute_v2 flags %x stats_buf_size %d\n",
exec_packet->header.flags, exec_ioctl->stats_buf_size);
/* Send it on the high priority queue */
@@ -1584,12 +1582,12 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
ret = npu_send_network_cmd(npu_dev, network, exec_packet, async_ioctl);
if (ret) {
- pr_err("NPU_IPC_CMD_EXECUTE_V2 sent failed: %d\n", ret);
+ NPU_ERR("NPU_IPC_CMD_EXECUTE_V2 sent failed: %d\n", ret);
goto free_exec_packet;
}
if (async_ioctl) {
- pr_debug("Async ioctl, return now\n");
+ NPU_DBG("Async ioctl, return now\n");
goto free_exec_packet;
}
@@ -1602,21 +1600,21 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
mutex_lock(&host_ctx->lock);
if (!ret) {
- pr_err_ratelimited("npu: NPU_IPC_CMD_EXECUTE_V2 time out\n");
+ NPU_ERR("npu: NPU_IPC_CMD_EXECUTE_V2 time out\n");
/* dump debug stats */
npu_dump_debug_timeout_stats(npu_dev);
network->cmd_pending = false;
ret = -ETIMEDOUT;
goto free_exec_packet;
} else if (ret == -ERESTARTSYS) {
- pr_err("Wait for execution_v2 done interrupted by signal\n");
+ NPU_ERR("Wait for execution_v2 done interrupted by signal\n");
network->cmd_pending = false;
goto free_exec_packet;
}
if (network->fw_error) {
ret = -EIO;
- pr_err("fw is in error state during execute_v2 network\n");
+ NPU_ERR("fw is in error state during execute_v2 network\n");
goto free_exec_packet;
}
@@ -1627,11 +1625,11 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
(void __user *)exec_ioctl->stats_buf_addr,
network->stats_buf,
exec_ioctl->stats_buf_size)) {
- pr_err("copy stats to user failed\n");
+ NPU_ERR("copy stats to user failed\n");
exec_ioctl->stats_buf_size = 0;
}
} else {
- pr_err("execution failed %d\n", ret);
+ NPU_ERR("execution failed %d\n", ret);
}
free_exec_packet:
@@ -1645,7 +1643,7 @@ int32_t npu_host_exec_network_v2(struct npu_client *client,
* as error in order to force npu fw to stop execution
*/
if ((ret == -ETIMEDOUT) || (ret == -ERESTARTSYS)) {
- pr_err("Error handling after execution failure\n");
+ NPU_ERR("Error handling after execution failure\n");
host_error_hdlr(npu_dev, true);
}
@@ -1673,7 +1671,7 @@ int32_t npu_host_loopback_test(struct npu_device *npu_dev)
ret = npu_send_misc_cmd(npu_dev, IPC_QUEUE_APPS_EXEC, &loopback_packet);
if (ret) {
- pr_err("NPU_IPC_CMD_LOOPBACK sent failed: %d\n", ret);
+ NPU_ERR("NPU_IPC_CMD_LOOPBACK sent failed: %d\n", ret);
goto loopback_exit;
}
@@ -1683,10 +1681,10 @@ int32_t npu_host_loopback_test(struct npu_device *npu_dev)
NW_DEBUG_TIMEOUT : NW_CMD_TIMEOUT);
if (!ret) {
- pr_err_ratelimited("npu: NPU_IPC_CMD_LOOPBACK time out\n");
+ NPU_ERR("npu: NPU_IPC_CMD_LOOPBACK time out\n");
ret = -ETIMEDOUT;
} else if (ret < 0) {
- pr_err("Wait for loopback done interrupted by signal\n");
+ NPU_ERR("Wait for loopback done interrupted by signal\n");
}
loopback_exit:
@@ -1708,7 +1706,7 @@ void npu_host_cleanup_networks(struct npu_client *client)
for (i = 0; i < MAX_LOADED_NETWORK; i++) {
network = &host_ctx->networks[i];
if (network->client == client) {
- pr_warn("network %d is not unloaded before close\n",
+ NPU_WARN("network %d is not unloaded before close\n",
network->network_hdl);
unload_req.network_hdl = network->network_hdl;
npu_host_unload_network(client, &unload_req);
@@ -1719,7 +1717,7 @@ void npu_host_cleanup_networks(struct npu_client *client)
while (!list_empty(&client->mapped_buffer_list)) {
ion_buf = list_first_entry(&client->mapped_buffer_list,
struct npu_ion_buf, list);
- pr_warn("unmap buffer %x:%llx\n", ion_buf->fd, ion_buf->iova);
+ NPU_WARN("unmap buffer %x:%llx\n", ion_buf->fd, ion_buf->iova);
unmap_req.buf_ion_hdl = ion_buf->fd;
unmap_req.npu_phys_addr = ion_buf->iova;
npu_host_unmap_buf(client, &unmap_req);
diff --git a/drivers/media/platform/msm/synx/synx_util.c b/drivers/media/platform/msm/synx/synx_util.c
index c72fac9..829298d 100644
--- a/drivers/media/platform/msm/synx/synx_util.c
+++ b/drivers/media/platform/msm/synx/synx_util.c
@@ -516,7 +516,9 @@ int synx_generate_secure_key(struct synx_table_row *row)
if (!row)
return -EINVAL;
- get_random_bytes(&row->secure_key, sizeof(row->secure_key));
+ if (!row->secure_key)
+ get_random_bytes(&row->secure_key, sizeof(row->secure_key));
+
return row->secure_key;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 69d41ed..e7cf18ce 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -904,21 +904,7 @@ int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
inst->flags |= VIDC_REALTIME;
break;
case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
- if (ctrl->val == INT_MAX) {
- dprintk(VIDC_DBG,
- "inst(%pK) Request for turbo mode\n", inst);
- inst->clk_data.turbo_mode = true;
- } else if (msm_vidc_validate_operating_rate(inst, ctrl->val)) {
- dprintk(VIDC_ERR, "Failed to set operating rate\n");
- rc = -ENOTSUPP;
- } else {
- dprintk(VIDC_DBG,
- "inst(%pK) operating rate changed from %d to %d\n",
- inst, inst->clk_data.operating_rate >> 16,
- ctrl->val >> 16);
- inst->clk_data.operating_rate = ctrl->val;
- inst->clk_data.turbo_mode = false;
- }
+ inst->clk_data.operating_rate = ctrl->val;
break;
case V4L2_CID_MPEG_VIDC_VIDEO_LOWLATENCY_MODE:
inst->clk_data.low_latency_mode = !!ctrl->val;
@@ -1051,6 +1037,22 @@ int msm_vdec_set_output_buffer_counts(struct msm_vidc_inst *inst)
__func__, buffer_type);
return -EINVAL;
}
+ if (buffer_type == HAL_BUFFER_OUTPUT2) {
+ /*
+ * For split mode set DPB count as well
+ * For DPB actual count is same as min output count
+ */
+ rc = msm_comm_set_buffer_count(inst,
+ bufreq->buffer_count_min,
+ bufreq->buffer_count_min,
+ HAL_BUFFER_OUTPUT);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "%s: failed to set buffer count(%#x)\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
+ }
rc = msm_comm_set_buffer_count(inst,
bufreq->buffer_count_min,
bufreq->buffer_count_actual,
@@ -1382,7 +1384,6 @@ int msm_vdec_set_extradata(struct msm_vidc_inst *inst)
{
uint32_t display_info = HFI_PROPERTY_PARAM_VUI_DISPLAY_INFO_EXTRADATA;
u32 value = 0x0;
- u32 hdr10_hist = 0x0;
switch (inst->fmts[OUTPUT_PORT].fourcc) {
case V4L2_PIX_FMT_H264:
@@ -1407,10 +1408,10 @@ int msm_vdec_set_extradata(struct msm_vidc_inst *inst)
msm_comm_set_extradata(inst, display_info, 0x1);
if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_VP9 ||
inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC) {
- hdr10_hist = 0x1;
+ msm_comm_set_extradata(inst,
+ HFI_PROPERTY_PARAM_HDR10_HIST_EXTRADATA, 0x1);
}
- msm_comm_set_extradata(inst,
- HFI_PROPERTY_PARAM_HDR10_HIST_EXTRADATA, hdr10_hist);
+
msm_comm_set_extradata(inst,
HFI_PROPERTY_PARAM_VDEC_NUM_CONCEALED_MB, 0x1);
if (inst->fmts[OUTPUT_PORT].fourcc == V4L2_PIX_FMT_HEVC) {
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index bbbf3ea..62896d9 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -954,6 +954,15 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
.default_value = V4L2_MPEG_MSM_VIDC_DISABLE,
.step = 1,
},
+ {
+ .id = V4L2_CID_MPEG_VIDC_VENC_BITRATE_SAVINGS,
+ .name = "Enable/Disable bitrate savings",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .minimum = V4L2_MPEG_MSM_VIDC_DISABLE,
+ .maximum = V4L2_MPEG_MSM_VIDC_ENABLE,
+ .default_value = V4L2_MPEG_MSM_VIDC_ENABLE,
+ .step = 1,
+ },
};
#define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls)
@@ -1487,21 +1496,7 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
}
break;
case V4L2_CID_MPEG_VIDC_VIDEO_OPERATING_RATE:
- if (ctrl->val == INT_MAX) {
- dprintk(VIDC_DBG, "inst(%pK) Request for turbo mode\n",
- inst);
- inst->clk_data.turbo_mode = true;
- } else if (msm_vidc_validate_operating_rate(inst, ctrl->val)) {
- dprintk(VIDC_ERR, "Failed to set operating rate\n");
- rc = -ENOTSUPP;
- } else {
- dprintk(VIDC_DBG,
- "inst(%pK) operating rate changed from %d to %d\n",
- inst, inst->clk_data.operating_rate >> 16,
- ctrl->val >> 16);
- inst->clk_data.operating_rate = ctrl->val;
- inst->clk_data.turbo_mode = false;
- }
+ inst->clk_data.operating_rate = ctrl->val;
if (inst->state == MSM_VIDC_START_DONE) {
rc = msm_venc_set_operating_rate(inst);
if (rc)
@@ -2853,6 +2848,9 @@ int msm_venc_set_intra_refresh_mode(struct msm_vidc_inst *inst)
rc_mode->val == V4L2_MPEG_VIDEO_BITRATE_MODE_CBR))
return 0;
+ /* Firmware supports only random mode */
+ intra_refresh.mode = HFI_INTRA_REFRESH_RANDOM;
+
ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM);
intra_refresh.mbs = 0;
if (ctrl->val) {
@@ -2860,9 +2858,6 @@ int msm_venc_set_intra_refresh_mode(struct msm_vidc_inst *inst)
u32 width = inst->prop.width[CAPTURE_PORT];
u32 height = inst->prop.height[CAPTURE_PORT];
- /* ignore cyclic mode if random mode is set */
- intra_refresh.mode = HFI_INTRA_REFRESH_RANDOM;
-
num_mbs_per_frame = NUM_MBS_PER_FRAME(height, width);
intra_refresh.mbs = num_mbs_per_frame / ctrl->val;
if (num_mbs_per_frame % ctrl->val) {
@@ -2871,7 +2866,6 @@ int msm_venc_set_intra_refresh_mode(struct msm_vidc_inst *inst)
} else {
ctrl = get_ctrl(inst,
V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB);
- intra_refresh.mode = HFI_INTRA_REFRESH_CYCLIC;
intra_refresh.mbs = ctrl->val;
}
if (!intra_refresh.mbs) {
@@ -2890,6 +2884,37 @@ int msm_venc_set_intra_refresh_mode(struct msm_vidc_inst *inst)
return rc;
}
+int msm_venc_set_bitrate_savings_mode(struct msm_vidc_inst *inst)
+{
+ int rc = 0;
+ struct hfi_device *hdev;
+ struct v4l2_ctrl *ctrl = NULL;
+ struct hfi_enable enable;
+
+ if (!inst || !inst->core) {
+ dprintk(VIDC_ERR, "%s: invalid params\n", __func__);
+ return -EINVAL;
+ }
+ hdev = inst->core->device;
+
+ ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VENC_BITRATE_SAVINGS);
+ enable.enable = !!ctrl->val;
+ if (!ctrl->val && inst->rc_type != V4L2_MPEG_VIDEO_BITRATE_MODE_VBR) {
+ dprintk(VIDC_DBG,
+ "Can't disable bitrate savings for non-VBR_CFR\n");
+ enable.enable = 1;
+ }
+
+ dprintk(VIDC_DBG, "%s: %d\n", __func__, enable.enable);
+ rc = call_hfi_op(hdev, session_set_property, inst->session,
+ HFI_PROPERTY_PARAM_VENC_BITRATE_SAVINGS, &enable,
+ sizeof(enable));
+ if (rc)
+ dprintk(VIDC_ERR, "%s: set property failed\n", __func__);
+
+ return rc;
+}
+
int msm_venc_set_loop_filter_mode(struct msm_vidc_inst *inst)
{
int rc = 0;
@@ -3820,6 +3845,9 @@ int msm_venc_set_properties(struct msm_vidc_inst *inst)
rc = msm_venc_set_rate_control(inst);
if (rc)
goto exit;
+ rc = msm_venc_set_bitrate_savings_mode(inst);
+ if (rc)
+ goto exit;
rc = msm_venc_set_input_timestamp_rc(inst);
if (rc)
goto exit;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 068d8d0..ccf807b5 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -817,7 +817,7 @@ static inline int start_streaming(struct msm_vidc_inst *inst)
}
/* Assign Core and LP mode for current session */
- rc = msm_vidc_decide_core_and_power_mode(inst);
+ rc = call_core_op(inst->core, decide_core_and_power_mode, inst);
if (rc) {
dprintk(VIDC_ERR,
"This session can't be submitted to HW %pK\n", inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
index ea0107d..60e5f3a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
@@ -286,14 +286,11 @@ static inline u32 calculate_mpeg2d_scratch1_size(struct msm_vidc_inst *inst,
u32 width, u32 height, u32 min_buf_count, bool split_mode_enabled);
static inline u32 calculate_h264e_scratch1_size(struct msm_vidc_inst *inst,
- u32 width, u32 height, u32 num_ref, bool ten_bit,
- u32 num_vpp_pipes);
+ u32 width, u32 height, u32 num_ref, bool ten_bit);
static inline u32 calculate_h265e_scratch1_size(struct msm_vidc_inst *inst,
- u32 width, u32 height, u32 num_ref, bool ten_bit,
- u32 num_vpp_pipes);
+ u32 width, u32 height, u32 num_ref, bool ten_bit);
static inline u32 calculate_vp8e_scratch1_size(struct msm_vidc_inst *inst,
- u32 width, u32 height, u32 num_ref, bool ten_bit,
- u32 num_vpp_pipes);
+ u32 width, u32 height, u32 num_ref, bool ten_bit);
static inline u32 calculate_enc_scratch2_size(struct msm_vidc_inst *inst,
u32 width, u32 height, u32 num_ref, bool ten_bit);
@@ -536,8 +533,7 @@ int msm_vidc_get_encoder_internal_buffer_sizes(struct msm_vidc_inst *inst)
curr_req->buffer_size =
enc_calculators->calculate_scratch1_size(
inst, width, height, num_ref,
- is_tenbit,
- inst->clk_data.work_route);
+ is_tenbit);
valid_buffer_type = true;
} else if (curr_req->buffer_type ==
HAL_BUFFER_INTERNAL_SCRATCH_2) {
@@ -1244,7 +1240,8 @@ static inline u32 calculate_enc_scratch_size(struct msm_vidc_inst *inst,
size_singlePipe = sao_bin_buffer_size + padded_bin_size;
size_singlePipe = ALIGN(size_singlePipe, VENUS_DMA_ALIGNMENT);
bitbin_size = size_singlePipe * NUM_OF_VPP_PIPES;
- size = ALIGN(bitbin_size, VENUS_DMA_ALIGNMENT) * total_bitbin_buffers;
+ size = ALIGN(bitbin_size, VENUS_DMA_ALIGNMENT) * total_bitbin_buffers
+ + 512;
return size;
}
@@ -1503,13 +1500,13 @@ static inline u32 calculate_enc_scratch1_size(struct msm_vidc_inst *inst,
bse_slice_cmd_buffer_size = ((((8192 << 2) + 7) & (~7)) * 6);
bse_reg_buffer_size = ((((512 << 3) + 7) & (~7)) * 4);
vpp_reg_buffer_size = ((((HFI_VENUS_VPPSG_MAX_REGISTERS << 3) + 31) &
- (~31)) * 8);
- lambda_lut_size = ((((52 << 1) + 7) & (~7)) * 3);
+ (~31)) * 10);
+ lambda_lut_size = ((((52 << 1) + 7) & (~7)) * 11);
override_buffer_size = 16 * ((frame_num_lcu + 7) >> 3);
override_buffer_size = ALIGN(override_buffer_size,
VENUS_DMA_ALIGNMENT) * 2;
ir_buffer_size = (((frame_num_lcu << 1) + 7) & (~7)) * 3;
- vpss_line_buf = ((16 * width_coded) + (16 * height_coded));
+ vpss_line_buf = ((((width_coded + 3) >> 2) << 5) + 256) * 16;
topline_bufsize_fe_1stg_sao = (16 * (width_coded >> 5));
topline_bufsize_fe_1stg_sao = ALIGN(topline_bufsize_fe_1stg_sao,
VENUS_DMA_ALIGNMENT);
@@ -1531,27 +1528,24 @@ static inline u32 calculate_enc_scratch1_size(struct msm_vidc_inst *inst,
}
static inline u32 calculate_h264e_scratch1_size(struct msm_vidc_inst *inst,
- u32 width, u32 height, u32 num_ref, bool ten_bit,
- u32 num_vpp_pipes)
+ u32 width, u32 height, u32 num_ref, bool ten_bit)
{
return calculate_enc_scratch1_size(inst, width, height, 16,
- num_ref, ten_bit, num_vpp_pipes, false);
+ num_ref, ten_bit, NUM_OF_VPP_PIPES, false);
}
static inline u32 calculate_h265e_scratch1_size(struct msm_vidc_inst *inst,
- u32 width, u32 height, u32 num_ref, bool ten_bit,
- u32 num_vpp_pipes)
+ u32 width, u32 height, u32 num_ref, bool ten_bit)
{
return calculate_enc_scratch1_size(inst, width, height, 32,
- num_ref, ten_bit, num_vpp_pipes, true);
+ num_ref, ten_bit, NUM_OF_VPP_PIPES, true);
}
static inline u32 calculate_vp8e_scratch1_size(struct msm_vidc_inst *inst,
- u32 width, u32 height, u32 num_ref, bool ten_bit,
- u32 num_vpp_pipes)
+ u32 width, u32 height, u32 num_ref, bool ten_bit)
{
return calculate_enc_scratch1_size(inst, width, height, 16,
- num_ref, ten_bit, num_vpp_pipes, false);
+ num_ref, ten_bit, 1, false);
}
@@ -1612,16 +1606,11 @@ static inline u32 calculate_enc_scratch2_size(struct msm_vidc_inst *inst,
16, HFI_COLOR_FORMAT_YUV420_NV12_UBWC_Y_TILE_HEIGHT);
meta_size_y = hfi_ubwc_metadata_plane_buffer_size(
metadata_stride, meta_buf_height);
- metadata_stride = hfi_ubwc_uv_metadata_plane_stride(width,
- 64, HFI_COLOR_FORMAT_YUV420_NV12_UBWC_UV_TILE_WIDTH);
- meta_buf_height = hfi_ubwc_uv_metadata_plane_bufheight(
- height, 16,
- HFI_COLOR_FORMAT_YUV420_NV12_UBWC_UV_TILE_HEIGHT);
meta_size_c = hfi_ubwc_metadata_plane_buffer_size(
metadata_stride, meta_buf_height);
size = (aligned_height + chroma_height) * aligned_width +
meta_size_y + meta_size_c;
- size = (size * ((num_ref)+1)) + 4096;
+ size = (size * ((num_ref)+2)) + 4096;
} else {
ref_buf_height = (height + (HFI_VENUS_HEIGHT_ALIGNMENT - 1))
& (~(HFI_VENUS_HEIGHT_ALIGNMENT - 1));
@@ -1644,13 +1633,6 @@ static inline u32 calculate_enc_scratch2_size(struct msm_vidc_inst *inst,
metadata_stride = hfi_ubwc_calc_metadata_plane_stride(
width,
VENUS_METADATA_STRIDE_MULTIPLE,
- HFI_COLOR_FORMAT_YUV420_NV12_UBWC_Y_TILE_WIDTH);
- meta_buf_height = hfi_ubwc_metadata_plane_bufheight(height,
- VENUS_METADATA_HEIGHT_MULTIPLE,
- HFI_COLOR_FORMAT_YUV420_NV12_UBWC_Y_TILE_HEIGHT);
- metadata_stride = hfi_ubwc_calc_metadata_plane_stride(
- width,
- VENUS_METADATA_STRIDE_MULTIPLE,
HFI_COLOR_FORMAT_YUV420_TP10_UBWC_Y_TILE_WIDTH);
meta_buf_height = hfi_ubwc_metadata_plane_bufheight(
height,
@@ -1661,7 +1643,7 @@ static inline u32 calculate_enc_scratch2_size(struct msm_vidc_inst *inst,
meta_size_c = hfi_ubwc_metadata_plane_buffer_size(
metadata_stride, meta_buf_height);
size = ref_buf_size + meta_size_y + meta_size_c;
- size = (size * ((num_ref)+1)) + 4096;
+ size = (size * ((num_ref)+2)) + 4096;
}
return size;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h
index cddae12..29fe98c 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.h
@@ -18,8 +18,7 @@ struct msm_vidc_enc_buff_size_calculators {
u32 (*calculate_scratch_size)(struct msm_vidc_inst *inst, u32 width,
u32 height, u32 work_mode);
u32 (*calculate_scratch1_size)(struct msm_vidc_inst *inst,
- u32 width, u32 height, u32 num_ref, bool ten_bit,
- u32 num_vpp_pipes);
+ u32 width, u32 height, u32 num_ref, bool ten_bit);
u32 (*calculate_scratch2_size)(struct msm_vidc_inst *inst,
u32 width, u32 height, u32 num_ref, bool ten_bit);
u32 (*calculate_persist_size)(void);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 7e10ec6..3077152a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -27,18 +27,21 @@ struct msm_vidc_core_ops core_ops_ar50 = {
.calc_freq = msm_vidc_calc_freq_ar50,
.decide_work_route = NULL,
.decide_work_mode = msm_vidc_decide_work_mode_ar50,
+ .decide_core_and_power_mode = NULL,
};
struct msm_vidc_core_ops core_ops_iris1 = {
.calc_freq = msm_vidc_calc_freq_iris1,
.decide_work_route = msm_vidc_decide_work_route_iris1,
.decide_work_mode = msm_vidc_decide_work_mode_iris1,
+ .decide_core_and_power_mode = msm_vidc_decide_core_and_power_mode_iris1,
};
struct msm_vidc_core_ops core_ops_iris2 = {
.calc_freq = msm_vidc_calc_freq_iris2,
.decide_work_route = msm_vidc_decide_work_route_iris2,
.decide_work_mode = msm_vidc_decide_work_mode_iris2,
+ .decide_core_and_power_mode = msm_vidc_decide_core_and_power_mode_iris2,
};
static inline void msm_dcvs_print_dcvs_stats(struct clock_data *dcvs)
@@ -847,7 +850,9 @@ int msm_vidc_set_clocks(struct msm_vidc_core *core)
struct hfi_device *hdev;
unsigned long freq_core_1 = 0, freq_core_2 = 0, rate = 0;
unsigned long freq_core_max = 0;
- struct msm_vidc_inst *temp = NULL;
+ struct msm_vidc_inst *inst = NULL;
+ struct msm_vidc_buffer *temp, *next;
+ u32 device_addr, filled_len;
int rc = 0, i = 0;
struct allowed_clock_rates_table *allowed_clks_tbl = NULL;
bool increment, decrement;
@@ -863,15 +868,34 @@ int msm_vidc_set_clocks(struct msm_vidc_core *core)
mutex_lock(&core->lock);
increment = false;
decrement = true;
- list_for_each_entry(temp, &core->instances, list) {
+ list_for_each_entry(inst, &core->instances, list) {
+ device_addr = 0;
+ filled_len = 0;
+ mutex_lock(&inst->registeredbufs.lock);
+ list_for_each_entry_safe(temp, next,
+ &inst->registeredbufs.list, list) {
+ if (temp->vvb.vb2_buf.type ==
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ filled_len = max(filled_len,
+ temp->vvb.vb2_buf.planes[0].bytesused);
+ device_addr = temp->smem[0].device_addr;
+ }
+ }
+ mutex_unlock(&inst->registeredbufs.lock);
- if (temp->clk_data.core_id == VIDC_CORE_ID_1)
- freq_core_1 += temp->clk_data.min_freq;
- else if (temp->clk_data.core_id == VIDC_CORE_ID_2)
- freq_core_2 += temp->clk_data.min_freq;
- else if (temp->clk_data.core_id == VIDC_CORE_ID_3) {
- freq_core_1 += temp->clk_data.min_freq;
- freq_core_2 += temp->clk_data.min_freq;
+ if (!filled_len || !device_addr) {
+ dprintk(VIDC_DBG, "%s no input for session %x\n",
+ __func__, hash32_ptr(inst->session));
+ continue;
+ }
+
+ if (inst->clk_data.core_id == VIDC_CORE_ID_1)
+ freq_core_1 += inst->clk_data.min_freq;
+ else if (inst->clk_data.core_id == VIDC_CORE_ID_2)
+ freq_core_2 += inst->clk_data.min_freq;
+ else if (inst->clk_data.core_id == VIDC_CORE_ID_3) {
+ freq_core_1 += inst->clk_data.min_freq;
+ freq_core_2 += inst->clk_data.min_freq;
}
freq_core_max = max_t(unsigned long, freq_core_1, freq_core_2);
@@ -885,18 +909,11 @@ int msm_vidc_set_clocks(struct msm_vidc_core *core)
break;
}
- if (temp->clk_data.turbo_mode) {
- dprintk(VIDC_PROF,
- "Found an instance with Turbo request\n");
- freq_core_max = msm_vidc_max_freq(core);
- decrement = false;
- break;
- }
/* increment even if one session requested for it */
- if (temp->clk_data.dcvs_flags & MSM_VIDC_DCVS_INCR)
+ if (inst->clk_data.dcvs_flags & MSM_VIDC_DCVS_INCR)
increment = true;
/* decrement only if all sessions requested for it */
- if (!(temp->clk_data.dcvs_flags & MSM_VIDC_DCVS_DECR))
+ if (!(inst->clk_data.dcvs_flags & MSM_VIDC_DCVS_DECR))
decrement = false;
}
@@ -931,69 +948,6 @@ int msm_vidc_set_clocks(struct msm_vidc_core *core)
return rc;
}
-int msm_vidc_validate_operating_rate(struct msm_vidc_inst *inst,
- u32 operating_rate)
-{
- struct msm_vidc_inst *temp;
- struct msm_vidc_core *core;
- unsigned long max_freq, freq_left, ops_left, load, cycles, freq = 0;
- unsigned long mbs_per_second;
- int rc = 0;
- u32 curr_operating_rate = 0;
-
- if (!inst || !inst->core) {
- dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
- return -EINVAL;
- }
- core = inst->core;
- curr_operating_rate = inst->clk_data.operating_rate >> 16;
-
- mutex_lock(&core->lock);
- max_freq = msm_vidc_max_freq(core);
- list_for_each_entry(temp, &core->instances, list) {
- if (temp == inst ||
- temp->state < MSM_VIDC_START_DONE ||
- temp->state >= MSM_VIDC_RELEASE_RESOURCES_DONE)
- continue;
-
- freq += temp->clk_data.min_freq;
- }
-
- freq_left = max_freq - freq;
-
- mbs_per_second = msm_comm_get_inst_load_per_core(inst,
- LOAD_CALC_NO_QUIRKS);
-
- cycles = inst->clk_data.entry->vpp_cycles;
- if (inst->session_type == MSM_VIDC_ENCODER)
- cycles = inst->flags & VIDC_LOW_POWER ?
- inst->clk_data.entry->low_power_cycles :
- cycles;
-
- load = cycles * mbs_per_second;
-
- ops_left = load ? (freq_left / load) : 0;
-
- operating_rate = operating_rate >> 16;
-
- if ((curr_operating_rate * (1 + ops_left)) >= operating_rate ||
- msm_vidc_clock_voting ||
- inst->clk_data.buffer_counter < DCVS_FTB_WINDOW) {
- dprintk(VIDC_DBG,
- "Requestd operating rate is valid %u\n",
- operating_rate);
- rc = 0;
- } else {
- dprintk(VIDC_DBG,
- "Current load is high for requested settings. Cannot set operating rate to %u\n",
- operating_rate);
- rc = -EINVAL;
- }
- mutex_unlock(&core->lock);
-
- return rc;
-}
-
int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
{
struct msm_vidc_buffer *temp, *next;
@@ -1027,7 +981,7 @@ int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
if (!filled_len || !device_addr) {
dprintk(VIDC_DBG, "%s no input for session %x\n",
__func__, hash32_ptr(inst->session));
- goto no_clock_change;
+ return 0;
}
freq = call_core_op(inst->core, calc_freq, inst, filled_len);
@@ -1045,7 +999,6 @@ int msm_comm_scale_clocks(struct msm_vidc_inst *inst)
msm_vidc_set_clocks(inst->core);
-no_clock_change:
return 0;
}
@@ -1574,7 +1527,7 @@ int msm_vidc_decide_work_mode_iris2(struct msm_vidc_inst *inst)
static inline int msm_vidc_power_save_mode_enable(struct msm_vidc_inst *inst,
bool enable)
{
- u32 rc = 0, mbs_per_frame;
+ u32 rc = 0, mbs_per_frame, mbs_per_sec;
u32 prop_id = 0;
void *pdata = NULL;
struct hfi_device *hdev = NULL;
@@ -1587,15 +1540,17 @@ static inline int msm_vidc_power_save_mode_enable(struct msm_vidc_inst *inst,
__func__);
return 0;
}
- mbs_per_frame = msm_vidc_get_mbs_per_frame(inst);
- if (mbs_per_frame > inst->core->resources.max_hq_mbs_per_frame ||
- msm_vidc_get_fps(inst) >
- (int) inst->core->resources.max_hq_fps) {
- enable = true;
- }
+
/* Power saving always disabled for CQ RC mode. */
- if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ)
+ mbs_per_frame = msm_vidc_get_mbs_per_frame(inst);
+ mbs_per_sec = mbs_per_frame * msm_vidc_get_fps(inst);
+ if (inst->rc_type == V4L2_MPEG_VIDEO_BITRATE_MODE_CQ ||
+ (mbs_per_frame <=
+ inst->core->resources.max_hq_mbs_per_frame &&
+ mbs_per_sec <=
+ inst->core->resources.max_hq_mbs_per_sec)) {
enable = false;
+ }
prop_id = HFI_PROPERTY_CONFIG_VENC_PERF_MODE;
hfi_perf_mode = enable ? HFI_VENC_PERFMODE_POWER_SAVE :
@@ -1673,7 +1628,7 @@ static u32 get_core_load(struct msm_vidc_core *core,
return load;
}
-int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst)
+int msm_vidc_decide_core_and_power_mode_iris1(struct msm_vidc_inst *inst)
{
int rc = 0, hier_mode = 0;
struct hfi_device *hdev;
@@ -1813,6 +1768,14 @@ int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst)
return rc;
}
+int msm_vidc_decide_core_and_power_mode_iris2(struct msm_vidc_inst *inst)
+{
+ inst->clk_data.core_id = VIDC_CORE_ID_1;
+ msm_print_core_status(inst->core, VIDC_CORE_ID_1);
+
+ return msm_vidc_power_save_mode_enable(inst, true);
+}
+
void msm_vidc_init_core_clk_ops(struct msm_vidc_core *core)
{
if (!core)
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index 4742d37..3882f5e 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -8,8 +8,6 @@
#include "msm_vidc_internal.h"
void msm_clock_data_reset(struct msm_vidc_inst *inst);
-int msm_vidc_validate_operating_rate(struct msm_vidc_inst *inst,
- u32 operating_rate);
int msm_vidc_set_clocks(struct msm_vidc_core *core);
int msm_comm_vote_bus(struct msm_vidc_core *core);
int msm_dcvs_try_enable(struct msm_vidc_inst *inst);
@@ -21,7 +19,8 @@ int msm_vidc_decide_work_route_iris1(struct msm_vidc_inst *inst);
int msm_vidc_decide_work_mode_iris1(struct msm_vidc_inst *inst);
int msm_vidc_decide_work_route_iris2(struct msm_vidc_inst *inst);
int msm_vidc_decide_work_mode_iris2(struct msm_vidc_inst *inst);
-int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst);
+int msm_vidc_decide_core_and_power_mode_iris1(struct msm_vidc_inst *inst);
+int msm_vidc_decide_core_and_power_mode_iris2(struct msm_vidc_inst *inst);
void msm_print_core_status(struct msm_vidc_core *core, u32 core_id);
void msm_vidc_clear_freq_entry(struct msm_vidc_inst *inst,
u32 device_addr);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index c77cba9..4fdf7fd 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -2582,6 +2582,10 @@ static void handle_fbd(enum hal_command_response cmd, void *data)
msm_comm_store_mark_data(&inst->fbd_data, vb->index,
fill_buf_done->mark_data, fill_buf_done->mark_target);
}
+ if (inst->session_type == MSM_VIDC_ENCODER) {
+ msm_comm_store_filled_length(&inst->fbd_data, vb->index,
+ fill_buf_done->filled_len1);
+ }
extra_idx = EXTRADATA_IDX(inst->bufq[CAPTURE_PORT].num_planes);
if (extra_idx && extra_idx < VIDEO_MAX_PLANES)
@@ -6056,9 +6060,14 @@ int msm_comm_qbuf_cache_operations(struct msm_vidc_inst *inst,
} else if (vb->type ==
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
if (!i) { /* bitstream */
+ u32 size_u32;
skip = false;
offset = 0;
- size = vb->planes[i].length;
+ size_u32 = vb->planes[i].length;
+ msm_comm_fetch_filled_length(
+ &inst->fbd_data, vb->index,
+ &size_u32);
+ size = size_u32;
cache_op = SMEM_CACHE_INVALIDATE;
}
}
@@ -6520,6 +6529,63 @@ bool kref_get_mbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf)
return ret;
}
+void msm_comm_store_filled_length(struct msm_vidc_list *data_list,
+ u32 index, u32 filled_length)
+{
+ struct msm_vidc_buf_data *pdata = NULL;
+ bool found = false;
+
+ if (!data_list) {
+ dprintk(VIDC_ERR, "%s: invalid params %pK\n",
+ __func__, data_list);
+ return;
+ }
+
+ mutex_lock(&data_list->lock);
+ list_for_each_entry(pdata, &data_list->list, list) {
+ if (pdata->index == index) {
+ pdata->filled_length = filled_length;
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dprintk(VIDC_WARN, "%s: malloc failure.\n", __func__);
+ goto exit;
+ }
+ pdata->index = index;
+ pdata->filled_length = filled_length;
+ list_add_tail(&pdata->list, &data_list->list);
+ }
+
+exit:
+ mutex_unlock(&data_list->lock);
+}
+
+void msm_comm_fetch_filled_length(struct msm_vidc_list *data_list,
+ u32 index, u32 *filled_length)
+{
+ struct msm_vidc_buf_data *pdata = NULL;
+
+ if (!data_list || !filled_length) {
+ dprintk(VIDC_ERR, "%s: invalid params %pK %pK\n",
+ __func__, data_list, filled_length);
+ return;
+ }
+
+ mutex_lock(&data_list->lock);
+ list_for_each_entry(pdata, &data_list->list, list) {
+ if (pdata->index == index) {
+ *filled_length = pdata->filled_length;
+ break;
+ }
+ }
+ mutex_unlock(&data_list->lock);
+}
+
void msm_comm_store_mark_data(struct msm_vidc_list *data_list,
u32 index, u32 mark_data, u32 mark_target)
{
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.h b/drivers/media/platform/msm/vidc/msm_vidc_common.h
index f79a6b7..ce806fe 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.h
@@ -252,6 +252,10 @@ void print_v4l2_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
struct v4l2_buffer *v4l2);
void kref_put_mbuf(struct msm_vidc_buffer *mbuf);
bool kref_get_mbuf(struct msm_vidc_inst *inst, struct msm_vidc_buffer *mbuf);
+void msm_comm_store_filled_length(struct msm_vidc_list *data_list,
+ u32 index, u32 filled_length);
+void msm_comm_fetch_filled_length(struct msm_vidc_list *data_list,
+ u32 index, u32 *filled_length);
void msm_comm_store_mark_data(struct msm_vidc_list *data_list,
u32 index, u32 mark_data, u32 mark_target);
void msm_comm_fetch_mark_data(struct msm_vidc_list *data_list,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c b/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c
index f158c35..52875695 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_dyn_gov.c
@@ -273,8 +273,6 @@ static void __dump(struct dump dump[], int len)
}
}
-
- dprintk(VIDC_PROF, "%s", formatted_line);
}
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 7f266c3..27c9ceb 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -187,6 +187,7 @@ struct msm_vidc_buf_data {
u32 index;
u32 mark_data;
u32 mark_target;
+ u32 filled_length;
};
struct msm_vidc_common_data {
@@ -383,7 +384,6 @@ struct clock_data {
u32 work_mode;
bool low_latency_mode;
bool is_cbr_plus;
- bool turbo_mode;
u32 work_route;
u32 dcvs_flags;
u32 frame_rate;
@@ -416,6 +416,7 @@ struct msm_vidc_core_ops {
unsigned long (*calc_freq)(struct msm_vidc_inst *inst, u32 filled_len);
int (*decide_work_route)(struct msm_vidc_inst *inst);
int (*decide_work_mode)(struct msm_vidc_inst *inst);
+ int (*decide_core_and_power_mode)(struct msm_vidc_inst *inst);
};
struct msm_vidc_core {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index e9fce79..8120992 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -223,7 +223,6 @@ static struct msm_vidc_common_data default_common_data[] = {
},
};
-/* Update with kona */
static struct msm_vidc_common_data kona_common_data[] = {
{
.key = "qcom,never-unload-fw",
@@ -251,19 +250,19 @@ static struct msm_vidc_common_data kona_common_data[] = {
{
.key = "qcom,max-hw-load",
.value = 3916800, /*
- * 1920x1088/256 MBs@480fps. It is less
- * any other usecases (ex:
+ * 1920x1088/256 MBs@480fps. It is more
+ * than any other usecases (ex:
* 3840x2160@120fps, 4096x2160@96ps,
* 7680x4320@30fps)
*/
},
{
.key = "qcom,max-hq-mbs-per-frame",
- .value = 8160,
+ .value = 34560, /* 4096x2160 */
},
{
- .key = "qcom,max-hq-frames-per-sec",
- .value = 60,
+ .key = "qcom,max-hq-mbs-per-sec",
+ .value = 1036800, /* 4096x2160@30fps */
},
{
.key = "qcom,max-b-frame-mbs-per-frame",
@@ -333,8 +332,8 @@ static struct msm_vidc_common_data sm6150_common_data[] = {
.value = 8160,
},
{
- .key = "qcom,max-hq-frames-per-sec",
- .value = 60,
+ .key = "qcom,max-hq-mbs-per-sec",
+ .value = 244800, /* 1920 x 1088 @ 30 fps */
},
{
.key = "qcom,max-b-frame-mbs-per-frame",
@@ -404,8 +403,8 @@ static struct msm_vidc_common_data sm8150_common_data[] = {
.value = 8160,
},
{
- .key = "qcom,max-hq-frames-per-sec",
- .value = 60,
+ .key = "qcom,max-hq-mbs-per-sec",
+ .value = 244800, /* 1920 x 1088 @ 30 fps */
},
{
.key = "qcom,max-b-frame-mbs-per-frame",
@@ -479,8 +478,8 @@ static struct msm_vidc_common_data sdm845_common_data[] = {
.value = 8160,
},
{
- .key = "qcom,max-hq-frames-per-sec",
- .value = 60,
+ .key = "qcom,max-hq-mbs-per-sec",
+ .value = 244800, /* 1920 x 1088 @ 30 fps */
},
{
.key = "qcom,max-b-frame-mbs-per-frame",
@@ -534,8 +533,8 @@ static struct msm_vidc_common_data sdm670_common_data_v0[] = {
.value = 8160,
},
{
- .key = "qcom,max-hq-frames-per-sec",
- .value = 60,
+ .key = "qcom,max-hq-mbs-per-sec",
+ .value = 244800, /* 1920 x 1088 @ 30 fps */
},
{
.key = "qcom,max-b-frame-mbs-per-frame",
@@ -585,8 +584,8 @@ static struct msm_vidc_common_data sdm670_common_data_v1[] = {
.value = 8160,
},
{
- .key = "qcom,max-hq-frames-per-sec",
- .value = 60,
+ .key = "qcom,max-hq-mbs-per-sec",
+ .value = 244800, /* 1920 x 1088 @ 30 fps */
},
{
.key = "qcom,max-b-frame-mbs-per-frame",
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index ce7d3a3..aa68062 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -785,8 +785,8 @@ int read_platform_resources_from_drv_data(
res->max_hq_mbs_per_frame = find_key_value(platform_data,
"qcom,max-hq-mbs-per-frame");
- res->max_hq_fps = find_key_value(platform_data,
- "qcom,max-hq-frames-per-sec");
+ res->max_hq_mbs_per_sec = find_key_value(platform_data,
+ "qcom,max-hq-mbs-per-sec");
res->max_bframe_mbs_per_frame = find_key_value(platform_data,
"qcom,max-b-frame-mbs-per-frame");
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_resources.h b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
index 31b1df6..f134212 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_resources.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_resources.h
@@ -170,7 +170,7 @@ struct msm_vidc_platform_resources {
struct buffer_usage_set buffer_usage_set;
uint32_t max_load;
uint32_t max_hq_mbs_per_frame;
- uint32_t max_hq_fps;
+ uint32_t max_hq_mbs_per_sec;
uint32_t max_bframe_mbs_per_frame;
uint32_t max_bframe_mbs_per_sec;
struct platform_device *pdev;
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index f0aeb37..0ca6fa3 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -107,12 +107,18 @@ static void setup_dsp_uc_memmap_iris1(struct venus_hfi_device *device);
static void clock_config_on_enable_iris1(struct venus_hfi_device *device);
static int reset_ahb2axi_bridge(struct venus_hfi_device *device);
static int __set_ubwc_config(struct venus_hfi_device *device);
+static void power_off_common(struct venus_hfi_device *device);
+static void power_off_iris2(struct venus_hfi_device *device);
+static void noc_error_info_common(struct venus_hfi_device *device);
+static void noc_error_info_iris2(struct venus_hfi_device *device);
struct venus_hfi_vpu_ops vpu4_ops = {
.interrupt_init = interrupt_init_vpu4,
.setup_dsp_uc_memmap = NULL,
.clock_config_on_enable = NULL,
.reset_ahb2axi_bridge = NULL,
+ .power_off = power_off_common,
+ .noc_error_info = noc_error_info_common,
};
struct venus_hfi_vpu_ops iris1_ops = {
@@ -120,6 +126,8 @@ struct venus_hfi_vpu_ops iris1_ops = {
.setup_dsp_uc_memmap = setup_dsp_uc_memmap_iris1,
.clock_config_on_enable = clock_config_on_enable_iris1,
.reset_ahb2axi_bridge = reset_ahb2axi_bridge,
+ .power_off = power_off_common,
+ .noc_error_info = noc_error_info_common,
};
struct venus_hfi_vpu_ops iris2_ops = {
@@ -127,6 +135,8 @@ struct venus_hfi_vpu_ops iris2_ops = {
.setup_dsp_uc_memmap = NULL,
.clock_config_on_enable = NULL,
.reset_ahb2axi_bridge = reset_ahb2axi_bridge,
+ .power_off = power_off_iris2,
+ .noc_error_info = noc_error_info_iris2,
};
/**
@@ -3698,67 +3708,55 @@ static inline int __init_clocks(struct venus_hfi_device *device)
}
static int __handle_reset_clk(struct msm_vidc_platform_resources *res,
- enum reset_state state)
+ int reset_index, enum reset_state state)
{
- int rc = 0, i;
+ int rc = 0;
struct reset_control *rst;
struct reset_set *rst_set = &res->reset_set;
if (!rst_set->reset_tbl)
return 0;
- dprintk(VIDC_DBG, "%s reset_state %d rst_set->count = %d\n",
- __func__, state, rst_set->count);
+ rst = rst_set->reset_tbl[reset_index].rst;
+ dprintk(VIDC_DBG, "reset_clk: name %s reset_state %d rst %pK\n",
+ rst_set->reset_tbl[reset_index].name, state, rst);
- for (i = 0; i < rst_set->count; i++) {
- rst = rst_set->reset_tbl[i].rst;
- switch (state) {
- case INIT:
- dprintk(VIDC_DBG, "%s reset_state name = %s %pK\n",
- __func__, rst_set->reset_tbl[i].name, rst);
+ switch (state) {
+ case INIT:
+ if (rst)
+ goto skip_reset_init;
- if (rst)
- continue;
- rst = devm_reset_control_get(&res->pdev->dev,
- rst_set->reset_tbl[i].name);
- if (IS_ERR(rst))
- rc = PTR_ERR(rst);
+ rst = devm_reset_control_get(&res->pdev->dev,
+ rst_set->reset_tbl[reset_index].name);
+ if (IS_ERR(rst))
+ rc = PTR_ERR(rst);
- rst_set->reset_tbl[i].rst = rst;
- break;
- case ASSERT:
- if (!rst) {
- dprintk(VIDC_DBG,
- "%s reset_state name = %s %pK\n",
- __func__, rst_set->reset_tbl[i].name,
- rst);
- rc = PTR_ERR(rst);
- goto failed_to_reset;
- }
-
- rc = reset_control_assert(rst);
- break;
- case DEASSERT:
- if (!rst) {
- dprintk(VIDC_DBG,
- "%s reset_state name = %s %pK\n",
- __func__, rst_set->reset_tbl[i].name,
- rst);
- rc = PTR_ERR(rst);
- goto failed_to_reset;
- }
- rc = reset_control_deassert(rst);
- break;
- default:
- dprintk(VIDC_ERR, "Invalid reset request\n");
+ rst_set->reset_tbl[reset_index].rst = rst;
+ break;
+ case ASSERT:
+ if (!rst) {
+ rc = PTR_ERR(rst);
+ goto failed_to_reset;
}
+ rc = reset_control_assert(rst);
+ break;
+ case DEASSERT:
+ if (!rst) {
+ rc = PTR_ERR(rst);
+ goto failed_to_reset;
+ }
+ rc = reset_control_deassert(rst);
+ break;
+ default:
+ dprintk(VIDC_ERR, "Invalid reset request\n");
if (rc)
goto failed_to_reset;
}
return 0;
+skip_reset_init:
failed_to_reset:
return rc;
}
@@ -3794,29 +3792,37 @@ static inline void __disable_unprepare_clks(struct venus_hfi_device *device)
static int reset_ahb2axi_bridge(struct venus_hfi_device *device)
{
- int rc;
+ int rc, i;
if (!device) {
dprintk(VIDC_ERR, "NULL device\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto failed_to_reset;
}
- rc = __handle_reset_clk(device->res, ASSERT);
- if (rc) {
- dprintk(VIDC_ERR, "failed to assert reset clocks\n");
- return rc;
- }
+ for (i = 0; i < device->res->reset_set.count; i++) {
+ rc = __handle_reset_clk(device->res, i, ASSERT);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "failed to assert reset clocks\n");
+ goto failed_to_reset;
+ }
- /* wait for deassert */
- usleep_range(150, 250);
+ /* wait for deassert */
+ usleep_range(150, 250);
- rc = __handle_reset_clk(device->res, DEASSERT);
- if (rc) {
- dprintk(VIDC_ERR, "failed to deassert reset clocks\n");
- return rc;
+ rc = __handle_reset_clk(device->res, i, DEASSERT);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "failed to deassert reset clocks\n");
+ goto failed_to_reset;
+ }
}
return 0;
+
+failed_to_reset:
+ return rc;
}
static inline int __prepare_enable_clks(struct venus_hfi_device *device)
@@ -4087,7 +4093,7 @@ static int __init_subcaches(struct venus_hfi_device *device)
static int __init_resources(struct venus_hfi_device *device,
struct msm_vidc_platform_resources *res)
{
- int rc = 0;
+ int i, rc = 0;
rc = __init_regulators(device);
if (rc) {
@@ -4102,11 +4108,13 @@ static int __init_resources(struct venus_hfi_device *device,
goto err_init_clocks;
}
- rc = __handle_reset_clk(res, INIT);
- if (rc) {
- dprintk(VIDC_ERR, "Failed to init reset clocks\n");
- rc = -ENODEV;
- goto err_init_reset_clk;
+ for (i = 0; i < device->res->reset_set.count; i++) {
+ rc = __handle_reset_clk(res, i, INIT);
+ if (rc) {
+ dprintk(VIDC_ERR, "Failed to init reset clocks\n");
+ rc = -ENODEV;
+ goto err_init_reset_clk;
+ }
}
rc = __init_bus(device);
@@ -4598,7 +4606,7 @@ static int __venus_power_on(struct venus_hfi_device *device)
return rc;
}
-static void __venus_power_off(struct venus_hfi_device *device)
+static void power_off_common(struct venus_hfi_device *device)
{
if (!device->power_enabled)
return;
@@ -4619,6 +4627,93 @@ static void __venus_power_off(struct venus_hfi_device *device)
device->power_enabled = false;
}
+static void power_off_iris2(struct venus_hfi_device *device)
+{
+ u32 lpi_status, reg_status = 0, count = 0, max_count = 10;
+
+ if (!device->power_enabled)
+ return;
+
+ if (!(device->intr_status & VIDC_WRAPPER_INTR_STATUS_A2HWD_BMSK))
+ disable_irq_nosync(device->hal_data->irq);
+ device->intr_status = 0;
+
+ /* HPG 6.1.2 Step 1 */
+ __write_register(device, VIDC_CPU_CS_X2RPMh, 0x3);
+
+ /* HPG 6.1.2 Step 2, noc to low power */
+ __write_register(device, VIDC_AON_WRAPPER_MVP_NOC_LPI_CONTROL, 0x1);
+ while (!reg_status && count < max_count) {
+ lpi_status =
+ __read_register(device,
+ VIDC_AON_WRAPPER_MVP_NOC_LPI_STATUS);
+ reg_status = lpi_status & BIT(0);
+ dprintk(VIDC_DBG,
+ "Noc: lpi_status %d noc_status %d (count %d)\n",
+ lpi_status, reg_status, count);
+ usleep_range(50, 100);
+ count++;
+ }
+ if (count == max_count) {
+ dprintk(VIDC_ERR,
+ "NOC not in qaccept status %d\n", reg_status);
+ }
+
+ /* HPG 6.1.2 Step 3, debug bridge to low power */
+ __write_register(device,
+ VIDC_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL, 0x7);
+ reg_status = 0;
+ count = 0;
+ while ((reg_status != 0x7) && count < max_count) {
+ lpi_status = __read_register(device,
+ VIDC_WRAPPER_DEBUG_BRIDGE_LPI_STATUS);
+ reg_status = lpi_status & 0x7;
+ dprintk(VIDC_DBG,
+ "DBLP Set : lpi_status %d reg_status %d (count %d)\n",
+ lpi_status, reg_status, count);
+ usleep_range(50, 100);
+ count++;
+ }
+ if (count == max_count) {
+ dprintk(VIDC_ERR,
+ "DBLP Set: status %d\n", reg_status);
+ }
+
+ /* HPG 6.1.2 Step 4, debug bridge to lpi release */
+ __write_register(device,
+ VIDC_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL, 0x0);
+ lpi_status = 0x1;
+ count = 0;
+ while (lpi_status && count < max_count) {
+ lpi_status = __read_register(device,
+ VIDC_WRAPPER_DEBUG_BRIDGE_LPI_STATUS);
+ dprintk(VIDC_DBG,
+ "DBLP Release: lpi_status %d(count %d)\n",
+ lpi_status, count);
+ usleep_range(50, 100);
+ count++;
+ }
+ if (count == max_count) {
+ dprintk(VIDC_ERR,
+ "DBLP Release: lpi_status %d\n", lpi_status);
+ }
+
+ /* HPG 6.1.2 Step 6 */
+ __disable_unprepare_clks(device);
+
+ /* HPG 6.1.2 Step 7 & 8 */
+ if (call_venus_op(device, reset_ahb2axi_bridge, device))
+ dprintk(VIDC_ERR, "Failed to reset ahb2axi\n");
+
+ /* HPG 6.1.2 Step 5 */
+ if (__disable_regulators(device))
+ dprintk(VIDC_WARN, "Failed to disable regulators\n");
+
+ if (__unvote_buses(device))
+ dprintk(VIDC_WARN, "Failed to unvote for buses\n");
+ device->power_enabled = false;
+}
+
static inline int __suspend(struct venus_hfi_device *device)
{
int rc = 0;
@@ -4645,7 +4740,7 @@ static inline int __suspend(struct venus_hfi_device *device)
__disable_subcaches(device);
- __venus_power_off(device);
+ call_venus_op(device, power_off, device);
dprintk(VIDC_PROF, "Venus power off\n");
return rc;
@@ -4720,7 +4815,7 @@ static inline int __resume(struct venus_hfi_device *device)
err_reset_core:
__tzbsp_set_video_state(TZBSP_VIDEO_STATE_SUSPEND);
err_set_video_state:
- __venus_power_off(device);
+ call_venus_op(device, power_off, device);
err_venus_power_on:
dprintk(VIDC_ERR, "Failed to resume from power collapse\n");
return rc;
@@ -4780,7 +4875,7 @@ static int __load_fw(struct venus_hfi_device *device)
subsystem_put(device->resources.fw.cookie);
device->resources.fw.cookie = NULL;
fail_load_fw:
- __venus_power_off(device);
+ call_venus_op(device, power_off, device);
fail_venus_power_on:
fail_init_pkt:
__deinit_resources(device);
@@ -4801,7 +4896,7 @@ static void __unload_fw(struct venus_hfi_device *device)
__vote_buses(device, NULL, 0);
subsystem_put(device->resources.fw.cookie);
__interface_queues_release(device);
- __venus_power_off(device);
+ call_venus_op(device, power_off, device);
device->resources.fw.cookie = NULL;
__deinit_resources(device);
@@ -4934,10 +5029,54 @@ static void __noc_error_info(struct venus_hfi_device *device, u32 core_num)
dprintk(VIDC_ERR, "CORE%d_NOC_ERR_ERRLOG3_HIGH: %#x\n", core_num, val);
}
+static void noc_error_info_common(struct venus_hfi_device *device)
+{
+ const u32 core0 = 0, core1 = 1;
+
+ if (__read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS +
+ VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
+ __noc_error_info(device, core0);
+
+ if (__read_register(device, VCODEC_CORE1_VIDEO_NOC_BASE_OFFS +
+ VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
+ __noc_error_info(device, core1);
+}
+
+static void noc_error_info_iris2(struct venus_hfi_device *device)
+{
+ u32 val = 0;
+
+ val = __read_register(device, VCODEC_NOC_ERL_MAIN_SWID_LOW);
+ dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_SWID_LOW: %#x\n", val);
+ val = __read_register(device, VCODEC_NOC_ERL_MAIN_SWID_HIGH);
+ dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_SWID_HIGH: %#x\n", val);
+ val = __read_register(device, VCODEC_NOC_ERL_MAIN_MAINCTL_LOW);
+ dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_MAINCTL_LOW: %#x\n", val);
+ val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRVLD_LOW);
+ dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRVLD_LOW: %#x\n", val);
+ val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRCLR_LOW);
+ dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRCLR_LOW: %#x\n", val);
+ val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW);
+ dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW: %#x\n", val);
+ val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH);
+ dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH: %#x\n", val);
+ val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW);
+ dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW: %#x\n", val);
+ val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH);
+ dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH: %#x\n", val);
+ val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW);
+ dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW: %#x\n", val);
+ val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH);
+ dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH: %#x\n", val);
+ val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW);
+ dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW: %#x\n", val);
+ val = __read_register(device, VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH);
+ dprintk(VIDC_ERR, "VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH: %#x\n", val);
+}
+
static int venus_hfi_noc_error_info(void *dev)
{
struct venus_hfi_device *device;
- const u32 core0 = 0, core1 = 1;
if (!dev) {
dprintk(VIDC_ERR, "%s: null device\n", __func__);
@@ -4948,13 +5087,7 @@ static int venus_hfi_noc_error_info(void *dev)
mutex_lock(&device->lock);
dprintk(VIDC_ERR, "%s: non error information\n", __func__);
- if (__read_register(device, VCODEC_CORE0_VIDEO_NOC_BASE_OFFS +
- VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
- __noc_error_info(device, core0);
-
- if (__read_register(device, VCODEC_CORE1_VIDEO_NOC_BASE_OFFS +
- VCODEC_COREX_VIDEO_NOC_ERR_ERRVLD_LOW_OFFS))
- __noc_error_info(device, core1);
+ call_venus_op(device, noc_error_info, device);
mutex_unlock(&device->lock);
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.h b/drivers/media/platform/msm/vidc/venus_hfi.h
index 6a89b19..ecea88b 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.h
+++ b/drivers/media/platform/msm/vidc/venus_hfi.h
@@ -237,6 +237,8 @@ struct venus_hfi_vpu_ops {
void (*setup_dsp_uc_memmap)(struct venus_hfi_device *device);
void (*clock_config_on_enable)(struct venus_hfi_device *device);
int (*reset_ahb2axi_bridge)(struct venus_hfi_device *device);
+ void (*power_off)(struct venus_hfi_device *device);
+ void (*noc_error_info)(struct venus_hfi_device *device);
};
struct venus_hfi_device {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index fadc48f..49c0856 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -346,6 +346,8 @@ struct hfi_buffer_info {
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x036)
#define HFI_PROPERTY_PARAM_VENC_ADAPTIVE_B \
(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x037)
+#define HFI_PROPERTY_PARAM_VENC_BITRATE_SAVINGS \
+ (HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x038)
#define HFI_PROPERTY_CONFIG_VENC_COMMON_START \
(HFI_DOMAIN_BASE_VENC + HFI_ARCH_COMMON_OFFSET + 0x6000)
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_io.h b/drivers/media/platform/msm/vidc/vidc_hfi_io.h
index eb47f68..847c75f 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_io.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_io.h
@@ -12,6 +12,7 @@
#define VIDC_CPU_BASE_OFFS 0x000A0000
#define VIDEO_CC_BASE_OFFS 0x000F0000
+#define VIDC_AON_BASE_OFFS 0x000E0000
#define VIDC_CPU_CS_BASE_OFFS (VIDC_CPU_BASE_OFFS)
#define VIDC_CPU_IC_BASE_OFFS (VIDC_CPU_BASE_OFFS)
@@ -107,6 +108,8 @@
#define VIDC_WRAPPER_CPU_CGC_DIS (VIDC_WRAPPER_BASE_OFFS + 0x2010)
#define VIDC_WRAPPER_CPU_STATUS (VIDC_WRAPPER_BASE_OFFS + 0x2014)
+#define VIDC_WRAPPER_DEBUG_BRIDGE_LPI_CONTROL (VIDC_WRAPPER_BASE_OFFS + 0x54)
+#define VIDC_WRAPPER_DEBUG_BRIDGE_LPI_STATUS (VIDC_WRAPPER_BASE_OFFS + 0x58)
/*
* --------------------------------------------------------------------------
* MODULE: vidc_tz_wrapper
@@ -172,7 +175,7 @@
/*
* --------------------------------------------------------------------------
- * MODULE: vcodec noc error log registers
+ * MODULE: vcodec noc error log registers (iris1)
* --------------------------------------------------------------------------
*/
#define VCODEC_CORE0_VIDEO_NOC_BASE_OFFS 0x00004000
@@ -191,4 +194,27 @@
#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_LOW_OFFS 0x0538
#define VCODEC_COREX_VIDEO_NOC_ERR_ERRLOG3_HIGH_OFFS 0x053C
+#define VIDC_AON_WRAPPER_MVP_NOC_LPI_CONTROL (VIDC_AON_BASE_OFFS)
+#define VIDC_AON_WRAPPER_MVP_NOC_LPI_STATUS (VIDC_AON_BASE_OFFS + 0x4)
+
+/*
+ * --------------------------------------------------------------------------
+ * MODULE: vcodec noc error log registers (iris2)
+ * --------------------------------------------------------------------------
+ */
+#define VCODEC_NOC_VIDEO_A_NOC_BASE_OFFS 0x00010000
+#define VCODEC_NOC_ERL_MAIN_SWID_LOW 0x00011200
+#define VCODEC_NOC_ERL_MAIN_SWID_HIGH 0x00011204
+#define VCODEC_NOC_ERL_MAIN_MAINCTL_LOW 0x00011208
+#define VCODEC_NOC_ERL_MAIN_ERRVLD_LOW 0x00011210
+#define VCODEC_NOC_ERL_MAIN_ERRCLR_LOW 0x00011218
+#define VCODEC_NOC_ERL_MAIN_ERRLOG0_LOW 0x00011220
+#define VCODEC_NOC_ERL_MAIN_ERRLOG0_HIGH 0x00011224
+#define VCODEC_NOC_ERL_MAIN_ERRLOG1_LOW 0x00011228
+#define VCODEC_NOC_ERL_MAIN_ERRLOG1_HIGH 0x0001122C
+#define VCODEC_NOC_ERL_MAIN_ERRLOG2_LOW 0x00011230
+#define VCODEC_NOC_ERL_MAIN_ERRLOG2_HIGH 0x00011234
+#define VCODEC_NOC_ERL_MAIN_ERRLOG3_LOW 0x00011238
+#define VCODEC_NOC_ERL_MAIN_ERRLOG3_HIGH 0x0001123C
+
#endif
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 413308a..ea76572 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -97,7 +97,7 @@ static void mmc_should_fail_request(struct mmc_host *host,
if (!data)
return;
- if (cmd->error || data->error ||
+ if ((cmd && cmd->error) || data->error ||
!should_fail(&host->fail_mmc_request, data->blksz * data->blocks))
return;
diff --git a/drivers/mmc/host/cqhci.c b/drivers/mmc/host/cqhci.c
index 159270e..a8af682 100644
--- a/drivers/mmc/host/cqhci.c
+++ b/drivers/mmc/host/cqhci.c
@@ -201,7 +201,7 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
cq_host->desc_size = cq_host->slot_sz * cq_host->num_slots;
cq_host->data_size = cq_host->trans_desc_len * cq_host->mmc->max_segs *
- (cq_host->num_slots - 1);
+ cq_host->mmc->cqe_qdepth;
pr_debug("%s: cqhci: desc_size: %zu data_sz: %zu slot-sz: %d\n",
mmc_hostname(cq_host->mmc), cq_host->desc_size, cq_host->data_size,
@@ -217,12 +217,21 @@ static int cqhci_host_alloc_tdl(struct cqhci_host *cq_host)
cq_host->desc_size,
&cq_host->desc_dma_base,
GFP_KERNEL);
+ if (!cq_host->desc_base)
+ return -ENOMEM;
+
cq_host->trans_desc_base = dmam_alloc_coherent(mmc_dev(cq_host->mmc),
cq_host->data_size,
&cq_host->trans_desc_dma_base,
GFP_KERNEL);
- if (!cq_host->desc_base || !cq_host->trans_desc_base)
+ if (!cq_host->trans_desc_base) {
+ dmam_free_coherent(mmc_dev(cq_host->mmc), cq_host->desc_size,
+ cq_host->desc_base,
+ cq_host->desc_dma_base);
+ cq_host->desc_base = NULL;
+ cq_host->desc_dma_base = 0;
return -ENOMEM;
+ }
pr_debug("%s: cqhci: desc-base: 0x%p trans-base: 0x%p\n desc_dma 0x%llx trans_dma: 0x%llx\n",
mmc_hostname(cq_host->mmc), cq_host->desc_base, cq_host->trans_desc_base,
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 476e53d..67f6bd2 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1447,6 +1447,7 @@ static int mmc_spi_probe(struct spi_device *spi)
mmc->caps &= ~MMC_CAP_NEEDS_POLL;
mmc_gpiod_request_cd_irq(mmc);
}
+ mmc_detect_change(mmc, 0);
if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) {
has_ro = true;
diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
index 5389c48..c3d63ed 100644
--- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c
@@ -68,6 +68,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
.scc_offset = 0x0300,
.taps = rcar_gen2_scc_taps,
.taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
+ .max_blk_count = 0xffffffff,
};
/* Definitions for sampling clocks */
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index f44e490..753973d 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -1097,11 +1097,12 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
writel(readl(host->ioaddr + SDHCI_HOST_CONTROL)
| ESDHC_BURST_LEN_EN_INCR,
host->ioaddr + SDHCI_HOST_CONTROL);
+
/*
- * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
- * TO1.1, it's harmless for MX6SL
- */
- writel(readl(host->ioaddr + 0x6c) | BIT(7),
+ * erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
+ * TO1.1, it's harmless for MX6SL
+ */
+ writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
host->ioaddr + 0x6c);
/* disable DLL_CTRL delay line settings */
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 5d141f7..7c40a7e 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -279,6 +279,11 @@ static inline void sd_ctrl_write32_as_16_and_16(struct tmio_mmc_host *host,
iowrite16(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));
}
+static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)
+{
+ iowrite32(val, host->ctl + (addr << host->bus_shift));
+}
+
static inline void sd_ctrl_write32_rep(struct tmio_mmc_host *host, int addr,
const u32 *buf, int count)
{
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 261b4d6..7d13ca9 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -46,6 +46,7 @@
#include <linux/regulator/consumer.h>
#include <linux/mmc/sdio.h>
#include <linux/scatterlist.h>
+#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/swiotlb.h>
#include <linux/workqueue.h>
@@ -703,7 +704,7 @@ static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, int ireg,
return false;
}
-static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
+static bool __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
{
struct mmc_host *mmc = host->mmc;
struct tmio_mmc_data *pdata = host->pdata;
@@ -711,7 +712,7 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
unsigned int sdio_status;
if (!(pdata->flags & TMIO_MMC_SDIO_IRQ))
- return;
+ return false;
status = sd_ctrl_read16(host, CTL_SDIO_STATUS);
ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdio_irq_mask;
@@ -724,6 +725,8 @@ static void __tmio_mmc_sdio_irq(struct tmio_mmc_host *host)
if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ)
mmc_signal_sdio_irq(mmc);
+
+ return ireg;
}
irqreturn_t tmio_mmc_irq(int irq, void *devid)
@@ -742,9 +745,10 @@ irqreturn_t tmio_mmc_irq(int irq, void *devid)
if (__tmio_mmc_sdcard_irq(host, ireg, status))
return IRQ_HANDLED;
- __tmio_mmc_sdio_irq(host);
+ if (__tmio_mmc_sdio_irq(host))
+ return IRQ_HANDLED;
- return IRQ_HANDLED;
+ return IRQ_NONE;
}
EXPORT_SYMBOL_GPL(tmio_mmc_irq);
@@ -774,7 +778,10 @@ static int tmio_mmc_start_data(struct tmio_mmc_host *host,
/* Set transfer length / blocksize */
sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz);
- sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
+ if (host->mmc->max_blk_count >= SZ_64K)
+ sd_ctrl_write32(host, CTL_XFER_BLK_COUNT, data->blocks);
+ else
+ sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks);
tmio_mmc_start_dma(host, data);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index baca8f7..c3c1195 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -714,8 +714,10 @@ static struct phy_device *connect_local_phy(struct net_device *dev)
phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
priv->phy_iface);
- if (IS_ERR(phydev))
+ if (IS_ERR(phydev)) {
netdev_err(dev, "Could not attach to PHY\n");
+ phydev = NULL;
+ }
} else {
int ret;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 91f48c0..f70cb4d 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1314,7 +1314,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
unsigned long lpar_rc;
u16 mss = 0;
-restart_poll:
while (frames_processed < budget) {
if (!ibmveth_rxq_pending_buffer(adapter))
break;
@@ -1402,7 +1401,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
napi_reschedule(napi)) {
lpar_rc = h_vio_signal(adapter->vdev->unit_address,
VIO_IRQ_DISABLE);
- goto restart_poll;
}
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index a32ded5..42d2846 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -144,6 +144,8 @@ struct hv_netvsc_packet {
u32 total_data_buflen;
};
+#define NETVSC_HASH_KEYLEN 40
+
struct netvsc_device_info {
unsigned char mac_adr[ETH_ALEN];
u32 num_chn;
@@ -151,6 +153,8 @@ struct netvsc_device_info {
u32 recv_sections;
u32 send_section_size;
u32 recv_section_size;
+
+ u8 rss_key[NETVSC_HASH_KEYLEN];
};
enum rndis_device_state {
@@ -160,8 +164,6 @@ enum rndis_device_state {
RNDIS_DEV_DATAINITIALIZED,
};
-#define NETVSC_HASH_KEYLEN 40
-
struct rndis_device {
struct net_device *ndev;
@@ -210,7 +212,9 @@ int netvsc_recv_callback(struct net_device *net,
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
-int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
+int rndis_set_subchannel(struct net_device *ndev,
+ struct netvsc_device *nvdev,
+ struct netvsc_device_info *dev_info);
int rndis_filter_open(struct netvsc_device *nvdev);
int rndis_filter_close(struct netvsc_device *nvdev);
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index fe01e14..1a942fe 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -84,7 +84,7 @@ static void netvsc_subchan_work(struct work_struct *w)
rdev = nvdev->extension;
if (rdev) {
- ret = rndis_set_subchannel(rdev->ndev, nvdev);
+ ret = rndis_set_subchannel(rdev->ndev, nvdev, NULL);
if (ret == 0) {
netif_device_attach(rdev->ndev);
} else {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 1c37a82..c9e2a98 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -856,6 +856,39 @@ static void netvsc_get_channels(struct net_device *net,
}
}
+/* Alloc struct netvsc_device_info, and initialize it from either existing
+ * struct netvsc_device, or from default values.
+ */
+static struct netvsc_device_info *netvsc_devinfo_get
+ (struct netvsc_device *nvdev)
+{
+ struct netvsc_device_info *dev_info;
+
+ dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
+
+ if (!dev_info)
+ return NULL;
+
+ if (nvdev) {
+ dev_info->num_chn = nvdev->num_chn;
+ dev_info->send_sections = nvdev->send_section_cnt;
+ dev_info->send_section_size = nvdev->send_section_size;
+ dev_info->recv_sections = nvdev->recv_section_cnt;
+ dev_info->recv_section_size = nvdev->recv_section_size;
+
+ memcpy(dev_info->rss_key, nvdev->extension->rss_key,
+ NETVSC_HASH_KEYLEN);
+ } else {
+ dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
+ dev_info->send_sections = NETVSC_DEFAULT_TX;
+ dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
+ dev_info->recv_sections = NETVSC_DEFAULT_RX;
+ dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
+ }
+
+ return dev_info;
+}
+
static int netvsc_detach(struct net_device *ndev,
struct netvsc_device *nvdev)
{
@@ -907,7 +940,7 @@ static int netvsc_attach(struct net_device *ndev,
return PTR_ERR(nvdev);
if (nvdev->num_chn > 1) {
- ret = rndis_set_subchannel(ndev, nvdev);
+ ret = rndis_set_subchannel(ndev, nvdev, dev_info);
/* if unavailable, just proceed with one queue */
if (ret) {
@@ -941,7 +974,7 @@ static int netvsc_set_channels(struct net_device *net,
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
unsigned int orig, count = channels->combined_count;
- struct netvsc_device_info device_info;
+ struct netvsc_device_info *device_info;
int ret;
/* We do not support separate count for rx, tx, or other */
@@ -960,24 +993,26 @@ static int netvsc_set_channels(struct net_device *net,
orig = nvdev->num_chn;
- memset(&device_info, 0, sizeof(device_info));
- device_info.num_chn = count;
- device_info.send_sections = nvdev->send_section_cnt;
- device_info.send_section_size = nvdev->send_section_size;
- device_info.recv_sections = nvdev->recv_section_cnt;
- device_info.recv_section_size = nvdev->recv_section_size;
+ device_info = netvsc_devinfo_get(nvdev);
+
+ if (!device_info)
+ return -ENOMEM;
+
+ device_info->num_chn = count;
ret = netvsc_detach(net, nvdev);
if (ret)
- return ret;
+ goto out;
- ret = netvsc_attach(net, &device_info);
+ ret = netvsc_attach(net, device_info);
if (ret) {
- device_info.num_chn = orig;
- if (netvsc_attach(net, &device_info))
+ device_info->num_chn = orig;
+ if (netvsc_attach(net, device_info))
netdev_err(net, "restoring channel setting failed\n");
}
+out:
+ kfree(device_info);
return ret;
}
@@ -1044,48 +1079,45 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
int orig_mtu = ndev->mtu;
- struct netvsc_device_info device_info;
+ struct netvsc_device_info *device_info;
int ret = 0;
if (!nvdev || nvdev->destroy)
return -ENODEV;
+ device_info = netvsc_devinfo_get(nvdev);
+
+ if (!device_info)
+ return -ENOMEM;
+
/* Change MTU of underlying VF netdev first. */
if (vf_netdev) {
ret = dev_set_mtu(vf_netdev, mtu);
if (ret)
- return ret;
+ goto out;
}
- memset(&device_info, 0, sizeof(device_info));
- device_info.num_chn = nvdev->num_chn;
- device_info.send_sections = nvdev->send_section_cnt;
- device_info.send_section_size = nvdev->send_section_size;
- device_info.recv_sections = nvdev->recv_section_cnt;
- device_info.recv_section_size = nvdev->recv_section_size;
-
ret = netvsc_detach(ndev, nvdev);
if (ret)
goto rollback_vf;
ndev->mtu = mtu;
- ret = netvsc_attach(ndev, &device_info);
- if (ret)
- goto rollback;
+ ret = netvsc_attach(ndev, device_info);
+ if (!ret)
+ goto out;
- return 0;
-
-rollback:
/* Attempt rollback to original MTU */
ndev->mtu = orig_mtu;
- if (netvsc_attach(ndev, &device_info))
+ if (netvsc_attach(ndev, device_info))
netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
if (vf_netdev)
dev_set_mtu(vf_netdev, orig_mtu);
+out:
+ kfree(device_info);
return ret;
}
@@ -1690,7 +1722,7 @@ static int netvsc_set_ringparam(struct net_device *ndev,
{
struct net_device_context *ndevctx = netdev_priv(ndev);
struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
- struct netvsc_device_info device_info;
+ struct netvsc_device_info *device_info;
struct ethtool_ringparam orig;
u32 new_tx, new_rx;
int ret = 0;
@@ -1710,26 +1742,29 @@ static int netvsc_set_ringparam(struct net_device *ndev,
new_rx == orig.rx_pending)
return 0; /* no change */
- memset(&device_info, 0, sizeof(device_info));
- device_info.num_chn = nvdev->num_chn;
- device_info.send_sections = new_tx;
- device_info.send_section_size = nvdev->send_section_size;
- device_info.recv_sections = new_rx;
- device_info.recv_section_size = nvdev->recv_section_size;
+ device_info = netvsc_devinfo_get(nvdev);
+
+ if (!device_info)
+ return -ENOMEM;
+
+ device_info->send_sections = new_tx;
+ device_info->recv_sections = new_rx;
ret = netvsc_detach(ndev, nvdev);
if (ret)
- return ret;
+ goto out;
- ret = netvsc_attach(ndev, &device_info);
+ ret = netvsc_attach(ndev, device_info);
if (ret) {
- device_info.send_sections = orig.tx_pending;
- device_info.recv_sections = orig.rx_pending;
+ device_info->send_sections = orig.tx_pending;
+ device_info->recv_sections = orig.rx_pending;
- if (netvsc_attach(ndev, &device_info))
+ if (netvsc_attach(ndev, device_info))
netdev_err(ndev, "restoring ringparam failed");
}
+out:
+ kfree(device_info);
return ret;
}
@@ -2158,7 +2193,7 @@ static int netvsc_probe(struct hv_device *dev,
{
struct net_device *net = NULL;
struct net_device_context *net_device_ctx;
- struct netvsc_device_info device_info;
+ struct netvsc_device_info *device_info = NULL;
struct netvsc_device *nvdev;
int ret = -ENOMEM;
@@ -2205,21 +2240,21 @@ static int netvsc_probe(struct hv_device *dev,
netif_set_real_num_rx_queues(net, 1);
/* Notify the netvsc driver of the new device */
- memset(&device_info, 0, sizeof(device_info));
- device_info.num_chn = VRSS_CHANNEL_DEFAULT;
- device_info.send_sections = NETVSC_DEFAULT_TX;
- device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
- device_info.recv_sections = NETVSC_DEFAULT_RX;
- device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
+ device_info = netvsc_devinfo_get(NULL);
- nvdev = rndis_filter_device_add(dev, &device_info);
+ if (!device_info) {
+ ret = -ENOMEM;
+ goto devinfo_failed;
+ }
+
+ nvdev = rndis_filter_device_add(dev, device_info);
if (IS_ERR(nvdev)) {
ret = PTR_ERR(nvdev);
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
goto rndis_failed;
}
- memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+ memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
@@ -2257,12 +2292,16 @@ static int netvsc_probe(struct hv_device *dev,
list_add(&net_device_ctx->list, &netvsc_dev_list);
rtnl_unlock();
+
+ kfree(device_info);
return 0;
register_failed:
rtnl_unlock();
rndis_filter_device_remove(dev, nvdev);
rndis_failed:
+ kfree(device_info);
+devinfo_failed:
free_percpu(net_device_ctx->vf_stats);
no_stats:
hv_set_drvdata(dev, NULL);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 2a5209f..53c6039 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -715,8 +715,8 @@ rndis_filter_set_offload_params(struct net_device *ndev,
return ret;
}
-int rndis_filter_set_rss_param(struct rndis_device *rdev,
- const u8 *rss_key)
+static int rndis_set_rss_param_msg(struct rndis_device *rdev,
+ const u8 *rss_key, u16 flag)
{
struct net_device *ndev = rdev->ndev;
struct rndis_request *request;
@@ -745,7 +745,7 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
rssp->hdr.type = NDIS_OBJECT_TYPE_RSS_PARAMETERS;
rssp->hdr.rev = NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2;
rssp->hdr.size = sizeof(struct ndis_recv_scale_param);
- rssp->flag = 0;
+ rssp->flag = flag;
rssp->hashinfo = NDIS_HASH_FUNC_TOEPLITZ | NDIS_HASH_IPV4 |
NDIS_HASH_TCP_IPV4 | NDIS_HASH_IPV6 |
NDIS_HASH_TCP_IPV6;
@@ -770,9 +770,12 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
wait_for_completion(&request->wait_event);
set_complete = &request->response_msg.msg.set_complete;
- if (set_complete->status == RNDIS_STATUS_SUCCESS)
- memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
- else {
+ if (set_complete->status == RNDIS_STATUS_SUCCESS) {
+ if (!(flag & NDIS_RSS_PARAM_FLAG_DISABLE_RSS) &&
+ !(flag & NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED))
+ memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
+
+ } else {
netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
set_complete->status);
ret = -EINVAL;
@@ -783,6 +786,16 @@ int rndis_filter_set_rss_param(struct rndis_device *rdev,
return ret;
}
+int rndis_filter_set_rss_param(struct rndis_device *rdev,
+ const u8 *rss_key)
+{
+ /* Disable RSS before change */
+ rndis_set_rss_param_msg(rdev, rss_key,
+ NDIS_RSS_PARAM_FLAG_DISABLE_RSS);
+
+ return rndis_set_rss_param_msg(rdev, rss_key, 0);
+}
+
static int rndis_filter_query_device_link_status(struct rndis_device *dev,
struct netvsc_device *net_device)
{
@@ -1062,7 +1075,9 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
* This breaks overlap of processing the host message for the
* new primary channel with the initialization of sub-channels.
*/
-int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
+int rndis_set_subchannel(struct net_device *ndev,
+ struct netvsc_device *nvdev,
+ struct netvsc_device_info *dev_info)
{
struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
struct net_device_context *ndev_ctx = netdev_priv(ndev);
@@ -1103,7 +1118,10 @@ int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
atomic_read(&nvdev->open_chn) == nvdev->num_chn);
/* ignore failues from setting rss parameters, still have channels */
- rndis_filter_set_rss_param(rdev, netvsc_hash_key);
+ if (dev_info)
+ rndis_filter_set_rss_param(rdev, dev_info->rss_key);
+ else
+ rndis_filter_set_rss_param(rdev, netvsc_hash_key);
netif_set_real_num_tx_queues(ndev, nvdev->num_chn);
netif_set_real_num_rx_queues(ndev, nvdev->num_chn);
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index b654f05..3d93993 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -739,8 +739,13 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
asix_read_cmd(dev, AX_CMD_STATMNGSTS_REG, 0, 0, 1, &chipcode, 0);
chipcode &= AX_CHIPCODE_MASK;
- (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
- ax88772a_hw_reset(dev, 0);
+ ret = (chipcode == AX_AX88772_CHIPCODE) ? ax88772_hw_reset(dev, 0) :
+ ax88772a_hw_reset(dev, 0);
+
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to reset AX88772: %d\n", ret);
+ return ret;
+ }
/* Read PHYID register *AFTER* the PHY was reset properly */
phyid = asix_get_phyid(dev);
diff --git a/drivers/net/wireless/cnss2/Makefile b/drivers/net/wireless/cnss2/Makefile
index 4859eb2..3d6b813 100644
--- a/drivers/net/wireless/cnss2/Makefile
+++ b/drivers/net/wireless/cnss2/Makefile
@@ -7,4 +7,5 @@
cnss2-y += debug.o
cnss2-y += pci.o
cnss2-y += power.o
+cnss2-$(CONFIG_CNSS2_DEBUG) += genl.o
cnss2-$(CONFIG_CNSS2_QMI) += qmi.o wlan_firmware_service_v01.o coexistence_service_v01.o
diff --git a/drivers/net/wireless/cnss2/bus.c b/drivers/net/wireless/cnss2/bus.c
index f808ca1..99ce869 100644
--- a/drivers/net/wireless/cnss2/bus.c
+++ b/drivers/net/wireless/cnss2/bus.c
@@ -124,6 +124,37 @@ int cnss_bus_alloc_fw_mem(struct cnss_plat_data *plat_priv)
}
}
+int cnss_bus_alloc_qdss_mem(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return -ENODEV;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ return cnss_pci_alloc_qdss_mem(plat_priv->bus_priv);
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return -EINVAL;
+ }
+}
+
+void cnss_bus_free_qdss_mem(struct cnss_plat_data *plat_priv)
+{
+ if (!plat_priv)
+ return;
+
+ switch (plat_priv->bus_type) {
+ case CNSS_BUS_PCI:
+ cnss_pci_free_qdss_mem(plat_priv->bus_priv);
+ return;
+ default:
+ cnss_pr_err("Unsupported bus type: %d\n",
+ plat_priv->bus_type);
+ return;
+ }
+}
+
u32 cnss_bus_get_wake_irq(struct cnss_plat_data *plat_priv)
{
if (!plat_priv)
diff --git a/drivers/net/wireless/cnss2/bus.h b/drivers/net/wireless/cnss2/bus.h
index ad5cb1b..710f92f 100644
--- a/drivers/net/wireless/cnss2/bus.h
+++ b/drivers/net/wireless/cnss2/bus.h
@@ -24,6 +24,8 @@ int cnss_bus_init(struct cnss_plat_data *plat_priv);
void cnss_bus_deinit(struct cnss_plat_data *plat_priv);
int cnss_bus_load_m3(struct cnss_plat_data *plat_priv);
int cnss_bus_alloc_fw_mem(struct cnss_plat_data *plat_priv);
+int cnss_bus_alloc_qdss_mem(struct cnss_plat_data *plat_priv);
+void cnss_bus_free_qdss_mem(struct cnss_plat_data *plat_priv);
u32 cnss_bus_get_wake_irq(struct cnss_plat_data *plat_priv);
int cnss_bus_force_fw_assert_hdlr(struct cnss_plat_data *plat_priv);
void cnss_bus_fw_boot_timeout_hdlr(struct timer_list *t);
diff --git a/drivers/net/wireless/cnss2/genl.c b/drivers/net/wireless/cnss2/genl.c
new file mode 100644
index 0000000..ecc6eb5
--- /dev/null
+++ b/drivers/net/wireless/cnss2/genl.c
@@ -0,0 +1,203 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#define pr_fmt(fmt) "cnss_genl: " fmt
+
+#include <linux/err.h>
+#include <linux/module.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "main.h"
+#include "debug.h"
+
+#define CNSS_GENL_FAMILY_NAME "cnss-genl"
+#define CNSS_GENL_MCAST_GROUP_NAME "cnss-genl-grp"
+#define CNSS_GENL_VERSION 1
+#define CNSS_GENL_DATA_LEN_MAX (15 * 1024)
+#define CNSS_GENL_STR_LEN_MAX 16
+
+enum {
+ CNSS_GENL_ATTR_MSG_UNSPEC,
+ CNSS_GENL_ATTR_MSG_TYPE,
+ CNSS_GENL_ATTR_MSG_FILE_NAME,
+ CNSS_GENL_ATTR_MSG_TOTAL_SIZE,
+ CNSS_GENL_ATTR_MSG_SEG_ID,
+ CNSS_GENL_ATTR_MSG_END,
+ CNSS_GENL_ATTR_MSG_DATA_LEN,
+ CNSS_GENL_ATTR_MSG_DATA,
+ __CNSS_GENL_ATTR_MAX,
+};
+
+#define CNSS_GENL_ATTR_MAX (__CNSS_GENL_ATTR_MAX - 1)
+
+enum {
+ CNSS_GENL_CMD_UNSPEC,
+ CNSS_GENL_CMD_MSG,
+ __CNSS_GENL_CMD_MAX,
+};
+
+#define CNSS_GENL_CMD_MAX (__CNSS_GENL_CMD_MAX - 1)
+
+static struct nla_policy cnss_genl_msg_policy[CNSS_GENL_ATTR_MAX + 1] = {
+ [CNSS_GENL_ATTR_MSG_TYPE] = { .type = NLA_U8 },
+ [CNSS_GENL_ATTR_MSG_FILE_NAME] = { .type = NLA_NUL_STRING,
+ .len = CNSS_GENL_STR_LEN_MAX },
+ [CNSS_GENL_ATTR_MSG_TOTAL_SIZE] = { .type = NLA_U32 },
+ [CNSS_GENL_ATTR_MSG_SEG_ID] = { .type = NLA_U32 },
+ [CNSS_GENL_ATTR_MSG_END] = { .type = NLA_U8 },
+ [CNSS_GENL_ATTR_MSG_DATA_LEN] = { .type = NLA_U32 },
+ [CNSS_GENL_ATTR_MSG_DATA] = { .type = NLA_BINARY,
+ .len = CNSS_GENL_DATA_LEN_MAX },
+};
+
+static int cnss_genl_process_msg(struct sk_buff *skb, struct genl_info *info)
+{
+ return 0;
+}
+
+static struct genl_ops cnss_genl_ops[] = {
+ {
+ .cmd = CNSS_GENL_CMD_MSG,
+ .policy = cnss_genl_msg_policy,
+ .doit = cnss_genl_process_msg,
+ },
+};
+
+static struct genl_multicast_group cnss_genl_mcast_grp[] = {
+ {
+ .name = CNSS_GENL_MCAST_GROUP_NAME,
+ },
+};
+
+static struct genl_family cnss_genl_family = {
+ .id = 0,
+ .hdrsize = 0,
+ .name = CNSS_GENL_FAMILY_NAME,
+ .version = CNSS_GENL_VERSION,
+ .maxattr = CNSS_GENL_ATTR_MAX,
+ .module = THIS_MODULE,
+ .ops = cnss_genl_ops,
+ .n_ops = ARRAY_SIZE(cnss_genl_ops),
+ .mcgrps = cnss_genl_mcast_grp,
+ .n_mcgrps = ARRAY_SIZE(cnss_genl_mcast_grp),
+};
+
+static int cnss_genl_send_data(u8 type, char *file_name, u32 total_size,
+ u32 seg_id, u8 end, u32 data_len, u8 *msg_buff)
+{
+ struct sk_buff *skb = NULL;
+ void *msg_header = NULL;
+ int ret = 0;
+ char filename[CNSS_GENL_STR_LEN_MAX + 1];
+
+ cnss_pr_dbg("type: %u, file_name %s, total_size: %x, seg_id %u, end %u, data_len %u\n",
+ type, file_name, total_size, seg_id, end, data_len);
+
+ if (!file_name)
+ strlcpy(filename, "default", sizeof(filename));
+ else
+ strlcpy(filename, file_name, sizeof(filename));
+
+ skb = genlmsg_new(NLMSG_HDRLEN +
+ nla_total_size(sizeof(type)) +
+ nla_total_size(strlen(filename) + 1) +
+ nla_total_size(sizeof(total_size)) +
+ nla_total_size(sizeof(seg_id)) +
+ nla_total_size(sizeof(end)) +
+ nla_total_size(sizeof(data_len)) +
+ nla_total_size(data_len), GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ msg_header = genlmsg_put(skb, 0, 0,
+ &cnss_genl_family, 0,
+ CNSS_GENL_CMD_MSG);
+ if (!msg_header) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ ret = nla_put_u8(skb, CNSS_GENL_ATTR_MSG_TYPE, type);
+ if (ret < 0)
+ goto fail;
+ ret = nla_put_string(skb, CNSS_GENL_ATTR_MSG_FILE_NAME, filename);
+ if (ret < 0)
+ goto fail;
+ ret = nla_put_u32(skb, CNSS_GENL_ATTR_MSG_TOTAL_SIZE, total_size);
+ if (ret < 0)
+ goto fail;
+ ret = nla_put_u32(skb, CNSS_GENL_ATTR_MSG_SEG_ID, seg_id);
+ if (ret < 0)
+ goto fail;
+ ret = nla_put_u8(skb, CNSS_GENL_ATTR_MSG_END, end);
+ if (ret < 0)
+ goto fail;
+ ret = nla_put_u32(skb, CNSS_GENL_ATTR_MSG_DATA_LEN, data_len);
+ if (ret < 0)
+ goto fail;
+ ret = nla_put(skb, CNSS_GENL_ATTR_MSG_DATA, data_len, msg_buff);
+ if (ret < 0)
+ goto fail;
+
+ genlmsg_end(skb, msg_header);
+ ret = genlmsg_multicast(&cnss_genl_family, skb, 0, 0, GFP_KERNEL);
+	if (ret < 0)
+		cnss_pr_err("genl multicast fail: %d (skb consumed)\n", ret);
+
+ return ret;
+fail:
+ cnss_pr_err("genl msg send fail: %d\n", ret);
+ if (skb)
+ nlmsg_free(skb);
+ return ret;
+}
+
+int cnss_genl_send_msg(void *buff, u8 type, char *file_name, u32 total_size)
+{
+ int ret = 0;
+ u8 *msg_buff = buff;
+ u32 remaining = total_size;
+ u32 seg_id = 0;
+ u32 data_len = 0;
+ u8 end = 0;
+
+ cnss_pr_dbg("type: %u, total_size: %x\n", type, total_size);
+
+ while (remaining) {
+ if (remaining > CNSS_GENL_DATA_LEN_MAX) {
+ data_len = CNSS_GENL_DATA_LEN_MAX;
+ } else {
+ data_len = remaining;
+ end = 1;
+ }
+ ret = cnss_genl_send_data(type, file_name, total_size,
+ seg_id, end, data_len, msg_buff);
+ if (ret < 0) {
+ cnss_pr_err("fail to send genl data, ret %d\n", ret);
+ return ret;
+ }
+
+ remaining -= data_len;
+ msg_buff += data_len;
+ seg_id++;
+ }
+
+ return ret;
+}
+
+int cnss_genl_init(void)
+{
+ int ret = 0;
+
+ ret = genl_register_family(&cnss_genl_family);
+ if (ret != 0)
+ cnss_pr_err("genl_register_family fail: %d\n", ret);
+
+ return ret;
+}
+
+void cnss_genl_exit(void)
+{
+ genl_unregister_family(&cnss_genl_family);
+}
diff --git a/drivers/net/wireless/cnss2/genl.h b/drivers/net/wireless/cnss2/genl.h
new file mode 100644
index 0000000..33ca30a
--- /dev/null
+++ b/drivers/net/wireless/cnss2/genl.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved. */
+
+#ifndef __CNSS_GENL_H__
+#define __CNSS_GENL_H__
+
+enum cnss_genl_msg_type {
+ CNSS_GENL_MSG_TYPE_UNSPEC,
+ CNSS_GENL_MSG_TYPE_QDSS,
+};
+
+#ifdef CONFIG_CNSS2_DEBUG
+int cnss_genl_init(void);
+void cnss_genl_exit(void);
+int cnss_genl_send_msg(void *buff, u8 type,
+ char *file_name, u32 total_size);
+#else
+static inline int cnss_genl_init(void)
+{
+ return 0;
+}
+
+static inline void cnss_genl_exit(void)
+{
+}
+
+static inline int cnss_genl_send_msg(void *buff, u8 type,
+ char *file_name, u32 total_size)
+{
+ return 0;
+}
+#endif
+
+#endif
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index c6c66f2..898d59a 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -16,6 +16,7 @@
#include "main.h"
#include "bus.h"
#include "debug.h"
+#include "genl.h"
#define CNSS_DUMP_FORMAT_VER 0x11
#define CNSS_DUMP_FORMAT_VER_V2 0x22
@@ -484,6 +485,12 @@ static char *cnss_driver_event_to_str(enum cnss_driver_event_type type)
return "POWER_UP";
case CNSS_DRIVER_EVENT_POWER_DOWN:
return "POWER_DOWN";
+ case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
+ return "QDSS_TRACE_REQ_MEM";
+ case CNSS_DRIVER_EVENT_QDSS_TRACE_SAVE:
+ return "QDSS_TRACE_SAVE";
+ case CNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
+ return "QDSS_TRACE_FREE";
case CNSS_DRIVER_EVENT_MAX:
return "EVENT_MAX";
}
@@ -1137,6 +1144,109 @@ static int cnss_power_down_hdlr(struct cnss_plat_data *plat_priv)
return 0;
}
+static int cnss_qdss_trace_req_mem_hdlr(struct cnss_plat_data *plat_priv)
+{
+ int ret = 0;
+
+ ret = cnss_bus_alloc_qdss_mem(plat_priv);
+ if (ret < 0)
+ return ret;
+
+ return cnss_wlfw_qdss_trace_mem_info_send_sync(plat_priv);
+}
+
+static void *cnss_qdss_trace_pa_to_va(struct cnss_plat_data *plat_priv,
+ u64 pa, u32 size, int *seg_id)
+{
+ int i = 0;
+ struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+ u64 offset = 0;
+ void *va = NULL;
+ u64 local_pa;
+ u32 local_size;
+
+ for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+ local_pa = (u64)qdss_mem[i].pa;
+ local_size = (u32)qdss_mem[i].size;
+ if (pa == local_pa && size <= local_size) {
+ va = qdss_mem[i].va;
+ break;
+ }
+ if (pa > local_pa &&
+ pa < local_pa + local_size &&
+ pa + size <= local_pa + local_size) {
+ offset = pa - local_pa;
+ va = qdss_mem[i].va + offset;
+ break;
+ }
+ }
+
+ *seg_id = i;
+ return va;
+}
+
+static int cnss_qdss_trace_save_hdlr(struct cnss_plat_data *plat_priv,
+ void *data)
+{
+ struct cnss_qmi_event_qdss_trace_save_data *event_data = data;
+ struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+ int ret = 0;
+ int i;
+ void *va = NULL;
+ u64 pa;
+ u32 size;
+ int seg_id = 0;
+
+ if (!plat_priv->qdss_mem_seg_len) {
+ cnss_pr_err("Memory for QDSS trace is not available\n");
+ return -ENOMEM;
+ }
+
+ if (event_data->mem_seg_len == 0) {
+ for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+ ret = cnss_genl_send_msg(qdss_mem[i].va,
+ CNSS_GENL_MSG_TYPE_QDSS,
+ event_data->file_name,
+ qdss_mem[i].size);
+ if (ret < 0) {
+ cnss_pr_err("Fail to save QDSS data: %d\n",
+ ret);
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < event_data->mem_seg_len; i++) {
+ pa = event_data->mem_seg[i].addr;
+ size = event_data->mem_seg[i].size;
+ va = cnss_qdss_trace_pa_to_va(plat_priv, pa,
+ size, &seg_id);
+ if (!va) {
+ cnss_pr_err("Fail to find matching va for pa %pa\n",
+					    &pa);
+ ret = -EINVAL;
+ break;
+ }
+ ret = cnss_genl_send_msg(va, CNSS_GENL_MSG_TYPE_QDSS,
+ event_data->file_name, size);
+ if (ret < 0) {
+ cnss_pr_err("Fail to save QDSS data: %d\n",
+ ret);
+ break;
+ }
+ }
+ }
+
+ kfree(data);
+ return ret;
+}
+
+static int cnss_qdss_trace_free_hdlr(struct cnss_plat_data *plat_priv)
+{
+ cnss_bus_free_qdss_mem(plat_priv);
+
+ return 0;
+}
+
static void cnss_driver_event_work(struct work_struct *work)
{
struct cnss_plat_data *plat_priv =
@@ -1210,6 +1320,16 @@ static void cnss_driver_event_work(struct work_struct *work)
case CNSS_DRIVER_EVENT_POWER_DOWN:
ret = cnss_power_down_hdlr(plat_priv);
break;
+ case CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM:
+ ret = cnss_qdss_trace_req_mem_hdlr(plat_priv);
+ break;
+ case CNSS_DRIVER_EVENT_QDSS_TRACE_SAVE:
+ ret = cnss_qdss_trace_save_hdlr(plat_priv,
+ event->data);
+ break;
+ case CNSS_DRIVER_EVENT_QDSS_TRACE_FREE:
+ ret = cnss_qdss_trace_free_hdlr(plat_priv);
+ break;
default:
cnss_pr_err("Invalid driver event type: %d",
event->type);
@@ -1746,6 +1866,10 @@ static int cnss_probe(struct platform_device *plat_dev)
cnss_register_coex_service(plat_priv);
+ ret = cnss_genl_init();
+ if (ret < 0)
+ cnss_pr_err("CNSS genl init failed %d\n", ret);
+
cnss_pr_info("Platform driver probed successfully.\n");
return 0;
@@ -1781,6 +1905,7 @@ static int cnss_remove(struct platform_device *plat_dev)
{
struct cnss_plat_data *plat_priv = platform_get_drvdata(plat_dev);
+ cnss_genl_exit();
cnss_unregister_coex_service(plat_priv);
cnss_misc_deinit(plat_priv);
cnss_debugfs_destroy(plat_priv);
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index ac87b7d..2756d55 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -161,6 +161,9 @@ enum cnss_driver_event_type {
CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
CNSS_DRIVER_EVENT_POWER_UP,
CNSS_DRIVER_EVENT_POWER_DOWN,
+ CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM,
+ CNSS_DRIVER_EVENT_QDSS_TRACE_SAVE,
+ CNSS_DRIVER_EVENT_QDSS_TRACE_FREE,
CNSS_DRIVER_EVENT_MAX,
};
@@ -274,6 +277,9 @@ struct cnss_plat_data {
u32 fw_mem_seg_len;
struct cnss_fw_mem fw_mem[QMI_WLFW_MAX_NUM_MEM_SEG];
struct cnss_fw_mem m3_mem;
+ u32 qdss_mem_seg_len;
+ struct cnss_fw_mem qdss_mem[QMI_WLFW_MAX_NUM_MEM_SEG];
+ u32 *qdss_reg;
struct cnss_pin_connect_result pin_result;
struct dentry *root_dentry;
atomic_t pm_count;
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 582aae0..163c84a 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -88,6 +88,13 @@ static DEFINE_SPINLOCK(pci_link_down_lock);
#define QCA6390_CE_REG_INTERVAL 0x2000
+#define QDSS_APB_DEC_CSR_BASE 0x1C01000
+
+#define QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET 0x6C
+#define QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET 0x70
+#define QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET 0x74
+#define QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET 0x78
+
#define MAX_UNWINDOWED_ADDRESS 0x80000
#define WINDOW_ENABLE_BIT 0x40000000
#define WINDOW_SHIFT 19
@@ -128,6 +135,14 @@ static struct cnss_pci_reg ce_cmn[] = {
{ NULL },
};
+static struct cnss_pci_reg qdss_csr[] = {
+ { "QDSSCSR_ETRIRQCTRL", QDSS_APB_DEC_CSR_ETRIRQCTRL_OFFSET },
+ { "QDSSCSR_PRESERVEETF", QDSS_APB_DEC_CSR_PRESERVEETF_OFFSET },
+ { "QDSSCSR_PRESERVEETR0", QDSS_APB_DEC_CSR_PRESERVEETR0_OFFSET },
+ { "QDSSCSR_PRESERVEETR1", QDSS_APB_DEC_CSR_PRESERVEETR1_OFFSET },
+ { NULL },
+};
+
static void cnss_pci_select_window(struct cnss_pci_data *pci_priv, u32 offset)
{
u32 window = (offset >> WINDOW_SHIFT) & WINDOW_VALUE_MASK;
@@ -669,6 +684,8 @@ static int cnss_qca6290_shutdown(struct cnss_pci_data *pci_priv)
cnss_power_off_device(plat_priv);
+ pci_priv->remap_window = 0;
+
clear_bit(CNSS_FW_READY, &plat_priv->driver_state);
clear_bit(CNSS_FW_MEM_READY, &plat_priv->driver_state);
clear_bit(CNSS_DRIVER_UNLOADING, &plat_priv->driver_state);
@@ -1493,6 +1510,63 @@ int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv)
return 0;
}
+int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+ int i, j;
+
+ for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+ if (!qdss_mem[i].va && qdss_mem[i].size) {
+ qdss_mem[i].va =
+ dma_alloc_coherent(&pci_priv->pci_dev->dev,
+ qdss_mem[i].size,
+ &qdss_mem[i].pa,
+ GFP_KERNEL);
+ if (!qdss_mem[i].va) {
+				cnss_pr_err("Failed to allocate QDSS memory for FW, size: 0x%zx, type: %u, chunk-ID: %d\n",
+ qdss_mem[i].size,
+ qdss_mem[i].type, i);
+ break;
+ }
+ }
+ }
+
+ /* Best-effort allocation for QDSS trace */
+ if (i < plat_priv->qdss_mem_seg_len) {
+ for (j = i; j < plat_priv->qdss_mem_seg_len; j++) {
+ qdss_mem[j].type = 0;
+ qdss_mem[j].size = 0;
+ }
+ plat_priv->qdss_mem_seg_len = i;
+ }
+
+ return 0;
+}
+
+void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+ int i;
+
+ for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+ if (qdss_mem[i].va && qdss_mem[i].size) {
+ cnss_pr_dbg("Freeing memory for QDSS: pa: %pa, size: 0x%zx, type: %u\n",
+ &qdss_mem[i].pa, qdss_mem[i].size,
+ qdss_mem[i].type);
+ dma_free_coherent(&pci_priv->pci_dev->dev,
+ qdss_mem[i].size, qdss_mem[i].va,
+ qdss_mem[i].pa);
+ qdss_mem[i].va = NULL;
+ qdss_mem[i].pa = 0;
+ qdss_mem[i].size = 0;
+ qdss_mem[i].type = 0;
+ }
+ }
+ plat_priv->qdss_mem_seg_len = 0;
+}
+
static void cnss_pci_free_fw_mem(struct cnss_pci_data *pci_priv)
{
struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
@@ -1978,6 +2052,30 @@ static char *cnss_mhi_state_to_str(enum cnss_mhi_state mhi_state)
}
};
+static void cnss_pci_dump_qdss_reg(struct cnss_pci_data *pci_priv)
+{
+ struct cnss_plat_data *plat_priv = pci_priv->plat_priv;
+ int i, array_size = ARRAY_SIZE(qdss_csr) - 1;
+ gfp_t gfp = GFP_KERNEL;
+ u32 reg_offset;
+
+ if (in_interrupt() || irqs_disabled())
+ gfp = GFP_ATOMIC;
+
+ if (!plat_priv->qdss_reg)
+ plat_priv->qdss_reg = devm_kzalloc(&pci_priv->pci_dev->dev,
+ sizeof(*plat_priv->qdss_reg)
+ * array_size, gfp);
+
+ for (i = 0; qdss_csr[i].name; i++) {
+ reg_offset = QDSS_APB_DEC_CSR_BASE + qdss_csr[i].offset;
+ plat_priv->qdss_reg[i] = cnss_pci_reg_read(pci_priv,
+ reg_offset);
+ cnss_pr_dbg("%s[0x%x] = 0x%x\n", qdss_csr[i].name, reg_offset,
+ plat_priv->qdss_reg[i]);
+ }
+}
+
static void cnss_pci_dump_ce_reg(struct cnss_pci_data *pci_priv,
enum cnss_ce_index ce)
{
@@ -2047,6 +2145,8 @@ void cnss_pci_collect_dump_info(struct cnss_pci_data *pci_priv, bool in_panic)
return;
}
+ cnss_pci_dump_qdss_reg(pci_priv);
+
ret = mhi_download_rddm_img(pci_priv->mhi_ctrl, in_panic);
if (ret) {
cnss_pr_err("Failed to download RDDM image, err = %d\n", ret);
@@ -2687,6 +2787,7 @@ static void cnss_pci_remove(struct pci_dev *pci_dev)
cnss_pci_free_m3_mem(pci_priv);
cnss_pci_free_fw_mem(pci_priv);
+ cnss_pci_free_qdss_mem(pci_priv);
switch (pci_dev->device) {
case QCA6290_DEVICE_ID:
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 43e42d1..ed28e86 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -123,6 +123,8 @@ int cnss_resume_pci_link(struct cnss_pci_data *pci_priv);
int cnss_pci_init(struct cnss_plat_data *plat_priv);
void cnss_pci_deinit(struct cnss_plat_data *plat_priv);
int cnss_pci_alloc_fw_mem(struct cnss_pci_data *pci_priv);
+int cnss_pci_alloc_qdss_mem(struct cnss_pci_data *pci_priv);
+void cnss_pci_free_qdss_mem(struct cnss_pci_data *pci_priv);
int cnss_pci_load_m3(struct cnss_pci_data *pci_priv);
int cnss_pci_set_mhi_state(struct cnss_pci_data *pci_priv,
enum cnss_mhi_state state);
diff --git a/drivers/net/wireless/cnss2/qmi.c b/drivers/net/wireless/cnss2/qmi.c
index dc60e9e..e21f182 100644
--- a/drivers/net/wireless/cnss2/qmi.c
+++ b/drivers/net/wireless/cnss2/qmi.c
@@ -13,6 +13,7 @@
#define WLFW_SERVICE_INS_ID_V01 1
#define WLFW_CLIENT_ID 0x4b4e454c
#define MAX_BDF_FILE_NAME 13
+#define BDF_FILE_NAME_PREFIX "bdwlan"
#define ELF_BDF_FILE_NAME "bdwlan.elf"
#define ELF_BDF_FILE_NAME_PREFIX "bdwlan.e"
#define BIN_BDF_FILE_NAME "bdwlan.bin"
@@ -84,6 +85,12 @@ static int cnss_wlfw_ind_register_send_sync(struct cnss_plat_data *plat_priv)
req->pin_connect_result_enable = 1;
req->cal_done_enable_valid = 1;
req->cal_done_enable = 1;
+ req->qdss_trace_req_mem_enable_valid = 1;
+ req->qdss_trace_req_mem_enable = 1;
+ req->qdss_trace_save_enable_valid = 1;
+ req->qdss_trace_save_enable = 1;
+ req->qdss_trace_free_enable_valid = 1;
+ req->qdss_trace_free_enable = 1;
ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
wlfw_ind_register_resp_msg_v01_ei, resp);
@@ -436,8 +443,9 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
plat_priv->board_info.board_id);
else
snprintf(filename, sizeof(filename),
- ELF_BDF_FILE_NAME_PREFIX "%04x",
- plat_priv->board_info.board_id);
+ BDF_FILE_NAME_PREFIX "%02x.e%02x",
+ plat_priv->board_info.board_id >> 8 & 0xFF,
+ plat_priv->board_info.board_id & 0xFF);
break;
case CNSS_BDF_BIN:
if (plat_priv->board_info.board_id == 0xFF)
@@ -448,8 +456,9 @@ int cnss_wlfw_bdf_dnld_send_sync(struct cnss_plat_data *plat_priv,
plat_priv->board_info.board_id);
else
snprintf(filename, sizeof(filename),
- BIN_BDF_FILE_NAME_PREFIX "%04x",
- plat_priv->board_info.board_id);
+ BDF_FILE_NAME_PREFIX "%02x.b%02x",
+ plat_priv->board_info.board_id >> 8 & 0xFF,
+ plat_priv->board_info.board_id & 0xFF);
break;
case CNSS_BDF_REGDB:
snprintf(filename, sizeof(filename), REGDB_FILE_NAME);
@@ -1201,6 +1210,82 @@ int cnss_wlfw_antenna_grant_send_sync(struct cnss_plat_data *plat_priv)
return ret;
}
+int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv)
+{
+ struct wlfw_qdss_trace_mem_info_req_msg_v01 *req;
+ struct wlfw_qdss_trace_mem_info_resp_msg_v01 *resp;
+ struct qmi_txn txn;
+ struct cnss_fw_mem *qdss_mem = plat_priv->qdss_mem;
+ int ret = 0;
+ int i;
+
+ cnss_pr_dbg("Sending QDSS trace mem info, state: 0x%lx\n",
+ plat_priv->driver_state);
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+ if (!resp) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ req->mem_seg_len = plat_priv->qdss_mem_seg_len;
+ for (i = 0; i < req->mem_seg_len; i++) {
+ cnss_pr_dbg("Memory for FW, va: 0x%pK, pa: %pa, size: 0x%zx, type: %u\n",
+ qdss_mem[i].va, &qdss_mem[i].pa,
+ qdss_mem[i].size, qdss_mem[i].type);
+
+ req->mem_seg[i].addr = qdss_mem[i].pa;
+ req->mem_seg[i].size = qdss_mem[i].size;
+ req->mem_seg[i].type = qdss_mem[i].type;
+ }
+
+ ret = qmi_txn_init(&plat_priv->qmi_wlfw, &txn,
+ wlfw_qdss_trace_mem_info_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ cnss_pr_err("Fail to initialize txn for QDSS trace mem request: err %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(&plat_priv->qmi_wlfw, NULL, &txn,
+ QMI_WLFW_QDSS_TRACE_MEM_INFO_REQ_V01,
+ WLFW_QDSS_TRACE_MEM_INFO_REQ_MSG_V01_MAX_MSG_LEN,
+ wlfw_qdss_trace_mem_info_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(&txn);
+ cnss_pr_err("Fail to send QDSS trace mem info request: err %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(&txn, QMI_WLFW_TIMEOUT_JF);
+ if (ret < 0) {
+ cnss_pr_err("Fail to wait for response of QDSS trace mem info request, err %d\n",
+ ret);
+ goto out;
+ }
+
+ if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ cnss_pr_err("QDSS trace mem info request failed, result: %d, err: %d\n",
+ resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ goto out;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+
+out:
+ kfree(req);
+ kfree(resp);
+ return ret;
+}
+
unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv)
{
cnss_pr_dbg("QMI timeout is %u ms\n", QMI_WLFW_TIMEOUT_MS);
@@ -1336,6 +1421,118 @@ static void cnss_wlfw_cal_done_ind_cb(struct qmi_handle *qmi_wlfw,
0, NULL);
}
+static void cnss_wlfw_qdss_trace_req_mem_ind_cb(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+ const struct wlfw_qdss_trace_req_mem_ind_msg_v01 *ind_msg = data;
+ int i;
+
+ cnss_pr_dbg("Received QMI WLFW QDSS trace request mem indication\n");
+
+ if (!txn) {
+ cnss_pr_err("Spurious indication\n");
+ return;
+ }
+
+	if (plat_priv->qdss_mem_seg_len || ind_msg->mem_seg_len > QMI_WLFW_MAX_NUM_MEM_SEG_V01) {
+		cnss_pr_err("Invalid QDSS mem seg len %u, current len %u\n",
+			    ind_msg->mem_seg_len, plat_priv->qdss_mem_seg_len);
+		return;
+	}
+
+ plat_priv->qdss_mem_seg_len = ind_msg->mem_seg_len;
+ for (i = 0; i < plat_priv->qdss_mem_seg_len; i++) {
+ cnss_pr_dbg("QDSS requests for memory, size: 0x%zx, type: %u\n",
+ ind_msg->mem_seg[i].size, ind_msg->mem_seg[i].type);
+ plat_priv->qdss_mem[i].type = ind_msg->mem_seg[i].type;
+ plat_priv->qdss_mem[i].size = ind_msg->mem_seg[i].size;
+ }
+
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_REQ_MEM,
+ 0, NULL);
+}
+
+static void cnss_wlfw_qdss_trace_save_ind_cb(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+ const struct wlfw_qdss_trace_save_ind_msg_v01 *ind_msg = data;
+ struct cnss_qmi_event_qdss_trace_save_data *event_data;
+ int i = 0;
+
+ cnss_pr_dbg("Received QMI WLFW QDSS trace save indication\n");
+
+ if (!txn) {
+ cnss_pr_err("Spurious indication\n");
+ return;
+ }
+
+ cnss_pr_dbg("QDSS_trace_save info: source %u, total_size %u, file_name_valid %u, file_name %s\n",
+ ind_msg->source, ind_msg->total_size,
+ ind_msg->file_name_valid, ind_msg->file_name);
+
+ if (ind_msg->source == 1)
+ return;
+
+ event_data = kzalloc(sizeof(*event_data), GFP_KERNEL);
+ if (!event_data)
+ return;
+
+ if (ind_msg->mem_seg_valid) {
+ if (ind_msg->mem_seg_len > QDSS_TRACE_SEG_LEN_MAX) {
+ cnss_pr_err("Invalid seg len %u\n",
+ ind_msg->mem_seg_len);
+ goto free_event_data;
+ }
+ cnss_pr_dbg("QDSS_trace_save seg len %u\n",
+ ind_msg->mem_seg_len);
+ event_data->mem_seg_len = ind_msg->mem_seg_len;
+ for (i = 0; i < ind_msg->mem_seg_len; i++) {
+ event_data->mem_seg[i].addr = ind_msg->mem_seg[i].addr;
+ event_data->mem_seg[i].size = ind_msg->mem_seg[i].size;
+ cnss_pr_dbg("seg-%d: addr 0x%llx size 0x%x\n",
+ i, ind_msg->mem_seg[i].addr,
+ ind_msg->mem_seg[i].size);
+ }
+ }
+
+ event_data->total_size = ind_msg->total_size;
+
+ if (ind_msg->file_name_valid)
+ strlcpy(event_data->file_name, ind_msg->file_name,
+ QDSS_TRACE_FILE_NAME_MAX + 1);
+ else
+ strlcpy(event_data->file_name, "qdss_trace",
+ QDSS_TRACE_FILE_NAME_MAX + 1);
+
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_SAVE,
+ 0, event_data);
+
+ return;
+
+free_event_data:
+ kfree(event_data);
+}
+
+static void cnss_wlfw_qdss_trace_free_ind_cb(struct qmi_handle *qmi_wlfw,
+ struct sockaddr_qrtr *sq,
+ struct qmi_txn *txn,
+ const void *data)
+{
+ struct cnss_plat_data *plat_priv =
+ container_of(qmi_wlfw, struct cnss_plat_data, qmi_wlfw);
+
+ cnss_driver_event_post(plat_priv, CNSS_DRIVER_EVENT_QDSS_TRACE_FREE,
+ 0, NULL);
+}
+
static struct qmi_msg_handler qmi_wlfw_msg_handlers[] = {
{
.type = QMI_INDICATION,
@@ -1380,6 +1577,30 @@ static struct qmi_msg_handler qmi_wlfw_msg_handlers[] = {
.decoded_size = sizeof(struct wlfw_cal_done_ind_msg_v01),
.fn = cnss_wlfw_cal_done_ind_cb
},
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_QDSS_TRACE_REQ_MEM_IND_V01,
+ .ei = wlfw_qdss_trace_req_mem_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct wlfw_qdss_trace_req_mem_ind_msg_v01),
+ .fn = cnss_wlfw_qdss_trace_req_mem_ind_cb
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_QDSS_TRACE_SAVE_IND_V01,
+ .ei = wlfw_qdss_trace_save_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct wlfw_qdss_trace_save_ind_msg_v01),
+ .fn = cnss_wlfw_qdss_trace_save_ind_cb
+ },
+ {
+ .type = QMI_INDICATION,
+ .msg_id = QMI_WLFW_QDSS_TRACE_FREE_IND_V01,
+ .ei = wlfw_qdss_trace_free_ind_msg_v01_ei,
+ .decoded_size =
+ sizeof(struct wlfw_qdss_trace_free_ind_msg_v01),
+ .fn = cnss_wlfw_qdss_trace_free_ind_cb
+ },
{}
};
diff --git a/drivers/net/wireless/cnss2/qmi.h b/drivers/net/wireless/cnss2/qmi.h
index 137d549..784aadc 100644
--- a/drivers/net/wireless/cnss2/qmi.h
+++ b/drivers/net/wireless/cnss2/qmi.h
@@ -8,15 +8,30 @@
struct cnss_plat_data;
-#ifdef CONFIG_CNSS2_QMI
-#include "wlan_firmware_service_v01.h"
-#include "coexistence_service_v01.h"
-
struct cnss_qmi_event_server_arrive_data {
unsigned int node;
unsigned int port;
};
+#define QDSS_TRACE_SEG_LEN_MAX 32
+#define QDSS_TRACE_FILE_NAME_MAX 16
+
+struct cnss_mem_seg {
+ u64 addr;
+ u32 size;
+};
+
+struct cnss_qmi_event_qdss_trace_save_data {
+ u32 total_size;
+ u32 mem_seg_len;
+ struct cnss_mem_seg mem_seg[QDSS_TRACE_SEG_LEN_MAX];
+ char file_name[QDSS_TRACE_FILE_NAME_MAX + 1];
+};
+
+#ifdef CONFIG_CNSS2_QMI
+#include "wlan_firmware_service_v01.h"
+#include "coexistence_service_v01.h"
+
int cnss_qmi_init(struct cnss_plat_data *plat_priv);
void cnss_qmi_deinit(struct cnss_plat_data *plat_priv);
unsigned int cnss_get_qmi_timeout(struct cnss_plat_data *plat_priv);
@@ -46,7 +61,7 @@ int cnss_register_coex_service(struct cnss_plat_data *plat_priv);
void cnss_unregister_coex_service(struct cnss_plat_data *plat_priv);
int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv);
int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv);
-
+int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv);
#else
#define QMI_WLFW_TIMEOUT_MS 10000
@@ -165,6 +180,12 @@ int coex_antenna_switch_to_wlan_send_sync_msg(struct cnss_plat_data *plat_priv)
static inline
int coex_antenna_switch_to_mdm_send_sync_msg(struct cnss_plat_data *plat_priv)
{
	return 0;
}
+
+static inline
+int cnss_wlfw_qdss_trace_mem_info_send_sync(struct cnss_plat_data *plat_priv)
+{
+	return 0;
+}
diff --git a/drivers/nfc/nq-nci.c b/drivers/nfc/nq-nci.c
index 354bece..2db1bd1 100644
--- a/drivers/nfc/nq-nci.c
+++ b/drivers/nfc/nq-nci.c
@@ -1295,7 +1295,7 @@ static int nqx_probe(struct i2c_client *client,
gpio_free(platform_data->clkreq_gpio);
err_ese_gpio:
/* optional gpio, not sure was configured in probe */
- if (nqx_dev->ese_gpio > 0)
+ if (gpio_is_valid(platform_data->ese_gpio))
gpio_free(platform_data->ese_gpio);
err_firm_gpio:
gpio_free(platform_data->firm_gpio);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 815509d..da8f5ad 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -531,8 +531,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0);
ctrl->ana_log_size = sizeof(struct nvme_ana_rsp_hdr) +
ctrl->nanagrpid * sizeof(struct nvme_ana_group_desc);
- if (!(ctrl->anacap & (1 << 6)))
- ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
+ ctrl->ana_log_size += ctrl->max_namespaces * sizeof(__le32);
if (ctrl->ana_log_size > ctrl->max_hw_sectors << SECTOR_SHIFT) {
dev_err(ctrl->device,
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index b6a28de..0939a4e 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1672,18 +1672,28 @@ static enum blk_eh_timer_return
nvme_rdma_timeout(struct request *rq, bool reserved)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_rdma_queue *queue = req->queue;
+ struct nvme_rdma_ctrl *ctrl = queue->ctrl;
- dev_warn(req->queue->ctrl->ctrl.device,
- "I/O %d QID %d timeout, reset controller\n",
- rq->tag, nvme_rdma_queue_idx(req->queue));
+ dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
+ rq->tag, nvme_rdma_queue_idx(queue));
- /* queue error recovery */
- nvme_rdma_error_recovery(req->queue->ctrl);
+ if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
+ /*
+ * Teardown immediately if controller times out while starting
+ * or we are already started error recovery. all outstanding
+ * requests are completed on shutdown, so we return BLK_EH_DONE.
+ */
+ flush_work(&ctrl->err_work);
+ nvme_rdma_teardown_io_queues(ctrl, false);
+ nvme_rdma_teardown_admin_queue(ctrl, false);
+ return BLK_EH_DONE;
+ }
- /* fail with DNR on cmd timeout */
- nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
+ dev_warn(ctrl->ctrl.device, "starting error recovery\n");
+ nvme_rdma_error_recovery(ctrl);
- return BLK_EH_DONE;
+ return BLK_EH_RESET_TIMER;
}
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/phy/qualcomm/phy-ath79-usb.c b/drivers/phy/qualcomm/phy-ath79-usb.c
index 6fd6e07..09a77e5 100644
--- a/drivers/phy/qualcomm/phy-ath79-usb.c
+++ b/drivers/phy/qualcomm/phy-ath79-usb.c
@@ -31,7 +31,7 @@ static int ath79_usb_phy_power_on(struct phy *phy)
err = reset_control_deassert(priv->reset);
if (err && priv->no_suspend_override)
- reset_control_assert(priv->no_suspend_override);
+ reset_control_deassert(priv->no_suspend_override);
return err;
}
@@ -69,7 +69,7 @@ static int ath79_usb_phy_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- priv->reset = devm_reset_control_get(&pdev->dev, "usb-phy");
+ priv->reset = devm_reset_control_get(&pdev->dev, "phy");
if (IS_ERR(priv->reset))
return PTR_ERR(priv->reset);
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h
index 9d8e8a2..7392102 100644
--- a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hw_common_ex.h
@@ -8,21 +8,17 @@
/* VLVL defs are available for 854 */
#define FEATURE_VLVL_DEFS true
-/* IPAv4 version flag for Sdx24 */
-#define FEATURE_IPA_HW_VERSION_4_0 true
+#define FEATURE_IPA_HW_VERSION_4_5 true
/* Important Platform Specific Values : IRQ_NUM, IRQ_CNT, BCR */
-#define IPA_HW_BAM_IRQ_NUM 440
+#define IPA_HW_BAM_IRQ_NUM 639
/* Q6 IRQ number for IPA. */
-#define IPA_HW_IRQ_NUM 441
+#define IPA_HW_IRQ_NUM 640
/* Total number of different interrupts that can be enabled */
#define IPA_HW_IRQ_CNT_TOTAL 23
-/* IPAv4 BCR value */
-#define IPA_HW_BCR_REG_VAL 0x00000039
-
/* IPAv4 spare reg value */
#define IPA_HW_SPARE_1_REG_VAL 0xC0000005
@@ -50,6 +46,8 @@
/* HPS Sequences */
#define IPA_HW_PKT_PROCESS_HPS_DMA 0x0
+#define IPA_HW_PKT_PROCESS_HPS_DMA_DECIPH_CIPHE 0x1
+#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_NO_DECIPH_UCP 0x2
#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_DECIPH_UCP 0x3
#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_NO_DECIPH 0x4
#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_DECIPH 0x5
@@ -57,35 +55,48 @@
#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_DECIPH_NO_UCP 0x7
#define IPA_HW_PKT_PROCESS_HPS_DMA_PARSER 0x8
#define IPA_HW_PKT_PROCESS_HPS_DMA_DECIPH_PARSER 0x9
+#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_UCP_TWICE_NO_DECIPH 0xA
+#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_UCP_TWICE_DECIPH 0xB
+#define IPA_HW_PKT_PROCESS_HPS_3_PKT_PRS_UCP_TWICE_NO_DECIPH 0xC
+#define IPA_HW_PKT_PROCESS_HPS_3_PKT_PRS_UCP_TWICE_DECIPH 0xD
/* DPS Sequences */
#define IPA_HW_PKT_PROCESS_DPS_DMA 0x0
#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_DECIPH 0x1
#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_DECOMP 0x2
+#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_CIPH 0x3
/* Src RSRC GRP config */
-#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_0 0x05050404
-#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_1 0x0A0A0A0A
-#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_2 0x0C0C0C0C
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_0 0x0B040803
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_1 0x0C0C0909
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_2 0x0E0E0909
#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_3 0x3F003F00
-#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_4 0x0E0E0E0E
+#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_4 0x10101616
-#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_0 0x00000101
-#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_1 0x00000808
-#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_2 0x00000808
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_0 0x01010101
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_1 0x02020202
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_2 0x04040404
#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_3 0x3F003F00
-#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_4 0x00000E0E
+#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_4 0x02020606
+
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_0 0x00000000
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_1 0x00000000
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_2 0x00000000
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_3 0x00003F00
+#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_4 0x00000000
/* Dest RSRC GRP config */
-#define IPA_HW_DST_RSRC_GRP_01_RSRC_TYPE_0 0x04040404
+#define IPA_HW_DST_RSRC_GRP_01_RSRC_TYPE_0 0x05051010
#define IPA_HW_DST_RSRC_GRP_01_RSRC_TYPE_1 0x3F013F02
-#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_0 0x02020303
-#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_1 0x02000201
+#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_0 0x02020202
+#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_1 0x02010201
+#define IPA_HW_DST_RSRC_GRP_45_RSRC_TYPE_0 0x00000000
+#define IPA_HW_DST_RSRC_GRP_45_RSRC_TYPE_1 0x00000200
-#define IPA_HW_RX_HPS_CLIENTS_MIN_DEPTH_0 0x00020703
-#define IPA_HW_RX_HPS_CLIENTS_MAX_DEPTH_0 0x00020703
+#define IPA_HW_RX_HPS_CLIENTS_MIN_DEPTH_0 0x03030303
+#define IPA_HW_RX_HPS_CLIENTS_MAX_DEPTH_0 0x03030303
#define IPA_HW_RSRP_GRP_0 0x0
#define IPA_HW_RSRP_GRP_1 0x1
@@ -98,8 +109,11 @@
#define IPA_HW_DDR_SRC_RSRP_GRP IPA_HW_RSRP_GRP_1
#define IPA_HW_DDR_DEST_RSRP_GRP IPA_HW_RSRP_GRP_1
-#define IPA_HW_SRC_RSRP_TYPE_MAX 0x4
-#define IPA_HW_DST_RSRP_TYPE_MAX 0x3
+#define IPA_HW_DMA_SRC_RSRP_GRP IPA_HW_RSRP_GRP_2
+#define IPA_HW_DMA_DEST_RSRP_GRP IPA_HW_RSRP_GRP_2
+
+#define IPA_HW_SRC_RSRP_TYPE_MAX HWIO_IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n_MAXn
+#define IPA_HW_DST_RSRP_TYPE_MAX HWIO_IPA_DST_RSRC_GRP_01_RSRC_TYPE_n_MAXn
#define GSI_HW_QSB_LOG_MISC_MAX 0x4
@@ -112,6 +126,9 @@
/* Whether to allow setting step mode on IPA when we crash or not */
#define IPA_HW_IS_STEP_MODE_ALLOWED (true)
+/* Max number of virtual pipes for UL QBAP provided by HW */
+#define IPA_HW_MAX_VP_NUM (32)
+
/*
* HW specific clock vote freq values in KHz
* (BIMC/SNOC/PCNOC/IPA/Q6 CPU)
@@ -137,11 +154,11 @@ enum ipa_hw_clk_freq_e {
IPA_HW_CLK_FREQ_SNOC_SVS_2 = 50000,
/* IPA */
- IPA_HW_CLK_FREQ_IPA_PEAK = 500000,
- IPA_HW_CLK_FREQ_IPA_NOM_PLUS = 440000,
- IPA_HW_CLK_FREQ_IPA_NOM = 440000,
+ IPA_HW_CLK_FREQ_IPA_PEAK = 600000,
+ IPA_HW_CLK_FREQ_IPA_NOM_PLUS = 500000,
+ IPA_HW_CLK_FREQ_IPA_NOM = 500000,
IPA_HW_CLK_FREQ_IPA_SVS = 250000,
- IPA_HW_CLK_FREQ_IPA_SVS_2 = 120000,
+ IPA_HW_CLK_FREQ_IPA_SVS_2 = 150000,
/* Q6 CPU */
IPA_HW_CLK_FREQ_Q6_PEAK = 729600,
@@ -150,6 +167,12 @@ enum ipa_hw_clk_freq_e {
IPA_HW_CLK_FREQ_Q6_SVS = 729600,
};
+enum ipa_hw_qtimer_gran_e {
+ IPA_HW_QTIMER_GRAN_0 = 0, /* granularity 0 is 10us */
+ IPA_HW_QTIMER_GRAN_1 = 1, /* granularity 1 is 100us */
+ IPA_HW_QTIMER_GRAN_MAX,
+};
+
/* Pipe ID of all the IPA pipes */
enum ipa_hw_pipe_id_e {
IPA_HW_PIPE_ID_0,
@@ -175,62 +198,95 @@ enum ipa_hw_pipe_id_e {
IPA_HW_PIPE_ID_20,
IPA_HW_PIPE_ID_21,
IPA_HW_PIPE_ID_22,
+ IPA_HW_PIPE_ID_23,
+ IPA_HW_PIPE_ID_24,
+ IPA_HW_PIPE_ID_25,
+ IPA_HW_PIPE_ID_26,
+ IPA_HW_PIPE_ID_27,
+ IPA_HW_PIPE_ID_28,
+ IPA_HW_PIPE_ID_29,
+ IPA_HW_PIPE_ID_30,
IPA_HW_PIPE_ID_MAX
};
/* Pipe ID's of System Bam Endpoints between Q6 & IPA */
enum ipa_hw_q6_pipe_id_e {
/* Pipes used by IPA Q6 driver */
- IPA_HW_Q6_DL_CONSUMER_PIPE_ID = IPA_HW_PIPE_ID_3,
- IPA_HW_Q6_CTL_CONSUMER_PIPE_ID = IPA_HW_PIPE_ID_4,
- IPA_HW_Q6_UL_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_13,
- IPA_HW_Q6_DL_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_14,
+ IPA_HW_Q6_DL_CONSUMER_PIPE_ID = IPA_HW_PIPE_ID_5,
+ IPA_HW_Q6_CTL_CONSUMER_PIPE_ID = IPA_HW_PIPE_ID_6,
+ IPA_HW_Q6_DL_NLO_CONSUMER_PIPE_ID = IPA_HW_PIPE_ID_8,
- IPA_HW_Q6_LTE_DL_CONSUMER_PIPE_ID = IPA_HW_PIPE_ID_6,
- IPA_HW_Q6_LWA_DL_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_16,
+ IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_20,
+ IPA_HW_Q6_UL_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_21,
+ IPA_HW_Q6_DL_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_17,
+ IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_18,
+ IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_19,
+
+ IPA_HW_Q6_UL_ACK_PRODUCER_PIPE_ID =
+ IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE_ID,
+ IPA_HW_Q6_UL_DATA_PRODUCER_PIPE_ID =
+ IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE_ID,
+
+ IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE_ID = IPA_HW_PIPE_ID_4,
+ IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_29,
+
/* Test Simulator Pipes */
IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0_ID = IPA_HW_PIPE_ID_0,
- IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_ID = IPA_HW_PIPE_ID_12,
IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1_ID = IPA_HW_PIPE_ID_1,
- IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_ID = IPA_HW_PIPE_ID_10,
- IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_ID = IPA_HW_PIPE_ID_2,
- IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_ID = IPA_HW_PIPE_ID_11,
+
/* GSI UT channel SW->IPA */
- IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_ID = IPA_HW_PIPE_ID_5,
- /* GSI UT channel IPA->SW */
- IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_ID = IPA_HW_PIPE_ID_17,
+ IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_ID = IPA_HW_PIPE_ID_3,
/* GSI UT channel SW->IPA */
- IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_ID = IPA_HW_PIPE_ID_7,
+ IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_ID = IPA_HW_PIPE_ID_10,
+
+ IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_ID = IPA_HW_PIPE_ID_7,
+
/* GSI UT channel IPA->SW */
- IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_ID = IPA_HW_PIPE_ID_18,
- IPA_HW_Q6_DIAG_CONSUMER_PIPE_ID = IPA_HW_PIPE_ID_19,
+ IPA_HW_Q6_DIAG_CONSUMER_PIPE_ID = IPA_HW_PIPE_ID_9,
+
+ IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_ID = IPA_HW_PIPE_ID_23,
+ IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_ID = IPA_HW_PIPE_ID_24,
+
+ IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_ID = IPA_HW_PIPE_ID_25,
+
+ /* GSI UT channel IPA->SW */
+ IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_ID = IPA_HW_PIPE_ID_26,
+
+ /* GSI UT channel IPA->SW */
+ IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_ID = IPA_HW_PIPE_ID_27,
IPA_HW_Q6_PIPE_ID_MAX = IPA_HW_PIPE_ID_MAX,
};
enum ipa_hw_q6_pipe_ch_id_e {
/* Channels used by IPA Q6 driver */
- IPA_HW_Q6_DL_CONSUMER_PIPE_CH_ID = 0,
- IPA_HW_Q6_CTL_CONSUMER_PIPE_CH_ID = 1,
- IPA_HW_Q6_UL_PRODUCER_PIPE_CH_ID = 3,
- IPA_HW_Q6_DL_PRODUCER_PIPE_CH_ID = 4,
+ IPA_HW_Q6_DL_CONSUMER_PIPE_CH_ID = 0,
+ IPA_HW_Q6_CTL_CONSUMER_PIPE_CH_ID = 1,
+ IPA_HW_Q6_DL_NLO_CONSUMER_PIPE_CH_ID = 2,
+ IPA_HW_Q6_UL_ACC_PATH_ACK_PRODUCER_PIPE_CH_ID = 6,
+ IPA_HW_Q6_UL_PRODUCER_PIPE_CH_ID = 7,
+ IPA_HW_Q6_DL_PRODUCER_PIPE_CH_ID = 3,
+ IPA_HW_Q6_UL_ACC_PATH_DATA_PRODUCER_PIPE_CH_ID = 5,
+ IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE_CH_ID = 4,
- IPA_HW_Q6_LTE_DL_CONSUMER_PIPE_CH_ID = 2,
- IPA_HW_Q6_LWA_DL_PRODUCER_PIPE_CH_ID = 5,
+ IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE_CH_ID = 8,
+ IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE_CH_ID = 9,
+ /* CH_ID 8 and 9 are Q6 SPARE CONSUMERs */
+
/* Test Simulator Channels */
- IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0_CH_ID = 6,
- IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_CH_ID = 8,
- IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1_CH_ID = 9,
- IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_CH_ID = 10,
- IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_CH_ID = 11,
- IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_CH_ID = 12,
+ IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0_CH_ID = 10,
+ IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_CH_ID = 11,
+ IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1_CH_ID = 12,
+ IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_CH_ID = 13,
+ IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_CH_ID = 14,
+ IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_CH_ID = 15,
/* GSI UT channel SW->IPA */
- IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_CH_ID = 13,
+ IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_CH_ID = 16,
/* GSI UT channel IPA->SW */
- IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_CH_ID = 14,
+ IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_CH_ID = 17,
/* GSI UT channel SW->IPA */
- IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_CH_ID = 15,
+ IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_CH_ID = 18,
/* GSI UT channel IPA->SW */
- IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_CH_ID = 16,
+ IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_CH_ID = 19,
};
/* System Bam Endpoints between Q6 & IPA */
@@ -243,33 +299,42 @@ enum ipa_hw_q6_pipe_e {
IPA_HW_Q6_DL_CONSUMER_PIPE = 2,
/* CTL Pipe Q6->IPA */
IPA_HW_Q6_CTL_CONSUMER_PIPE = 3,
- /* Q6 -> IPA, LTE DL Optimized path */
- IPA_HW_Q6_LTE_DL_CONSUMER_PIPE = 4,
- /* LWA DL(Wifi to Q6) */
- IPA_HW_Q6_LWA_DL_PRODUCER_PIPE = 5,
+ /* Q6 -> IPA, DL NLO */
+ IPA_HW_Q6_DL_NLO_CONSUMER_PIPE = 4,
+ /* DMA ASYNC CONSUMER */
+ IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE = 5,
+ /* DMA ASYNC PRODUCER */
+ IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE = 6,
+ /* UL Acc Path Data Pipe IPA->Q6 */
+ IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE = 7,
+ /* UL Acc Path ACK Pipe IPA->Q6 */
+ IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE = 8,
+ /* UL Acc Path QBAP status Pipe IPA->Q6 */
+ IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE = 9,
/* Diag status pipe IPA->Q6 */
/* Used only when FEATURE_IPA_TEST_PER_SIM is ON */
/* SIM Pipe IPA->Sim */
- IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0 = 7,
+ IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0 = 10,
/* SIM Pipe Sim->IPA */
- IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1 = 8,
+ IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1 = 11,
/* SIM Pipe Sim->IPA */
- IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2 = 9,
+ IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2 = 12,
/* SIM Pipe Sim->IPA */
- IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0 = 10,
+ IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0 = 13,
/* SIM B2B PROD Pipe */
- IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1 = 11,
+ IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1 = 14,
/* SIM Pipe IPA->Sim */
- IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2 = 12,
+ IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2 = 15,
/* End FEATURE_IPA_TEST_PER_SIM */
/* GSI UT channel SW->IPA */
- IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1 = 13,
+ IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1 = 16,
/* GSI UT channel IPA->SW */
- IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1 = 14,
+ IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1 = 17,
/* GSI UT channel SW->IPA */
- IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2 = 15,
+ IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2 = 18,
/* GSI UT channel IPA->SW */
- IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2 = 16,
+ IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2 = 19,
+
IPA_HW_Q6_PIPE_TOTAL
};
@@ -375,12 +440,7 @@ enum ipa_hw_irq_srcs_e {
/*
* Total number of channel contexts that need to be saved for APPS
*/
-#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7 14
-
-/*
- * Total number of channel contexts that need to be saved for Q6
- */
-#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_Q6 6
+#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7 19
/*
* Total number of channel contexts that need to be saved for UC
@@ -390,12 +450,7 @@ enum ipa_hw_irq_srcs_e {
/*
* Total number of event ring contexts that need to be saved for APPS
*/
-#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7 12
-
-/*
- * Total number of event ring contexts that need to be saved for Q6
- */
-#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_Q6 4
+#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7 19
/*
* Total number of event ring contexts that need to be saved for UC
@@ -413,38 +468,12 @@ enum ipa_hw_irq_srcs_e {
* Total number of endpoints for which ipa_reg_save.pipes[endp_number]
* are always saved
*/
-#define IPA_HW_REG_SAVE_NUM_ACTIVE_PIPES 23
+#define IPA_HW_REG_SAVE_NUM_ACTIVE_PIPES IPA_HW_PIPE_ID_MAX
/*
- * Macro to set the active flag for all active pipe indexed register
+ * SHRAM Bytes per ch
*/
-#define IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_ACTIVE() \
- do { \
- ipa_reg_save.ipa.pipes[0].active = true; \
- ipa_reg_save.ipa.pipes[1].active = true; \
- ipa_reg_save.ipa.pipes[2].active = true; \
- ipa_reg_save.ipa.pipes[3].active = true; \
- ipa_reg_save.ipa.pipes[4].active = true; \
- ipa_reg_save.ipa.pipes[5].active = true; \
- ipa_reg_save.ipa.pipes[6].active = true; \
- ipa_reg_save.ipa.pipes[7].active = true; \
- ipa_reg_save.ipa.pipes[8].active = true; \
- ipa_reg_save.ipa.pipes[9].active = true; \
- ipa_reg_save.ipa.pipes[10].active = true; \
- ipa_reg_save.ipa.pipes[11].active = true; \
- ipa_reg_save.ipa.pipes[12].active = true; \
- ipa_reg_save.ipa.pipes[13].active = true; \
- ipa_reg_save.ipa.pipes[14].active = true; \
- ipa_reg_save.ipa.pipes[15].active = true; \
- ipa_reg_save.ipa.pipes[16].active = true; \
- ipa_reg_save.ipa.pipes[17].active = true; \
- ipa_reg_save.ipa.pipes[18].active = true; \
- ipa_reg_save.ipa.pipes[19].active = true; \
- ipa_reg_save.ipa.pipes[20].active = true; \
- ipa_reg_save.ipa.pipes[21].active = true; \
- ipa_reg_save.ipa.pipes[22].active = true; \
- } while (0)
-
+#define IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM 12
/*
* Total number of rx splt cmdq's see:
@@ -453,6 +482,74 @@ enum ipa_hw_irq_srcs_e {
#define IPA_RX_SPLT_CMDQ_MAX 4
/*
+ * Macro to define a particular register cfg entry for all pipe
+ * indexed register
+ */
+#define IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(reg_name, var_name) \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 0), \
+ (u32 *)&ipa_reg_save.ipa.pipes[0].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 1), \
+ (u32 *)&ipa_reg_save.ipa.pipes[1].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 2), \
+ (u32 *)&ipa_reg_save.ipa.pipes[2].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 3), \
+ (u32 *)&ipa_reg_save.ipa.pipes[3].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 4), \
+ (u32 *)&ipa_reg_save.ipa.pipes[4].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 5), \
+ (u32 *)&ipa_reg_save.ipa.pipes[5].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 6), \
+ (u32 *)&ipa_reg_save.ipa.pipes[6].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 7), \
+ (u32 *)&ipa_reg_save.ipa.pipes[7].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 8), \
+ (u32 *)&ipa_reg_save.ipa.pipes[8].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 9), \
+ (u32 *)&ipa_reg_save.ipa.pipes[9].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 10), \
+ (u32 *)&ipa_reg_save.ipa.pipes[10].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 11), \
+ (u32 *)&ipa_reg_save.ipa.pipes[11].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 12), \
+ (u32 *)&ipa_reg_save.ipa.pipes[12].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 13), \
+ (u32 *)&ipa_reg_save.ipa.pipes[13].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 14), \
+ (u32 *)&ipa_reg_save.ipa.pipes[14].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 15), \
+ (u32 *)&ipa_reg_save.ipa.pipes[15].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 16), \
+ (u32 *)&ipa_reg_save.ipa.pipes[16].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 17), \
+ (u32 *)&ipa_reg_save.ipa.pipes[17].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 18), \
+ (u32 *)&ipa_reg_save.ipa.pipes[18].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 19), \
+ (u32 *)&ipa_reg_save.ipa.pipes[19].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 20), \
+ (u32 *)&ipa_reg_save.ipa.pipes[20].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 21), \
+ (u32 *)&ipa_reg_save.ipa.pipes[21].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 22), \
+ (u32 *)&ipa_reg_save.ipa.pipes[22].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 23), \
+ (u32 *)&ipa_reg_save.ipa.pipes[23].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 24), \
+ (u32 *)&ipa_reg_save.ipa.pipes[24].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 25), \
+ (u32 *)&ipa_reg_save.ipa.pipes[25].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 26), \
+ (u32 *)&ipa_reg_save.ipa.pipes[26].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 27), \
+ (u32 *)&ipa_reg_save.ipa.pipes[27].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 28), \
+ (u32 *)&ipa_reg_save.ipa.pipes[28].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 29), \
+ (u32 *)&ipa_reg_save.ipa.pipes[29].endp.var_name }, \
+ { GEN_1xVECTOR_REG_OFST(reg_name, 30), \
+ (u32 *)&ipa_reg_save.ipa.pipes[30].endp.var_name }
+
+/*
* Macro to define a particular register cfg entry for the remaining
* pipe indexed register. In Stingray case we don't have extra
* endpoints so it is intentially empty
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h
index 9ab8667..56b0713 100644
--- a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h
@@ -935,7 +935,7 @@
#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_INM(m) \
in_dword_masked(HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
m)
-#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OUT(v) out_dword( \
+#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OUT(v) out_dword( \
HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_ADDR, \
v)
#define HWIO_IPA_GSI_TOP_GSI_IRAM_PTR_INT_MOD_STOPPED_OUTM(m, \
@@ -971,15 +971,24 @@
HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
mask)
#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OUTI(n, val) \
- out_dword( \
- HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
- val)
-#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OUTMI(n, mask, val) \
- out_dword_masked_ns( \
- HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
- mask, \
- val, \
- HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INI(n))
+ out_dword( \
+ HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR(n), \
+ val)
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_OUTMI(n, mask, \
+ val) out_dword_masked_ns( \
+ HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_ADDR( \
+ n), \
+ mask, \
+ val, \
+ HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INI(n))
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_3_BMSK 0xff000000
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_3_SHFT 0x18
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_2_BMSK 0xff0000
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_2_SHFT 0x10
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_1_BMSK 0xff00
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_1_SHFT 0x8
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_0_BMSK 0xff
+#define HWIO_IPA_GSI_TOP_GSI_INST_RAM_n_INST_BYTE_0_SHFT 0x0
#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_ADDR(n) (IPA_GSI_TOP_GSI_REG_BASE + \
0x00002000 + 0x4 * (n))
#define HWIO_IPA_GSI_TOP_GSI_SHRAM_n_PHYS(n) ( \
@@ -7404,11 +7413,47 @@
0x0000038c)
#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
0x0000038c)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_RMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_IN in_dword_masked( \
+ HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+ HWIO_IPA_RX_HPS_CMDQ_CFG_WR_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_INM(m) in_dword_masked( \
+ HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+ m)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_OUT(v) out_dword( \
+ HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+ v)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_OUTM(m, v) out_dword_masked_ns( \
+ HWIO_IPA_RX_HPS_CMDQ_CFG_WR_ADDR, \
+ m, \
+ v, \
+ HWIO_IPA_RX_HPS_CMDQ_CFG_WR_IN)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_BLOCK_WR_BMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_WR_BLOCK_WR_SHFT 0x0
#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR (IPA_DEBUG_REG_BASE + 0x00000390)
#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
0x00000390)
#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_OFFS (IPA_DEBUG_REG_BASE_OFFS + \
0x00000390)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_RMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ATTR 0x3
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_IN in_dword_masked( \
+ HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+ HWIO_IPA_RX_HPS_CMDQ_CFG_RD_RMSK)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_INM(m) in_dword_masked( \
+ HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+ m)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_OUT(v) out_dword( \
+ HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+ v)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_OUTM(m, v) out_dword_masked_ns( \
+ HWIO_IPA_RX_HPS_CMDQ_CFG_RD_ADDR, \
+ m, \
+ v, \
+ HWIO_IPA_RX_HPS_CMDQ_CFG_RD_IN)
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_BLOCK_RD_BMSK 0x1f
+#define HWIO_IPA_RX_HPS_CMDQ_CFG_RD_BLOCK_RD_SHFT 0x0
#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_0_ADDR (IPA_DEBUG_REG_BASE + \
0x00000394)
#define HWIO_IPA_RX_HPS_CMDQ_DATA_WR_0_PHYS (IPA_DEBUG_REG_BASE_PHYS + \
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h
index 1600f0a..306dfec 100644
--- a/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa4.5/ipa_hwio_def.h
@@ -1941,6 +1941,22 @@ union ipa_hwio_def_ipa_rx_hps_cmdq_cmd_u {
struct ipa_hwio_def_ipa_rx_hps_cmdq_cmd_s def;
u32 value;
};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_s {
+ u32 block_wr : 5;
+ u32 reserved0 : 27;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_u {
+ struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_s def;
+ u32 value;
+};
+struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_s {
+ u32 block_rd : 5;
+ u32 reserved0 : 27;
+};
+union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_u {
+ struct ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_s def;
+ u32 value;
+};
struct ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_s {
u32 cmdq_packet_len_f : 16;
u32 cmdq_dest_len_f : 16;
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c
index 0b498e0..831f9c8 100644
--- a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.c
@@ -246,7 +246,12 @@ static struct map_src_dst_addr_s ipa_regs_to_save_array[] = {
IPA_REG_SAVE_RX_SPLT_CMDQ(
IPA_RX_SPLT_CMDQ_STATUS_n, ipa_rx_splt_cmdq_status_n),
-
+ GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_CFG_WR,
+ ipa.dbg,
+ ipa_rx_hps_cmdq_cfg_wr),
+ GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_CFG_RD,
+ ipa.dbg,
+ ipa_rx_hps_cmdq_cfg_rd),
GEN_SRC_DST_ADDR_MAP(IPA_RX_HPS_CMDQ_CMD,
ipa.dbg,
ipa_rx_hps_cmdq_cmd),
@@ -731,9 +736,11 @@ static void out_dword(
*/
void ipa_save_gsi_ver(void)
{
+ if (!ipa3_ctx->do_register_collection_on_crash)
+ return;
+
ipa_reg_save.gsi.fw_ver =
- IPA_READ_1xVECTOR_REG(IPA_GSI_TOP_GSI_INST_RAM_n, 0) &
- 0x0000FFFF;
+ IPA_READ_1xVECTOR_REG(IPA_GSI_TOP_GSI_INST_RAM_n, 0);
}
/*
@@ -775,7 +782,11 @@ void ipa_save_registers(void)
in_dword(ipa_regs_to_save_array[i].src_addr);
}
- IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_ACTIVE();
+ /*
+ * Set the active flag for all active pipe indexed registers.
+ */
+ for (i = 0; i < IPA_HW_PIPE_ID_MAX; i++)
+ ipa_reg_save.ipa.pipes[i].active = true;
/* Now save the per endp registers for the remaining pipes */
for (i = 0; i < (CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS *
@@ -864,26 +875,6 @@ void ipa_save_registers(void)
n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 1);
}
- for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_Q6; i++) {
- u32 phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.q6[
- i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
- u32 n = phys_ch_idx*IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM;
-
- if (!ipa_reg_save.gsi.ch_cntxt.q6[
- i].gsi_map_ee_n_ch_k_vp_table.valid)
- continue;
- ipa_reg_save.gsi.ch_cntxt.q6[
- i].mcs_channel_scratch.scratch4.shram =
- IPA_READ_1xVECTOR_REG(
- GSI_SHRAM_n,
- n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 2);
- ipa_reg_save.gsi.ch_cntxt.q6[
- i].mcs_channel_scratch.scratch5.shram =
- IPA_READ_1xVECTOR_REG(
- GSI_SHRAM_n,
- n + IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM - 1);
- }
-
for (i = 0; i < IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC; i++) {
u32 phys_ch_idx = ipa_reg_save.gsi.ch_cntxt.uc[
i].gsi_map_ee_n_ch_k_vp_table.phy_ch;
diff --git a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h
index 6a6619a..8707e9c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h
+++ b/drivers/platform/msm/ipa/ipa_v3/dump/ipa_reg_dump.h
@@ -86,8 +86,6 @@
#define IPA_DEBUG_TESTBUS_DEF_EXTERNAL 50
#define IPA_DEBUG_TESTBUS_DEF_INTERNAL 6
-#define IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM 8
-
#define IPA_REG_SAVE_GSI_NUM_EE 3
#define IPA_REG_SAVE_NUM_EXTRA_ENDP_REGS 22
@@ -257,58 +255,6 @@ struct map_src_dst_addr_s {
* Macro to define a particular register cfg entry for all pipe
* indexed register
*/
-#define IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(reg_name, var_name) \
- { GEN_1xVECTOR_REG_OFST(reg_name, 0), \
- (u32 *)&ipa_reg_save.ipa.pipes[0].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 1), \
- (u32 *)&ipa_reg_save.ipa.pipes[1].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 2), \
- (u32 *)&ipa_reg_save.ipa.pipes[2].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 3), \
- (u32 *)&ipa_reg_save.ipa.pipes[3].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 4), \
- (u32 *)&ipa_reg_save.ipa.pipes[4].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 5), \
- (u32 *)&ipa_reg_save.ipa.pipes[5].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 6), \
- (u32 *)&ipa_reg_save.ipa.pipes[6].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 7), \
- (u32 *)&ipa_reg_save.ipa.pipes[7].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 8), \
- (u32 *)&ipa_reg_save.ipa.pipes[8].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 9), \
- (u32 *)&ipa_reg_save.ipa.pipes[9].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 10), \
- (u32 *)&ipa_reg_save.ipa.pipes[10].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 11), \
- (u32 *)&ipa_reg_save.ipa.pipes[11].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 12), \
- (u32 *)&ipa_reg_save.ipa.pipes[12].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 13), \
- (u32 *)&ipa_reg_save.ipa.pipes[13].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 14), \
- (u32 *)&ipa_reg_save.ipa.pipes[14].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 15), \
- (u32 *)&ipa_reg_save.ipa.pipes[15].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 16), \
- (u32 *)&ipa_reg_save.ipa.pipes[16].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 17), \
- (u32 *)&ipa_reg_save.ipa.pipes[17].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 18), \
- (u32 *)&ipa_reg_save.ipa.pipes[18].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 19), \
- (u32 *)&ipa_reg_save.ipa.pipes[19].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 20), \
- (u32 *)&ipa_reg_save.ipa.pipes[20].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 21), \
- (u32 *)&ipa_reg_save.ipa.pipes[21].endp.var_name }, \
- { GEN_1xVECTOR_REG_OFST(reg_name, 22), \
- (u32 *)&ipa_reg_save.ipa.pipes[22].endp.var_name }
-
-/*
- * Macro to define a particular register cfg entry for all pipe
- * indexed register
- */
#define IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(reg_name, var_name) \
{ 0, 0 }
@@ -394,18 +340,6 @@ struct map_src_dst_addr_s {
* register
*/
#define IPA_REG_SAVE_CFG_ENTRY_GSI_CH_CNTXT(reg_name, var_name) \
- { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 0), \
- (u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[0].var_name }, \
- { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 1), \
- (u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[1].var_name }, \
- { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 2), \
- (u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[2].var_name }, \
- { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 3), \
- (u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[3].var_name }, \
- { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 4), \
- (u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[4].var_name }, \
- { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 5), \
- (u32 *)&ipa_reg_save.gsi.ch_cntxt.q6[5].var_name }, \
{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 0), \
(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[0].var_name }, \
{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 1), \
@@ -434,20 +368,22 @@ struct map_src_dst_addr_s {
(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[12].var_name }, \
{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 13), \
(u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[13].var_name }, \
- { GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1), \
+ { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 14), \
+ (u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[14].var_name }, \
+ { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 15), \
+ (u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[15].var_name }, \
+ { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 16), \
+ (u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[16].var_name }, \
+ { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 17), \
+ (u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[17].var_name }, \
+ { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 18), \
+ (u32 *)&ipa_reg_save.gsi.ch_cntxt.a7[18].var_name }, \
+ { GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1), \
(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[0].var_name }, \
{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 3), \
(u32 *)&ipa_reg_save.gsi.ch_cntxt.uc[1].var_name }
#define IPA_REG_SAVE_CFG_ENTRY_GSI_EVT_CNTXT(reg_name, var_name) \
- { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 0), \
- (u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[0].var_name }, \
- { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 1), \
- (u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[1].var_name }, \
- { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 2), \
- (u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[2].var_name }, \
- { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_Q6_EE, 3), \
- (u32 *)&ipa_reg_save.gsi.evt_cntxt.q6[3].var_name }, \
{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 0), \
(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[0].var_name }, \
{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 1), \
@@ -472,6 +408,20 @@ struct map_src_dst_addr_s {
(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[10].var_name }, \
{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 11), \
(u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[11].var_name }, \
+ { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 12), \
+ (u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[12].var_name }, \
+ { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 13), \
+ (u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[13].var_name }, \
+ { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 14), \
+ (u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[14].var_name }, \
+ { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 15), \
+ (u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[15].var_name }, \
+ { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 16), \
+ (u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[16].var_name }, \
+ { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 17), \
+ (u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[17].var_name }, \
+ { GEN_2xVECTOR_REG_OFST(reg_name, IPA_HW_A7_EE, 18), \
+ (u32 *)&ipa_reg_save.gsi.evt_cntxt.a7[18].var_name }, \
{ GEN_2xVECTOR_REG_OFST(reg_name, IPA_REG_SAVE_HWP_GSI_EE, 1), \
(u32 *)&ipa_reg_save.gsi.evt_cntxt.uc[0].var_name }
@@ -808,6 +758,11 @@ struct ipa_reg_save_dbg_s {
struct ipa_hwio_def_ipa_rx_splt_cmdq_status_n_s
ipa_rx_splt_cmdq_status_n[IPA_RX_SPLT_CMDQ_MAX];
+ union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_wr_u
+ ipa_rx_hps_cmdq_cfg_wr;
+ union ipa_hwio_def_ipa_rx_hps_cmdq_cfg_rd_u
+ ipa_rx_hps_cmdq_cfg_rd;
+
struct ipa_hwio_def_ipa_rx_hps_cmdq_cmd_s
ipa_rx_hps_cmdq_cmd;
union ipa_hwio_def_ipa_rx_hps_cmdq_data_rd_0_u
@@ -1210,8 +1165,6 @@ struct ipa_reg_save_gsi_ch_cntxt_s {
struct ipa_reg_save_gsi_ch_cntxt_per_ep_s
a7[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7];
struct ipa_reg_save_gsi_ch_cntxt_per_ep_s
- q6[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_Q6];
- struct ipa_reg_save_gsi_ch_cntxt_per_ep_s
uc[IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC];
};
@@ -1220,8 +1173,6 @@ struct ipa_reg_save_gsi_evt_cntxt_s {
struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
a7[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7];
struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
- q6[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_Q6];
- struct ipa_reg_save_gsi_evt_cntxt_per_ep_s
uc[IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_UC];
};
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index bebd4de..1bf724c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -6685,6 +6685,7 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
smmu_info.present[IPA_SMMU_CB_AP] = true;
ipa3_ctx->pdev = dev;
+ cb->next_addr = cb->va_end;
return 0;
}
@@ -7116,7 +7117,9 @@ void ipa_pc_qmp_enable(void)
ret = PTR_ERR(ipa3_ctx->mbox);
if (ret != -EPROBE_DEFER)
IPAERR("mailbox channel request failed, ret=%d\n", ret);
- goto cleanup;
+
+ ipa3_ctx->mbox = NULL;
+ return;
}
/* prepare the QMP packet to send */
@@ -7131,8 +7134,10 @@ void ipa_pc_qmp_enable(void)
}
cleanup:
- ipa3_ctx->mbox = NULL;
- mbox_free_channel(ipa3_ctx->mbox);
+ if (ipa3_ctx->mbox) {
+ mbox_free_channel(ipa3_ctx->mbox);
+ ipa3_ctx->mbox = NULL;
+ }
}
/**************************************************************
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
index dad8582..d1422db 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -92,7 +92,8 @@ static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
entry->hdr->phys_base,
hdr_base_addr,
entry->hdr->offset_entry,
- entry->l2tp_params);
+ entry->l2tp_params,
+ ipa3_ctx->use_64_bit_dma_mask);
if (ret)
return ret;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index f808f69..6d14b83 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -792,7 +792,7 @@ int ipa3_qmi_ul_filter_request_send(
{
struct ipa_configure_ul_firewall_rules_resp_msg_v01 resp;
struct ipa_msg_desc req_desc, resp_desc;
- int rc;
+ int rc, i;
IPAWANDBG("IPACM pass %u rules to Q6\n",
req->firewall_rules_list_len);
@@ -812,6 +812,37 @@ int ipa3_qmi_ul_filter_request_send(
}
mutex_unlock(&ipa3_qmi_lock);
+ /* check if modem is up */
+ if (!ipa3_qmi_indication_fin ||
+ !ipa3_qmi_modem_init_fin ||
+ !ipa_q6_clnt) {
+ IPAWANDBG("modem QMI service is not up yet\n");
+ return -EINVAL;
+ }
+
+ /* Passing 0 rules means that firewall is disabled */
+ if (req->firewall_rules_list_len == 0)
+ IPAWANDBG("IPACM passed 0 rules to Q6\n");
+
+ if (req->firewall_rules_list_len >= QMI_IPA_MAX_UL_FIREWALL_RULES_V01) {
+ IPAWANERR(
+ "Number of rules passed by IPACM, %d, exceed limit %d\n",
+ req->firewall_rules_list_len,
+ QMI_IPA_MAX_UL_FIREWALL_RULES_V01);
+ return -EINVAL;
+ }
+
+ /* Check for valid IP type */
+	for (i = 0; i < req->firewall_rules_list_len; i++) {
+		if (req->firewall_rules_list[i].ip_type !=
+		    QMI_IPA_IP_TYPE_V4_V01 &&
+		    req->firewall_rules_list[i].ip_type !=
+		    QMI_IPA_IP_TYPE_V6_V01) {
+			IPAWANERR("Invalid IP type %d\n", req->firewall_rules_list[i].ip_type);
+			return -EINVAL;
+		}
+	}
+
req_desc.max_msg_len =
QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01;
req_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01;
@@ -825,7 +856,6 @@ int ipa3_qmi_ul_filter_request_send(
resp_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01;
resp_desc.ei_array =
ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei;
-
rc = ipa3_qmi_send_req_wait(ipa_q6_clnt,
&req_desc, req,
&resp_desc, &resp,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index a5391a9..82cf654 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -497,6 +497,38 @@ int ipa3_wdi_init(void)
return 0;
}
+static int ipa_create_ap_smmu_mapping_pa(phys_addr_t pa, size_t len,
+ bool device, unsigned long *iova)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+ unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+ int prot = IOMMU_READ | IOMMU_WRITE;
+ size_t true_len = roundup(len + pa - rounddown(pa, PAGE_SIZE),
+ PAGE_SIZE);
+ int ret;
+
+ if (!cb->valid) {
+ IPAERR("No SMMU CB setup\n");
+ return -EINVAL;
+ }
+
+ if (len > PAGE_SIZE)
+ va = roundup(cb->next_addr, len);
+
+ ret = ipa3_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
+ true_len,
+ device ? (prot | IOMMU_MMIO) : prot);
+ if (ret) {
+ IPAERR("iommu map failed for pa=%pa len=%zu\n", &pa, true_len);
+ return -EINVAL;
+ }
+
+ ipa3_ctx->wdi_map_cnt++;
+ cb->next_addr = va + true_len;
+ *iova = va + pa - rounddown(pa, PAGE_SIZE);
+ return 0;
+}
+
static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len,
bool device, unsigned long *iova)
{
@@ -526,6 +558,67 @@ static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len,
return 0;
}
+static int ipa_create_ap_smmu_mapping_sgt(struct sg_table *sgt,
+ unsigned long *iova)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+ unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+ int prot = IOMMU_READ | IOMMU_WRITE;
+ int ret, i;
+ struct scatterlist *sg;
+ unsigned long start_iova = va;
+ phys_addr_t phys;
+ size_t len = 0;
+ int count = 0;
+
+ if (!cb->valid) {
+ IPAERR("No SMMU CB setup\n");
+ return -EINVAL;
+ }
+ if (!sgt) {
+ IPAERR("Bad parameters, scatter / gather list is NULL\n");
+ return -EINVAL;
+ }
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ /* directly get sg_tbl PA from wlan-driver */
+ len += PAGE_ALIGN(sg->offset + sg->length);
+ }
+
+ if (len > PAGE_SIZE) {
+ va = roundup(cb->next_addr,
+ roundup_pow_of_two(len));
+ start_iova = va;
+ }
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ /* directly get sg_tbl PA from wlan-driver */
+ phys = sg->dma_address;
+ len = PAGE_ALIGN(sg->offset + sg->length);
+
+ ret = ipa3_iommu_map(cb->mapping->domain, va, phys, len, prot);
+ if (ret) {
+ IPAERR("iommu map failed for pa=%pa len=%zu\n",
+ &phys, len);
+ goto bad_mapping;
+ }
+ va += len;
+ ipa3_ctx->wdi_map_cnt++;
+ count++;
+ }
+ cb->next_addr = va;
+ *iova = start_iova;
+
+ return 0;
+
+bad_mapping:
+ for_each_sg(sgt->sgl, sg, count, i)
+ iommu_unmap(cb->mapping->domain, sg_dma_address(sg),
+ sg_dma_len(sg));
+ return -EINVAL;
+}
+
+
static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
unsigned long *iova)
{
@@ -576,6 +669,43 @@ static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
return -EINVAL;
}
+static void ipa_release_ap_smmu_mappings(enum ipa_client_type client)
+{
+ struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_AP);
+ int i, j, start, end;
+
+ if (IPA_CLIENT_IS_CONS(client)) {
+ start = IPA_WDI_TX_RING_RES;
+ if (ipa3_ctx->ipa_wdi3_over_gsi)
+ end = IPA_WDI_TX_DB_RES;
+ else
+ end = IPA_WDI_CE_DB_RES;
+ } else {
+ start = IPA_WDI_RX_RING_RES;
+ if (ipa3_ctx->ipa_wdi2 ||
+ ipa3_ctx->ipa_wdi3_over_gsi)
+ end = IPA_WDI_RX_COMP_RING_WP_RES;
+ else
+ end = IPA_WDI_RX_RING_RP_RES;
+ }
+
+ for (i = start; i <= end; i++) {
+ if (wdi_res[i].valid) {
+ for (j = 0; j < wdi_res[i].nents; j++) {
+ iommu_unmap(cb->mapping->domain,
+ wdi_res[i].res[j].iova,
+ wdi_res[i].res[j].size);
+ ipa3_ctx->wdi_map_cnt--;
+ }
+ kfree(wdi_res[i].res);
+ wdi_res[i].valid = false;
+ }
+ }
+
+ if (ipa3_ctx->wdi_map_cnt == 0)
+ cb->next_addr = cb->va_end;
+}
+
static void ipa_release_uc_smmu_mappings(enum ipa_client_type client)
{
struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx(IPA_SMMU_CB_UC);
@@ -751,9 +881,11 @@ int ipa_create_gsi_smmu_mapping(int res_idx, bool wlan_smmu_en,
/* no SMMU on WLAN but SMMU on IPA */
if (!wlan_smmu_en && !ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
- if (ipa3_smmu_map_peer_buff(*iova, pa, len,
- sgt, IPA_SMMU_CB_WLAN)) {
- IPAERR("Fail to create mapping res %d\n", res_idx);
+ if (ipa_create_ap_smmu_mapping_pa(pa, len,
+ (res_idx == IPA_WDI_CE_DB_RES) ? true : false,
+ iova)) {
+ IPAERR("Fail to create mapping res %d\n",
+ res_idx);
return -EFAULT;
}
ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
@@ -765,10 +897,12 @@ int ipa_create_gsi_smmu_mapping(int res_idx, bool wlan_smmu_en,
case IPA_WDI_RX_RING_RP_RES:
case IPA_WDI_RX_COMP_RING_WP_RES:
case IPA_WDI_CE_DB_RES:
- if (ipa3_smmu_map_peer_buff(*iova, pa, len, sgt,
- IPA_SMMU_CB_WLAN)) {
+ case IPA_WDI_TX_DB_RES:
+ if (ipa_create_ap_smmu_mapping_pa(pa, len,
+ (res_idx == IPA_WDI_CE_DB_RES) ? true : false,
+ iova)) {
IPAERR("Fail to create mapping res %d\n",
- res_idx);
+ res_idx);
return -EFAULT;
}
ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
@@ -777,10 +911,9 @@ int ipa_create_gsi_smmu_mapping(int res_idx, bool wlan_smmu_en,
case IPA_WDI_RX_COMP_RING_RES:
case IPA_WDI_TX_RING_RES:
case IPA_WDI_CE_RING_RES:
- if (ipa3_smmu_map_peer_reg(pa, true,
- IPA_SMMU_CB_WLAN)) {
+ if (ipa_create_ap_smmu_mapping_sgt(sgt, iova)) {
IPAERR("Fail to create mapping res %d\n",
- res_idx);
+ res_idx);
return -EFAULT;
}
ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova);
@@ -1304,7 +1437,7 @@ int ipa3_connect_gsi_wdi_pipe(struct ipa_wdi_in_params *in,
ipa_cfg_ep_fail:
memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
gsi_timeout:
- ipa_release_uc_smmu_mappings(in->sys.client);
+ ipa_release_ap_smmu_mappings(in->sys.client);
IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
fail:
return result;
@@ -1864,7 +1997,7 @@ int ipa3_disconnect_gsi_wdi_pipe(u32 clnt_hdl)
result);
goto fail_dealloc_channel;
}
- ipa_release_uc_smmu_mappings(clnt_hdl);
+ ipa_release_ap_smmu_mappings(clnt_hdl);
/* for AP+STA stats update */
if (ipa3_ctx->uc_wdi_ctx.stats_notify)
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 2ac7a08..c53c26a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -167,8 +167,7 @@
#define IPA_v4_2_DST_GROUP_MAX (1)
#define IPA_v4_5_MHI_GROUP_PCIE (0)
-#define IPA_v4_5_GROUP_UL_DL_DST (0)
-#define IPA_v4_5_GROUP_UL_DL_SRC (1)
+#define IPA_v4_5_GROUP_UL_DL (1)
#define IPA_v4_5_MHI_GROUP_DDR (1)
#define IPA_v4_5_MHI_GROUP_DMA (2)
#define IPA_v4_5_MHI_GROUP_QDSS (3)
@@ -367,9 +366,9 @@ static const struct rsrc_min_max ipa3_rsrc_src_grp_config
{5, 5}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
},
[IPA_4_5] = {
- /* unused UL_DL_SRC unused unused UC_RX_Q N/A */
+ /* unused UL_DL unused unused UC_RX_Q N/A */
[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
- {0, 0}, {1, 63}, {0, 0}, {0, 0}, {1, 63}, {0, 0} },
+ {0, 0}, {1, 11}, {0, 0}, {0, 0}, {1, 63}, {0, 0} },
[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
{0, 0}, {14, 14}, {0, 0}, {0, 0}, {3, 3}, {0, 0} },
[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
@@ -380,7 +379,7 @@ static const struct rsrc_min_max ipa3_rsrc_src_grp_config
{0, 0}, {24, 24}, {0, 0}, {0, 0}, {8, 8}, {0, 0} },
},
[IPA_4_5_MHI] = {
- /* PCIE DDR DMA QDSS unused N/A N/A */
+ /* PCIE DDR DMA QDSS unused N/A */
[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
{3, 8}, {4, 11}, {1, 1}, {1, 1}, {0, 0}, {0, 0} },
[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
@@ -393,9 +392,9 @@ static const struct rsrc_min_max ipa3_rsrc_src_grp_config
{22, 22}, {16, 16}, {6, 6}, {2, 2}, {0, 0}, {0, 0} },
},
[IPA_4_5_APQ] = {
- /* unused UL_DL_SRC unused unused UC_RX_Q N/A */
+ /* unused UL_DL unused unused UC_RX_Q N/A */
[IPA_v4_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
- {0, 0}, {1, 63}, {0, 0}, {0, 0}, {1, 63}, {0, 0} },
+ {0, 0}, {1, 11}, {0, 0}, {0, 0}, {1, 63}, {0, 0} },
[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_LISTS] = {
{0, 0}, {14, 14}, {0, 0}, {0, 0}, {3, 3}, {0, 0} },
[IPA_v4_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
@@ -468,11 +467,11 @@ static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
{1, 63}, {0, 0}, {0, 0}, {0, 0}, {0, 0}, {0, 0} },
},
[IPA_4_5] = {
- /* UL/DL/DPL_DST unused unused unused uC N/A */
+ /* unused UL/DL/DPL unused unused uC N/A */
[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
- {16, 16}, {5, 5}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
+ {0, 0}, {16, 16}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
- {2, 63}, {1, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
+ {0, 0}, {2, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
},
[IPA_4_5_MHI] = {
/* PCIE/DPL DDR DMA/CV2X QDSS uC N/A */
@@ -482,11 +481,11 @@ static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
{2, 63}, {1, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
},
[IPA_4_5_APQ] = {
- /* UL/DL/DPL_DST unused unused unused uC N/A */
+ /* unused UL/DL/DPL unused unused uC N/A */
[IPA_v4_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
- {16, 16}, {5, 5}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
+ {0, 0}, {16, 16}, {2, 2}, {2, 2}, {0, 0}, {0, 0} },
[IPA_v4_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
- {2, 63}, {1, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
+ {0, 0}, {2, 63}, {1, 2}, {1, 2}, {0, 2}, {0, 0} },
},
};
@@ -2112,177 +2111,177 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
/* IPA_4_5 */
[IPA_4_5][IPA_CLIENT_WLAN2_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 9, 12, 8, 16, IPA_EE_AP, GSI_FREE_PRE_FETCH, 2 } },
[IPA_4_5][IPA_CLIENT_USB_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 1, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5][IPA_CLIENT_APPS_LAN_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 11, 14, 10, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 2 } },
[IPA_4_5][IPA_CLIENT_APPS_WAN_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 2, 7, 16, 32, IPA_EE_AP, GSI_SMART_PRE_FETCH, 8 } },
[IPA_4_5][IPA_CLIENT_APPS_CMD_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
QMB_MASTER_SELECT_DDR,
{ 7, 9, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5][IPA_CLIENT_ODU_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 10, 13, 8, 19, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5][IPA_CLIENT_ETHERNET_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 12, 0, 8, 16, IPA_EE_UC, GSI_SMART_PRE_FETCH, 4 } },
[IPA_4_5][IPA_CLIENT_Q6_WAN_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
[IPA_4_5][IPA_CLIENT_Q6_CMD_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 6, 1, 20, 24, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5][IPA_CLIENT_Q6_DL_NLO_DATA_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 } },
/* Only for test purpose */
[IPA_4_5][IPA_CLIENT_TEST_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 1, 0, 8, 16, IPA_EE_AP } },
[IPA_4_5][IPA_CLIENT_TEST1_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 1, 0, 8, 16, IPA_EE_AP } },
[IPA_4_5][IPA_CLIENT_TEST2_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 3, 5, 8, 16, IPA_EE_AP } },
[IPA_4_5][IPA_CLIENT_TEST3_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 9, 12, 8, 16, IPA_EE_AP } },
[IPA_4_5][IPA_CLIENT_TEST4_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 11, 14, 8, 16, IPA_EE_AP } },
[IPA_4_5][IPA_CLIENT_WLAN2_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 24, 3, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
[IPA_4_5][IPA_CLIENT_USB_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 26, 17, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5][IPA_CLIENT_USB_DPL_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 15, 15, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5][IPA_CLIENT_ODL_DPL_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 22, 2, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5][IPA_CLIENT_APPS_LAN_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 16, 10, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5][IPA_CLIENT_APPS_WAN_COAL_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 13, 4, 8, 11, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
[IPA_4_5][IPA_CLIENT_APPS_WAN_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 14, 1, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5][IPA_CLIENT_ODU_EMB_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 23, 8, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
[IPA_4_5][IPA_CLIENT_ETHERNET_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 28, 1, 9, 9, IPA_EE_UC, GSI_SMART_PRE_FETCH, 4 } },
[IPA_4_5][IPA_CLIENT_Q6_LAN_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 17, 3, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5][IPA_CLIENT_Q6_WAN_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 21, 7, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5][IPA_CLIENT_Q6_UL_NLO_DATA_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 19, 5, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
[IPA_4_5][IPA_CLIENT_Q6_UL_NLO_ACK_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 20, 6, 5, 5, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
[IPA_4_5][IPA_CLIENT_Q6_QBAP_STATUS_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
@@ -2290,38 +2289,38 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
/* Only for test purpose */
/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
[IPA_4_5][IPA_CLIENT_TEST_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 14, 1, 9, 9, IPA_EE_AP } },
[IPA_4_5][IPA_CLIENT_TEST1_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 14, 1, 9, 9, IPA_EE_AP } },
[IPA_4_5][IPA_CLIENT_TEST2_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 24, 3, 8, 14, IPA_EE_AP } },
[IPA_4_5][IPA_CLIENT_TEST3_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 26, 17, 9, 9, IPA_EE_AP } },
[IPA_4_5][IPA_CLIENT_TEST4_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 27, 18, 9, 9, IPA_EE_AP } },
/* Dummy consumer (pipe 31) is used in L2TP rt rule */
[IPA_4_5][IPA_CLIENT_DUMMY_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
@@ -2378,7 +2377,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
{ 10, 13, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
/* Only for test purpose */
[IPA_4_5_MHI][IPA_CLIENT_TEST_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, QMB_MASTER_SELECT_DDR,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
@@ -2459,7 +2458,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
/* Dummy consumer (pipe 31) is used in L2TP rt rule */
[IPA_4_5_MHI][IPA_CLIENT_DUMMY_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, QMB_MASTER_SELECT_DDR,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
@@ -2467,117 +2466,117 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
/* IPA_4_5 APQ */
[IPA_4_5_APQ][IPA_CLIENT_WLAN2_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 9, 3, 8, 16, IPA_EE_AP, GSI_FREE_PRE_FETCH, 2 } },
[IPA_4_5_APQ][IPA_CLIENT_WIGIG_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 1, 1, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
[IPA_4_5_APQ][IPA_CLIENT_USB_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 0, 0, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5_APQ][IPA_CLIENT_APPS_LAN_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 11, 4, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5_APQ][IPA_CLIENT_APPS_CMD_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
QMB_MASTER_SELECT_DDR,
{ 7, 12, 20, 24, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
/* Only for test purpose */
[IPA_4_5_APQ][IPA_CLIENT_TEST_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 0, 0, 8, 16, IPA_EE_AP } },
[IPA_4_5_APQ][IPA_CLIENT_TEST1_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 0, 0, 8, 16, IPA_EE_AP } },
[IPA_4_5_APQ][IPA_CLIENT_TEST2_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 1, 1, 8, 16, IPA_EE_AP } },
[IPA_4_5_APQ][IPA_CLIENT_TEST3_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 9, 3, 8, 16, IPA_EE_AP } },
[IPA_4_5_APQ][IPA_CLIENT_TEST4_PROD] = {
- true, IPA_v4_5_GROUP_UL_DL_SRC,
+ true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 10, 10, 8, 16, IPA_EE_AP } },
[IPA_4_5_APQ][IPA_CLIENT_WLAN2_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 23, 8, 8, 14, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
[IPA_4_5_APQ][IPA_CLIENT_WIGIG1_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 14, 14, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
[IPA_4_5_APQ][IPA_CLIENT_WIGIG2_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 20, 18, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
[IPA_4_5_APQ][IPA_CLIENT_WIGIG3_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 22, 5, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
[IPA_4_5_APQ][IPA_CLIENT_WIGIG4_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 29, 10, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
[IPA_4_5_APQ][IPA_CLIENT_USB_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 24, 9, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5_APQ][IPA_CLIENT_USB_DPL_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 16, 16, 5, 5, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5_APQ][IPA_CLIENT_APPS_LAN_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 13, 13, 9, 9, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 } },
[IPA_4_5_APQ][IPA_CLIENT_ODL_DPL_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
@@ -2585,38 +2584,38 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
/* Only for test purpose */
/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
[IPA_4_5_APQ][IPA_CLIENT_TEST_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 16, 16, 5, 5, IPA_EE_AP } },
[IPA_4_5_APQ][IPA_CLIENT_TEST1_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 16, 16, 5, 5, IPA_EE_AP } },
[IPA_4_5_APQ][IPA_CLIENT_TEST2_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 22, 5, 9, 9, IPA_EE_AP } },
[IPA_4_5_APQ][IPA_CLIENT_TEST3_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 24, 9, 9, 9, IPA_EE_AP } },
[IPA_4_5_APQ][IPA_CLIENT_TEST4_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 23, 8, 8, 13, IPA_EE_AP } },
/* Dummy consumer (pipe 31) is used in L2TP rt rule */
[IPA_4_5_APQ][IPA_CLIENT_DUMMY_CONS] = {
- true, IPA_v4_5_GROUP_UL_DL_DST,
+ true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
@@ -6693,7 +6692,7 @@ static void ipa3_write_rsrc_grp_type_reg(int group_index,
if (src) {
switch (group_index) {
case IPA_v4_5_MHI_GROUP_PCIE:
- case IPA_v4_5_GROUP_UL_DL_SRC:
+ case IPA_v4_5_GROUP_UL_DL:
ipahal_write_reg_n_fields(
IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
n, val);
@@ -6717,8 +6716,8 @@ static void ipa3_write_rsrc_grp_type_reg(int group_index,
}
} else {
switch (group_index) {
- case IPA_v4_5_GROUP_UL_DL_DST:
- case IPA_v4_5_MHI_GROUP_DDR:
+ case IPA_v4_5_MHI_GROUP_PCIE:
+ case IPA_v4_5_GROUP_UL_DL:
ipahal_write_reg_n_fields(
IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
n, val);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
index 9cbaa39..1e98d20 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
@@ -74,6 +74,7 @@ static int ipa3_setup_wdi3_gsi_channel(u8 is_smmu_enabled,
const struct ipa_gsi_ep_config *gsi_ep_info;
int result, len;
unsigned long va;
+ uint32_t addr_low, addr_high;
if (!info || !info_smmu || !ep) {
IPAERR("invalid input\n");
@@ -211,17 +212,105 @@ static int ipa3_setup_wdi3_gsi_channel(u8 is_smmu_enabled,
IPAERR("failed to write evt ring scratch\n");
goto fail_write_scratch;
}
- /* write event ring db address */
+
+ if (!is_smmu_enabled) {
+ IPADBG("smmu disabled\n");
+ if (info->is_evt_rn_db_pcie_addr == true)
+ IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
+ else
+ IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
+ IPADBG_LOW("LSB 0x%x\n",
+ (u32)info->event_ring_doorbell_pa);
+ IPADBG_LOW("MSB 0x%x\n",
+ (u32)((u64)info->event_ring_doorbell_pa >> 32));
+ } else {
+ IPADBG("smmu enabled\n");
+ if (info_smmu->is_evt_rn_db_pcie_addr == true)
+ IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
+ else
+ IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
+ IPADBG_LOW("LSB 0x%x\n",
+ (u32)info_smmu->event_ring_doorbell_pa);
+ IPADBG_LOW("MSB 0x%x\n",
+ (u32)((u64)info_smmu->event_ring_doorbell_pa >> 32));
+ }
+
+ if (!is_smmu_enabled) {
+ addr_low = (u32)info->event_ring_doorbell_pa;
+ addr_high = (u32)((u64)info->event_ring_doorbell_pa >> 32);
+ } else {
+ if (dir == IPA_WDI3_TX_DIR) {
+ if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_DB_RES,
+ true, info_smmu->event_ring_doorbell_pa,
+ NULL, 4, true, &va)) {
+ IPAERR("failed to get smmu mapping\n");
+ result = -EFAULT;
+ goto fail_write_scratch;
+ }
+ } else {
+ if (ipa_create_gsi_smmu_mapping(
+ IPA_WDI_RX_COMP_RING_WP_RES,
+ true, info_smmu->event_ring_doorbell_pa,
+ NULL, 4, true, &va)) {
+ IPAERR("failed to get smmu mapping\n");
+ result = -EFAULT;
+ goto fail_write_scratch;
+ }
+ }
+ addr_low = (u32)va;
+ addr_high = (u32)((u64)va >> 32);
+ }
+
+ /*
+ * Arch specific:
+	 * For PCIe addresses that are not routed via SMMU, use the PA directly.
+	 * PCIe and DDR are accessed via two different ports, so
+	 * assert bit 40 to indicate a PCIe address.
+	 * WDI-3.0, MSM --> PCIe routed via SMMU
+	 * WDI-3.0, MDM --> PCIe not via SMMU + dual port;
+	 * assert bit 40 in that case.
+ */
+ if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+ is_smmu_enabled) {
+ /*
+		 * Irrespective of SMMU being enabled, don't use the IOVA addr,
+		 * since PCIe is not routed via SMMU on MDM targets
+ */
+ if (info_smmu->is_evt_rn_db_pcie_addr == true) {
+ addr_low = (u32)info_smmu->event_ring_doorbell_pa;
+ addr_high =
+ (u32)((u64)info_smmu->event_ring_doorbell_pa
+ >> 32);
+ }
+ }
+
+ /*
+	 * GSI recommendation to set bit-40 for (mdm targets && pcie addr)
+ * from wdi-3.0 interface document
+ */
+ if (!is_smmu_enabled) {
+ if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+ info->is_evt_rn_db_pcie_addr)
+ addr_high |= (1 << 8);
+ } else {
+ if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+ info_smmu->is_evt_rn_db_pcie_addr)
+ addr_high |= (1 << 8);
+ }
+
gsi_wdi3_write_evt_ring_db(ep->gsi_evt_ring_hdl,
- (u32)info->event_ring_doorbell_pa,
- (u32)((u64)info->event_ring_doorbell_pa >> 32));
+ addr_low,
+ addr_high);
/* write channel scratch */
memset(&ch_scratch, 0, sizeof(ch_scratch));
ch_scratch.wdi3.update_rp_moderation_threshold =
UPDATE_RP_MODERATION_THRESHOLD;
if (dir == IPA_WDI3_RX_DIR) {
- ch_scratch.wdi3.rx_pkt_offset = info->pkt_offset;
+ if (!is_smmu_enabled)
+ ch_scratch.wdi3.rx_pkt_offset = info->pkt_offset;
+ else
+ ch_scratch.wdi3.rx_pkt_offset = info_smmu->pkt_offset;
/* this metadata reg offset need to be in words */
ch_scratch.wdi3.endp_metadata_reg_offset =
ipahal_get_reg_mn_ofst(IPA_ENDP_INIT_HDR_METADATA_n, 0,
@@ -229,6 +318,28 @@ static int ipa3_setup_wdi3_gsi_channel(u8 is_smmu_enabled,
}
if (!is_smmu_enabled) {
+ IPADBG_LOW("smmu disabled\n");
+ if (info->is_txr_rn_db_pcie_addr == true)
+ IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
+ else
+ IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
+ IPADBG_LOW("LSB 0x%x\n",
+ (u32)info->transfer_ring_doorbell_pa);
+ IPADBG_LOW("MSB 0x%x\n",
+ (u32)((u64)info->transfer_ring_doorbell_pa >> 32));
+ } else {
+		IPADBG_LOW("smmu enabled\n");
+ if (info_smmu->is_txr_rn_db_pcie_addr == true)
+ IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
+ else
+ IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
+ IPADBG_LOW("LSB 0x%x\n",
+ (u32)info_smmu->transfer_ring_doorbell_pa);
+ IPADBG_LOW("MSB 0x%x\n",
+ (u32)((u64)info_smmu->transfer_ring_doorbell_pa >> 32));
+ }
+
+ if (!is_smmu_enabled) {
ch_scratch.wdi3.wifi_rp_address_low =
(u32)info->transfer_ring_doorbell_pa;
ch_scratch.wdi3.wifi_rp_address_high =
@@ -262,6 +373,49 @@ static int ipa3_setup_wdi3_gsi_channel(u8 is_smmu_enabled,
(u32)((u64)va >> 32);
}
}
+
+ /*
+ * Arch specific:
+	 * For PCIe addresses that are not routed via SMMU, use the PA directly.
+	 * PCIe and DDR are accessed via two different ports, so
+	 * assert bit 40 to indicate a PCIe address.
+	 * WDI-3.0, MSM --> PCIe routed via SMMU
+	 * WDI-3.0, MDM --> PCIe not via SMMU + dual port;
+	 * assert bit 40 in that case.
+ */
+ if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+ is_smmu_enabled) {
+ /*
+		 * Irrespective of SMMU being enabled, don't use the IOVA addr,
+		 * since PCIe is not routed via SMMU on MDM targets
+ */
+ if (info_smmu->is_txr_rn_db_pcie_addr == true) {
+ ch_scratch.wdi3.wifi_rp_address_low =
+ (u32)info_smmu->transfer_ring_doorbell_pa;
+ ch_scratch.wdi3.wifi_rp_address_high =
+ (u32)((u64)info_smmu->transfer_ring_doorbell_pa
+ >> 32);
+ }
+ }
+
+ /*
+	 * GSI recommendation to set bit-40 for (mdm targets && pcie addr)
+ * from wdi-3.0 interface document
+ */
+ if (!is_smmu_enabled) {
+ if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+ info->is_txr_rn_db_pcie_addr)
+ ch_scratch.wdi3.wifi_rp_address_high =
+ (u32)((u32)ch_scratch.wdi3.wifi_rp_address_high |
+ (1 << 8));
+ } else {
+ if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
+ info_smmu->is_txr_rn_db_pcie_addr)
+ ch_scratch.wdi3.wifi_rp_address_high =
+ (u32)((u32)ch_scratch.wdi3.wifi_rp_address_high |
+ (1 << 8));
+ }
+
result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to write evt ring scratch\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
index 8e44841..2f02db7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
@@ -31,7 +31,7 @@
static int ipa3_wigig_uc_loaded_handler(struct notifier_block *self,
unsigned long val, void *data)
{
- IPADBG("val %d\n", val);
+	IPADBG("val %lu\n", val);
if (!ipa3_ctx) {
IPAERR("IPA ctx is null\n");
@@ -829,7 +829,7 @@ int ipa3_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out)
if (
IPA_WIGIG_MSB(input->dbuff.data_buffer_base_pa) & 0xFFFFFF00) {
IPAERR(
- "data_buffers_base_address_msb is over the 8 bit limit (0xpa)\n"
+ "data_buffers_base_address_msb is over the 8 bit limit (0x%pa)\n"
, &input->dbuff.data_buffer_base_pa);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return -EFAULT;
@@ -971,7 +971,7 @@ int ipa3_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out)
!= IPA_WIGIG_8_MSB(
input_smmu->pipe_smmu.status_ring_HWTAIL_pa)) {
IPAERR(
- "status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%X tail 0x%X\n"
+ "status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%llX tail 0x%llX\n"
, input_smmu->pipe_smmu.status_ring_HWHEAD_pa,
input_smmu->pipe_smmu.status_ring_HWTAIL_pa);
return -EFAULT;
@@ -1010,7 +1010,7 @@ int ipa3_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out)
!= IPA_WIGIG_8_MSB(
input->pipe.status_ring_HWTAIL_pa)) {
IPAERR(
- "status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%X tail 0x%X\n"
+ "status ring HWHEAD and HWTAIL differ in 8 MSbs head 0x%llX tail 0x%llX\n"
, input->pipe.status_ring_HWHEAD_pa,
input->pipe.status_ring_HWTAIL_pa);
return -EFAULT;
@@ -1472,7 +1472,7 @@ int ipa3_enable_wigig_pipe_i(enum ipa_client_type client)
ep->gsi_mem_info.chan_ring_len -
IPA_WIGIG_DESC_RING_EL_SIZE;
- IPADBG("ring ch doorbell (0x%llX) TX %d\n", val,
+		IPADBG("ring ch doorbell (0x%llX) TX %lu\n", val,
ep->gsi_chan_hdl);
res = gsi_ring_ch_ring_db(ep->gsi_chan_hdl, val);
if (res) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
index 45efd47..3f38a3a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -1182,6 +1182,13 @@ static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset,
memcpy(base + offset, hdr, hdr_len);
}
+/* Header address update logic. */
+#define IPAHAL_CP_PROC_CTX_HEADER_UPDATE(hdr_lsb, hdr_msb, addr) \
+ do { \
+ hdr_lsb = lower_32_bits(addr); \
+ hdr_msb = upper_32_bits(addr); \
+ } while (0)
+
/*
* ipahal_cp_proc_ctx_to_hw_buff_v3() - copy processing context to
* base address and offset given.
@@ -1195,26 +1202,31 @@ static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset,
* @hdr_base_addr: base address in table
* @offset_entry: offset from hdr_base_addr in table
* @l2tp_params: l2tp parameters
+ * @is_64: Indicates whether header base address/dma base address is 64 bit.
*/
static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
void *const base, u32 offset,
u32 hdr_len, bool is_hdr_proc_ctx,
dma_addr_t phys_base, u64 hdr_base_addr,
struct ipa_hdr_offset_entry *offset_entry,
- struct ipa_l2tp_hdr_proc_ctx_params l2tp_params)
+ struct ipa_l2tp_hdr_proc_ctx_params l2tp_params, bool is_64)
{
+ u64 hdr_addr;
+
if (type == IPA_HDR_PROC_NONE) {
struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx;
ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_seq *)
(base + offset);
ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
- ctx->hdr_add.tlv.length = 1;
+ ctx->hdr_add.tlv.length = is_64 ? 2 : 1;
ctx->hdr_add.tlv.value = hdr_len;
- ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+ hdr_addr = is_hdr_proc_ctx ? phys_base :
hdr_base_addr + offset_entry->offset;
IPAHAL_DBG("header address 0x%llx\n",
- ctx->hdr_add.hdr_addr);
+ hdr_addr);
+ IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+ ctx->hdr_add.hdr_addr_hi, hdr_addr);
ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
ctx->end.length = 0;
ctx->end.value = 0;
@@ -1224,12 +1236,14 @@ static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
ctx = (struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq *)
(base + offset);
ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
- ctx->hdr_add.tlv.length = 1;
+ ctx->hdr_add.tlv.length = is_64 ? 2 : 1;
ctx->hdr_add.tlv.value = hdr_len;
- ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+ hdr_addr = is_hdr_proc_ctx ? phys_base :
hdr_base_addr + offset_entry->offset;
IPAHAL_DBG("header address 0x%llx\n",
- ctx->hdr_add.hdr_addr);
+ hdr_addr);
+ IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+ ctx->hdr_add.hdr_addr_hi, hdr_addr);
ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
ctx->l2tp_params.tlv.length = 1;
ctx->l2tp_params.tlv.value =
@@ -1251,12 +1265,14 @@ static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
ctx = (struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq *)
(base + offset);
ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
- ctx->hdr_add.tlv.length = 1;
+ ctx->hdr_add.tlv.length = is_64 ? 2 : 1;
ctx->hdr_add.tlv.value = hdr_len;
- ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+ hdr_addr = is_hdr_proc_ctx ? phys_base :
hdr_base_addr + offset_entry->offset;
IPAHAL_DBG("header address 0x%llx length %d\n",
- ctx->hdr_add.hdr_addr, ctx->hdr_add.tlv.value);
+ hdr_addr, ctx->hdr_add.tlv.value);
+ IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+ ctx->hdr_add.hdr_addr_hi, hdr_addr);
ctx->l2tp_params.tlv.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
ctx->l2tp_params.tlv.length = 1;
ctx->l2tp_params.tlv.value =
@@ -1287,12 +1303,14 @@ static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *)
(base + offset);
ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
- ctx->hdr_add.tlv.length = 1;
+ ctx->hdr_add.tlv.length = is_64 ? 2 : 1;
ctx->hdr_add.tlv.value = hdr_len;
- ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+ hdr_addr = is_hdr_proc_ctx ? phys_base :
hdr_base_addr + offset_entry->offset;
IPAHAL_DBG("header address 0x%llx\n",
- ctx->hdr_add.hdr_addr);
+ hdr_addr);
+ IPAHAL_CP_PROC_CTX_HEADER_UPDATE(ctx->hdr_add.hdr_addr,
+ ctx->hdr_add.hdr_addr_hi, hdr_addr);
ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
ctx->cmd.length = 0;
switch (type) {
@@ -1350,7 +1368,8 @@ struct ipahal_hdr_funcs {
bool is_hdr_proc_ctx, dma_addr_t phys_base,
u64 hdr_base_addr,
struct ipa_hdr_offset_entry *offset_entry,
- struct ipa_l2tp_hdr_proc_ctx_params l2tp_params);
+ struct ipa_l2tp_hdr_proc_ctx_params l2tp_params,
+ bool is_64);
int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type);
};
@@ -1416,17 +1435,18 @@ void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *const hdr,
* @hdr_base_addr: base address in table
* @offset_entry: offset from hdr_base_addr in table
* @l2tp_params: l2tp parameters
+ * @is_64: Indicates whether header base address/dma base address is 64 bit.
*/
int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
void *const base, u32 offset, u32 hdr_len,
bool is_hdr_proc_ctx, dma_addr_t phys_base,
u64 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry,
- struct ipa_l2tp_hdr_proc_ctx_params l2tp_params)
+ struct ipa_l2tp_hdr_proc_ctx_params l2tp_params, bool is_64)
{
IPAHAL_DBG(
- "type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %llu, offset_entry %pK\n"
+ "type %d, base %pK, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %llu, offset_entry %pK, bool %d\n"
, type, base, offset, hdr_len, is_hdr_proc_ctx,
- hdr_base_addr, offset_entry);
+ hdr_base_addr, offset_entry, is_64);
if (!base ||
!hdr_len ||
@@ -1442,7 +1462,7 @@ int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
hdr_len, is_hdr_proc_ctx, phys_base,
- hdr_base_addr, offset_entry, l2tp_params);
+ hdr_base_addr, offset_entry, l2tp_params, is_64);
}
/*
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
index fb2ba48..942fa521 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -630,13 +630,14 @@ void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *hdr, u32 hdr_len);
* @hdr_base_addr: base address in table
* @offset_entry: offset from hdr_base_addr in table
* @l2tp_params: l2tp parameters
+ * @is_64: Indicates whether header base address/dma base address is 64 bit.
*/
int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
void *base, u32 offset, u32 hdr_len,
bool is_hdr_proc_ctx, dma_addr_t phys_base,
u64 hdr_base_addr,
struct ipa_hdr_offset_entry *offset_entry,
- struct ipa_l2tp_hdr_proc_ctx_params l2tp_params);
+ struct ipa_l2tp_hdr_proc_ctx_params l2tp_params, bool is_64);
/*
* ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
index 70ff6f5..6811244 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -648,7 +648,8 @@ struct ipa_hw_hdr_proc_ctx_tlv {
*/
struct ipa_hw_hdr_proc_ctx_hdr_add {
struct ipa_hw_hdr_proc_ctx_tlv tlv;
- u64 hdr_addr;
+ u32 hdr_addr;
+ u32 hdr_addr_hi;
};
/**
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index 1802f16..e16246b 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -993,6 +993,9 @@ int geni_se_clk_freq_match(struct se_geni_rsc *rsc, unsigned long req_freq,
unsigned long *tbl;
int num_clk_levels;
int i;
+ unsigned long best_delta = 0;
+ unsigned long new_delta;
+ unsigned int divider;
num_clk_levels = geni_se_clk_tbl_get(rsc, &tbl);
if (num_clk_levels < 0)
@@ -1002,17 +1005,21 @@ int geni_se_clk_freq_match(struct se_geni_rsc *rsc, unsigned long req_freq,
return -EFAULT;
*res_freq = 0;
- for (i = 0; i < num_clk_levels; i++) {
- if (!(tbl[i] % req_freq)) {
- *index = i;
- *res_freq = tbl[i];
- return 0;
- }
- if (!(*res_freq) || ((tbl[i] > *res_freq) &&
- (tbl[i] < req_freq))) {
+ for (i = 0; i < num_clk_levels; i++) {
+ divider = DIV_ROUND_UP(tbl[i], req_freq);
+ new_delta = req_freq - (tbl[i] / divider);
+
+ if (!best_delta || new_delta < best_delta) {
+ /* We have a new best! */
*index = i;
*res_freq = tbl[i];
+
+			/* If the new best is exact then we're done */
+ if (new_delta == 0)
+ return 0;
+
+ best_delta = new_delta;
}
}
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index 81c2ec5..b325676 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -2328,8 +2328,11 @@ int sps_deregister_bam_device(unsigned long dev_handle)
mutex_lock(&bam->lock);
sps_bam_device_de_init(bam);
mutex_unlock(&bam->lock);
+ ipc_log_context_destroy(bam->ipc_log0);
ipc_log_context_destroy(bam->ipc_log1);
ipc_log_context_destroy(bam->ipc_log2);
+ ipc_log_context_destroy(bam->ipc_log3);
+ ipc_log_context_destroy(bam->ipc_log4);
if (bam->props.virt_size)
(void)iounmap(bam->props.virt_addr);
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index d16e4b7..301006d 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -450,6 +450,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(qc_opti_disable),
POWER_SUPPLY_ATTR(cc_soc),
POWER_SUPPLY_ATTR(batt_age_level),
+ POWER_SUPPLY_ATTR(scale_mode_en),
/* Charge pump properties */
POWER_SUPPLY_ATTR(cp_status1),
POWER_SUPPLY_ATTR(cp_status2),
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen4.c b/drivers/power/supply/qcom/qpnp-fg-gen4.c
index e053a07..4b84efd 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen4.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen4.c
@@ -264,6 +264,7 @@ struct fg_gen4_chip {
bool rslow_low;
bool rapid_soc_dec_en;
bool vbatt_low;
+ bool chg_term_good;
};
struct bias_config {
@@ -2348,8 +2349,16 @@ static int fg_gen4_adjust_recharge_soc(struct fg_gen4_chip *chip)
new_recharge_soc = msoc - (FULL_CAPACITY -
recharge_soc);
fg->recharge_soc_adjusted = true;
+ if (fg->health == POWER_SUPPLY_HEALTH_GOOD)
+ chip->chg_term_good = true;
} else {
- /* adjusted already, do nothing */
+ /*
+ * If charge termination happened properly then
+ * do nothing.
+ */
+ if (chip->chg_term_good)
+ return 0;
+
if (fg->health != POWER_SUPPLY_HEALTH_GOOD)
return 0;
@@ -2360,7 +2369,7 @@ static int fg_gen4_adjust_recharge_soc(struct fg_gen4_chip *chip)
new_recharge_soc = recharge_soc;
fg->recharge_soc_adjusted = false;
- return 0;
+ chip->chg_term_good = false;
}
} else {
if (!fg->recharge_soc_adjusted)
@@ -2379,11 +2388,13 @@ static int fg_gen4_adjust_recharge_soc(struct fg_gen4_chip *chip)
/* Restore the default value */
new_recharge_soc = recharge_soc;
fg->recharge_soc_adjusted = false;
+ chip->chg_term_good = false;
}
} else {
/* Restore the default value */
new_recharge_soc = recharge_soc;
fg->recharge_soc_adjusted = false;
+ chip->chg_term_good = false;
}
if (recharge_soc_status == fg->recharge_soc_adjusted)
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index 9536937..5e85290 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -322,24 +322,29 @@ static int smb5_chg_config_init(struct smb5 *chip)
chip->chg.smb_version = PM8150B_SUBTYPE;
chg->param = smb5_pm8150b_params;
chg->name = "pm8150b_charger";
+ chg->wa_flags |= CHG_TERMINATION_WA;
break;
case PM6150_SUBTYPE:
chip->chg.smb_version = PM6150_SUBTYPE;
chg->param = smb5_pm8150b_params;
chg->name = "pm6150_charger";
- chg->wa_flags |= SW_THERM_REGULATION_WA;
+ chg->wa_flags |= SW_THERM_REGULATION_WA | CHG_TERMINATION_WA;
if (pmic_rev_id->rev4 >= 2)
chg->uusb_moisture_protection_capable = true;
chg->main_fcc_max = PM6150_MAX_FCC_UA;
break;
case PMI632_SUBTYPE:
chip->chg.smb_version = PMI632_SUBTYPE;
- chg->wa_flags |= WEAK_ADAPTER_WA | USBIN_OV_WA;
+ chg->wa_flags |= WEAK_ADAPTER_WA | USBIN_OV_WA
+ | CHG_TERMINATION_WA;
chg->param = smb5_pmi632_params;
chg->use_extcon = true;
chg->name = "pmi632_charger";
/* PMI632 does not support PD */
chg->pd_not_supported = true;
+ chg->lpd_disabled = true;
+ if (pmic_rev_id->rev4 >= 2)
+ chg->uusb_moisture_protection_enabled = true;
chg->hw_max_icl_ua =
(chip->dt.usb_icl_ua > 0) ? chip->dt.usb_icl_ua
: PMI632_MAX_ICL_UA;
@@ -436,6 +441,8 @@ static int smb5_parse_dt_misc(struct smb5 *chip, struct device_node *node)
chg->pd_not_supported = of_property_read_bool(node,
"qcom,usb-pd-disable");
+ chg->lpd_disabled = of_property_read_bool(node, "qcom,lpd-disable");
+
rc = of_property_read_u32(node, "qcom,wd-bark-time-secs",
&chip->dt.wd_bark_time);
if (rc < 0 || chip->dt.wd_bark_time < MIN_WD_BARK_TIME)
@@ -1831,10 +1838,10 @@ static int smb5_configure_typec(struct smb_charger *chg)
return rc;
}
+ val = chg->lpd_disabled ? 0 : TYPEC_WATER_DETECTION_INT_EN_BIT;
/* Use simple write to enable only required interrupts */
rc = smblib_write(chg, TYPE_C_INTERRUPT_EN_CFG_2_REG,
- TYPEC_SRC_BATT_HPWR_INT_EN_BIT |
- TYPEC_WATER_DETECTION_INT_EN_BIT);
+ TYPEC_SRC_BATT_HPWR_INT_EN_BIT | val);
if (rc < 0) {
dev_err(chg->dev,
"Couldn't configure Type-C interrupts rc=%d\n", rc);
@@ -1926,7 +1933,9 @@ static int smb5_configure_micro_usb(struct smb_charger *chg)
}
/* Disable periodic monitoring of CC_ID pin */
- rc = smblib_write(chg, TYPEC_U_USB_WATER_PROTECTION_CFG_REG, 0);
+ rc = smblib_write(chg, ((chg->smb_version == PMI632_SUBTYPE) ?
+ PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
+ TYPEC_U_USB_WATER_PROTECTION_CFG_REG), 0);
if (rc < 0) {
dev_err(chg->dev, "Couldn't disable periodic monitoring of CC_ID rc=%d\n",
rc);
@@ -2090,12 +2099,189 @@ static int smb5_init_dc_peripheral(struct smb_charger *chg)
return rc;
}
+static int smb5_configure_recharging(struct smb5 *chip)
+{
+ int rc = 0;
+ struct smb_charger *chg = &chip->chg;
+ union power_supply_propval pval;
+ /* Configure VBATT-based or automatic recharging */
+
+ rc = smblib_masked_write(chg, CHGR_CFG2_REG, RECHG_MASK,
+ (chip->dt.auto_recharge_vbat_mv != -EINVAL) ?
+ VBAT_BASED_RECHG_BIT : 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure VBAT-rechg CHG_CFG2_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* program the auto-recharge VBAT threshold */
+ if (chip->dt.auto_recharge_vbat_mv != -EINVAL) {
+ u32 temp = VBAT_TO_VRAW_ADC(chip->dt.auto_recharge_vbat_mv);
+
+ temp = ((temp & 0xFF00) >> 8) | ((temp & 0xFF) << 8);
+ rc = smblib_batch_write(chg,
+ CHGR_ADC_RECHARGE_THRESHOLD_MSB_REG, (u8 *)&temp, 2);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure ADC_RECHARGE_THRESHOLD REG rc=%d\n",
+ rc);
+ return rc;
+ }
+ /* Program the sample count for VBAT based recharge to 3 */
+ rc = smblib_masked_write(chg, CHGR_NO_SAMPLE_TERM_RCHG_CFG_REG,
+ NO_OF_SAMPLE_FOR_RCHG,
+ 2 << NO_OF_SAMPLE_FOR_RCHG_SHIFT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure CHGR_NO_SAMPLE_FOR_TERM_RCHG_CFG rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = smblib_masked_write(chg, CHGR_CFG2_REG, RECHG_MASK,
+ (chip->dt.auto_recharge_soc != -EINVAL) ?
+ SOC_BASED_RECHG_BIT : VBAT_BASED_RECHG_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure SOC-rechg CHG_CFG2_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* program the auto-recharge threshold */
+ if (chip->dt.auto_recharge_soc != -EINVAL) {
+ pval.intval = chip->dt.auto_recharge_soc;
+ rc = smblib_set_prop_rechg_soc_thresh(chg, &pval);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure CHG_RCHG_SOC_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* Program the sample count for SOC based recharge to 1 */
+ rc = smblib_masked_write(chg, CHGR_NO_SAMPLE_TERM_RCHG_CFG_REG,
+ NO_OF_SAMPLE_FOR_RCHG, 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure CHGR_NO_SAMPLE_FOR_TERM_RCHG_CFG rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int smb5_configure_float_charger(struct smb5 *chip)
+{
+ int rc = 0;
+ struct smb_charger *chg = &chip->chg;
+
+ /* configure float charger options */
+ switch (chip->dt.float_option) {
+ case FLOAT_DCP:
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FLOAT_OPTIONS_MASK, 0);
+ break;
+ case FLOAT_SDP:
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FLOAT_OPTIONS_MASK, FORCE_FLOAT_SDP_CFG_BIT);
+ break;
+ case DISABLE_CHARGING:
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FLOAT_OPTIONS_MASK, FLOAT_DIS_CHGING_CFG_BIT);
+ break;
+ case SUSPEND_INPUT:
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
+ FLOAT_OPTIONS_MASK, SUSPEND_FLOAT_CFG_BIT);
+ break;
+ default:
+ rc = 0;
+ break;
+ }
+
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure float charger options rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smblib_read(chg, USBIN_OPTIONS_2_CFG_REG, &chg->float_cfg);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read float charger options rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int smb5_init_connector_type(struct smb_charger *chg)
+{
+ int rc, type = 0;
+ u8 val = 0;
+
+ /*
+ * PMI632 can have the connector type defined by a dedicated register
+ * PMI632_TYPEC_MICRO_USB_MODE_REG or by a common TYPEC_U_USB_CFG_REG.
+ */
+ if (chg->smb_version == PMI632_SUBTYPE) {
+ rc = smblib_read(chg, PMI632_TYPEC_MICRO_USB_MODE_REG, &val);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read USB mode rc=%d\n", rc);
+ return rc;
+ }
+ type = !!(val & MICRO_USB_MODE_ONLY_BIT);
+ }
+
+ /*
+ * If PMI632_TYPEC_MICRO_USB_MODE_REG is not set and for all non-PMI632
+ * check the connector type using TYPEC_U_USB_CFG_REG.
+ */
+ if (!type) {
+ rc = smblib_read(chg, TYPEC_U_USB_CFG_REG, &val);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read U_USB config rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ type = !!(val & EN_MICRO_USB_MODE_BIT);
+ }
+
+ pr_debug("Connector type=%s\n", type ? "Micro USB" : "TypeC");
+
+ if (type) {
+ chg->connector_type = POWER_SUPPLY_CONNECTOR_MICRO_USB;
+ rc = smb5_configure_micro_usb(chg);
+ } else {
+ chg->connector_type = POWER_SUPPLY_CONNECTOR_TYPEC;
+ rc = smb5_configure_typec(chg);
+ }
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure TypeC/micro-USB mode rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * PMI632 based hw init:
+ * - Rerun APSD to ensure proper charger detection if device
+ * boots with charger connected.
+ * - Initialize flash module for PMI632
+ */
+ if (chg->smb_version == PMI632_SUBTYPE) {
+ schgm_flash_init(chg);
+ smblib_rerun_apsd_if_required(chg);
+ }
+
+ return 0;
+
+}
+
static int smb5_init_hw(struct smb5 *chip)
{
struct smb_charger *chg = &chip->chg;
- int rc, type = 0;
+ int rc;
u8 val = 0, mask = 0;
- union power_supply_propval pval;
if (chip->dt.no_battery)
chg->fake_capacity = 50;
@@ -2170,60 +2356,13 @@ static int smb5_init_hw(struct smb5 *chip)
return rc;
}
- /*
- * PMI632 can have the connector type defined by a dedicated register
- * TYPEC_MICRO_USB_MODE_REG or by a common TYPEC_U_USB_CFG_REG.
- */
- if (chg->smb_version == PMI632_SUBTYPE) {
- rc = smblib_read(chg, TYPEC_MICRO_USB_MODE_REG, &val);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't read USB mode rc=%d\n", rc);
- return rc;
- }
- type = !!(val & MICRO_USB_MODE_ONLY_BIT);
- }
-
- /*
- * If TYPEC_MICRO_USB_MODE_REG is not set and for all non-PMI632
- * check the connector type using TYPEC_U_USB_CFG_REG.
- */
- if (!type) {
- rc = smblib_read(chg, TYPEC_U_USB_CFG_REG, &val);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't read U_USB config rc=%d\n",
- rc);
- return rc;
- }
-
- type = !!(val & EN_MICRO_USB_MODE_BIT);
- }
-
- pr_debug("Connector type=%s\n", type ? "Micro USB" : "TypeC");
-
- if (type) {
- chg->connector_type = POWER_SUPPLY_CONNECTOR_MICRO_USB;
- rc = smb5_configure_micro_usb(chg);
- } else {
- chg->connector_type = POWER_SUPPLY_CONNECTOR_TYPEC;
- rc = smb5_configure_typec(chg);
- }
+ rc = smb5_init_connector_type(chg);
if (rc < 0) {
- dev_err(chg->dev,
- "Couldn't configure TypeC/micro-USB mode rc=%d\n", rc);
+ dev_err(chg->dev, "Couldn't configure connector type rc=%d\n",
+ rc);
return rc;
}
- /*
- * PMI632 based hw init:
- * - Rerun APSD to ensure proper charger detection if device
- * boots with charger connected.
- * - Initialize flash module for PMI632
- */
- if (chg->smb_version == PMI632_SUBTYPE) {
- schgm_flash_init(chg);
- smblib_rerun_apsd_if_required(chg);
- }
-
/* Use ICL results from HW */
rc = smblib_icl_override(chg, HW_AUTO_MODE);
if (rc < 0) {
@@ -2307,7 +2446,7 @@ static int smb5_init_hw(struct smb5 *chip)
val = (ilog2(chip->dt.wd_bark_time / 16) << BARK_WDOG_TIMEOUT_SHIFT)
& BARK_WDOG_TIMEOUT_MASK;
- val |= BITE_WDOG_TIMEOUT_8S;
+ val |= (BITE_WDOG_TIMEOUT_8S | BITE_WDOG_DISABLE_CHARGING_CFG_BIT);
rc = smblib_masked_write(chg, SNARL_BARK_BITE_WD_CFG_REG,
BITE_WDOG_DISABLE_CHARGING_CFG_BIT |
BARK_WDOG_TIMEOUT_MASK | BITE_WDOG_TIMEOUT_MASK,
@@ -2337,41 +2476,9 @@ static int smb5_init_hw(struct smb5 *chip)
return rc;
}
- /* configure float charger options */
- switch (chip->dt.float_option) {
- case FLOAT_DCP:
- rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
- FLOAT_OPTIONS_MASK, 0);
- break;
- case FLOAT_SDP:
- rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
- FLOAT_OPTIONS_MASK, FORCE_FLOAT_SDP_CFG_BIT);
- break;
- case DISABLE_CHARGING:
- rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
- FLOAT_OPTIONS_MASK, FLOAT_DIS_CHGING_CFG_BIT);
- break;
- case SUSPEND_INPUT:
- rc = smblib_masked_write(chg, USBIN_OPTIONS_2_CFG_REG,
- FLOAT_OPTIONS_MASK, SUSPEND_FLOAT_CFG_BIT);
- break;
- default:
- rc = 0;
- break;
- }
-
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't configure float charger options rc=%d\n",
- rc);
+ rc = smb5_configure_float_charger(chip);
+ if (rc < 0)
return rc;
- }
-
- rc = smblib_read(chg, USBIN_OPTIONS_2_CFG_REG, &chg->float_cfg);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't read float charger options rc=%d\n",
- rc);
- return rc;
- }
switch (chip->dt.chg_inhibit_thr_mv) {
case 50:
@@ -2415,66 +2522,9 @@ static int smb5_init_hw(struct smb5 *chip)
return rc;
}
- rc = smblib_masked_write(chg, CHGR_CFG2_REG, RECHG_MASK,
- (chip->dt.auto_recharge_vbat_mv != -EINVAL) ?
- VBAT_BASED_RECHG_BIT : 0);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't configure VBAT-rechg CHG_CFG2_REG rc=%d\n",
- rc);
+ rc = smb5_configure_recharging(chip);
+ if (rc < 0)
return rc;
- }
-
- /* program the auto-recharge VBAT threshold */
- if (chip->dt.auto_recharge_vbat_mv != -EINVAL) {
- u32 temp = VBAT_TO_VRAW_ADC(chip->dt.auto_recharge_vbat_mv);
-
- temp = ((temp & 0xFF00) >> 8) | ((temp & 0xFF) << 8);
- rc = smblib_batch_write(chg,
- CHGR_ADC_RECHARGE_THRESHOLD_MSB_REG, (u8 *)&temp, 2);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't configure ADC_RECHARGE_THRESHOLD REG rc=%d\n",
- rc);
- return rc;
- }
- /* Program the sample count for VBAT based recharge to 3 */
- rc = smblib_masked_write(chg, CHGR_NO_SAMPLE_TERM_RCHG_CFG_REG,
- NO_OF_SAMPLE_FOR_RCHG,
- 2 << NO_OF_SAMPLE_FOR_RCHG_SHIFT);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't configure CHGR_NO_SAMPLE_FOR_TERM_RCHG_CFG rc=%d\n",
- rc);
- return rc;
- }
- }
-
- rc = smblib_masked_write(chg, CHGR_CFG2_REG, RECHG_MASK,
- (chip->dt.auto_recharge_soc != -EINVAL) ?
- SOC_BASED_RECHG_BIT : VBAT_BASED_RECHG_BIT);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't configure SOC-rechg CHG_CFG2_REG rc=%d\n",
- rc);
- return rc;
- }
-
- /* program the auto-recharge threshold */
- if (chip->dt.auto_recharge_soc != -EINVAL) {
- pval.intval = chip->dt.auto_recharge_soc;
- rc = smblib_set_prop_rechg_soc_thresh(chg, &pval);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't configure CHG_RCHG_SOC_REG rc=%d\n",
- rc);
- return rc;
- }
-
- /* Program the sample count for SOC based recharge to 1 */
- rc = smblib_masked_write(chg, CHGR_NO_SAMPLE_TERM_RCHG_CFG_REG,
- NO_OF_SAMPLE_FOR_RCHG, 0);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't configure CHGR_NO_SAMPLE_FOR_TERM_RCHG_CFG rc=%d\n",
- rc);
- return rc;
- }
- }
rc = smblib_disable_hw_jeita(chg, true);
if (rc < 0) {
@@ -3129,6 +3179,21 @@ static int smb5_probe(struct platform_device *pdev)
goto cleanup;
}
+ /* Support reporting polarity and speed via properties */
+ rc = extcon_set_property_capability(chg->extcon,
+ EXTCON_USB, EXTCON_PROP_USB_TYPEC_POLARITY);
+ rc |= extcon_set_property_capability(chg->extcon,
+ EXTCON_USB, EXTCON_PROP_USB_SS);
+ rc |= extcon_set_property_capability(chg->extcon,
+ EXTCON_USB_HOST, EXTCON_PROP_USB_TYPEC_POLARITY);
+ rc |= extcon_set_property_capability(chg->extcon,
+ EXTCON_USB_HOST, EXTCON_PROP_USB_SS);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "failed to configure extcon capabilities\n");
+ goto cleanup;
+ }
+
rc = smb5_init_hw(chip);
if (rc < 0) {
pr_err("Couldn't initialize hardware rc=%d\n", rc);
diff --git a/drivers/power/supply/qcom/smb1355-charger.c b/drivers/power/supply/qcom/smb1355-charger.c
index 916b160..469a276 100644
--- a/drivers/power/supply/qcom/smb1355-charger.c
+++ b/drivers/power/supply/qcom/smb1355-charger.c
@@ -47,6 +47,9 @@
#define BATT_GT_PRE_TO_FAST_BIT BIT(4)
#define ENABLE_CHARGING_BIT BIT(3)
+#define CHGR_CHARGING_ENABLE_CMD_REG (CHGR_BASE + 0x42)
+#define CHARGING_ENABLE_CMD_BIT BIT(0)
+
#define CHGR_CFG2_REG (CHGR_BASE + 0x51)
#define CHG_EN_SRC_BIT BIT(7)
#define CHG_EN_POLARITY_BIT BIT(6)
@@ -1032,7 +1035,17 @@ static int smb1355_init_hw(struct smb1355 *chip)
return rc;
}
- /* disable parallel charging path */
+ /*
+ * Disable command based SMB1355 enablement and disable parallel
+ * charging path by switching to command based mode.
+ */
+ rc = smb1355_masked_write(chip, CHGR_CHARGING_ENABLE_CMD_REG,
+ CHARGING_ENABLE_CMD_BIT, 0);
+ if (rc < 0) {
+ pr_err("Couldn't configure command bit, rc=%d\n", rc);
+ return rc;
+ }
+
rc = smb1355_set_parallel_charging(chip, true);
if (rc < 0) {
pr_err("Couldn't disable parallel path rc=%d\n", rc);
diff --git a/drivers/power/supply/qcom/smb1390-charger-psy.c b/drivers/power/supply/qcom/smb1390-charger-psy.c
index ff8f484..af4dc03 100644
--- a/drivers/power/supply/qcom/smb1390-charger-psy.c
+++ b/drivers/power/supply/qcom/smb1390-charger-psy.c
@@ -124,7 +124,6 @@ struct smb1390 {
struct votable *ilim_votable;
struct votable *fcc_votable;
struct votable *fv_votable;
- struct votable *cp_awake_votable;
/* power supplies */
struct power_supply *usb_psy;
@@ -138,7 +137,9 @@ struct smb1390 {
struct smb1390_iio iio;
int irq_status;
int taper_entry_fv;
- bool switcher_disabled;
+ bool switcher_enabled;
+ int die_temp;
+ bool suspended;
};
struct smb_irq {
@@ -276,8 +277,8 @@ static irqreturn_t default_irq_handler(int irq, void *data)
rc = smb1390_get_cp_en_status(chip, SWITCHER_EN, &enable);
if (!rc) {
- if (chip->switcher_disabled == enable) {
- chip->switcher_disabled = !chip->switcher_disabled;
+ if (chip->switcher_enabled != enable) {
+ chip->switcher_enabled = enable;
if (chip->fcc_votable)
rerun_election(chip->fcc_votable);
}
@@ -424,7 +425,7 @@ static int smb1390_disable_vote_cb(struct votable *votable, void *data,
struct smb1390 *chip = data;
int rc = 0;
- if (!is_psy_voter_available(chip))
+ if (!is_psy_voter_available(chip) || chip->suspended)
return -EAGAIN;
if (disable) {
@@ -432,10 +433,7 @@ static int smb1390_disable_vote_cb(struct votable *votable, void *data,
CMD_EN_SWITCHER_BIT, 0);
if (rc < 0)
return rc;
-
- vote(chip->cp_awake_votable, CP_VOTER, false, 0);
} else {
- vote(chip->cp_awake_votable, CP_VOTER, true, 0);
rc = smb1390_masked_write(chip, CORE_CONTROL1_REG,
CMD_EN_SWITCHER_BIT, CMD_EN_SWITCHER_BIT);
if (rc < 0)
@@ -454,7 +452,7 @@ static int smb1390_ilim_vote_cb(struct votable *votable, void *data,
struct smb1390 *chip = data;
int rc = 0;
- if (!is_psy_voter_available(chip))
+ if (!is_psy_voter_available(chip) || chip->suspended)
return -EAGAIN;
/* ILIM should always have at least one active vote */
@@ -483,20 +481,6 @@ static int smb1390_ilim_vote_cb(struct votable *votable, void *data,
return rc;
}
-static int smb1390_awake_vote_cb(struct votable *votable, void *data,
- int awake, const char *client)
-{
- struct smb1390 *chip = data;
-
- if (awake)
- __pm_stay_awake(chip->cp_ws);
- else
- __pm_relax(chip->cp_ws);
-
- pr_debug("client: %s awake: %d\n", client, awake);
- return 0;
-}
-
static int smb1390_notifier_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
@@ -705,12 +689,26 @@ static int smb1390_get_prop(struct power_supply *psy,
!get_effective_result(chip->disable_votable);
break;
case POWER_SUPPLY_PROP_CP_SWITCHER_EN:
- rc = smb1390_get_cp_en_status(chip, SWITCHER_EN, &enable);
- if (!rc)
- val->intval = enable;
+ if (chip->suspended) {
+ val->intval = chip->switcher_enabled;
+ } else {
+ rc = smb1390_get_cp_en_status(chip, SWITCHER_EN,
+ &enable);
+ if (!rc)
+ val->intval = enable;
+ }
break;
case POWER_SUPPLY_PROP_CP_DIE_TEMP:
- rc = smb1390_get_die_temp(chip, val);
+ if (chip->suspended) {
+ if (chip->die_temp != -ENODATA)
+ val->intval = chip->die_temp;
+ else
+ rc = -ENODATA;
+ } else {
+ rc = smb1390_get_die_temp(chip, val);
+ if (rc >= 0)
+ chip->die_temp = val->intval;
+ }
break;
case POWER_SUPPLY_PROP_CP_ISNS:
rc = smb1390_get_isns(chip, val);
@@ -844,11 +842,6 @@ static void smb1390_release_channels(struct smb1390 *chip)
static int smb1390_create_votables(struct smb1390 *chip)
{
- chip->cp_awake_votable = create_votable("CP_AWAKE", VOTE_SET_ANY,
- smb1390_awake_vote_cb, chip);
- if (IS_ERR(chip->cp_awake_votable))
- return PTR_ERR(chip->cp_awake_votable);
-
chip->disable_votable = create_votable("CP_DISABLE",
VOTE_SET_ANY, smb1390_disable_vote_cb, chip);
if (IS_ERR(chip->disable_votable))
@@ -880,7 +873,6 @@ static void smb1390_destroy_votables(struct smb1390 *chip)
{
destroy_votable(chip->disable_votable);
destroy_votable(chip->ilim_votable);
- destroy_votable(chip->cp_awake_votable);
}
static int smb1390_init_hw(struct smb1390 *chip)
@@ -989,7 +981,8 @@ static int smb1390_probe(struct platform_device *pdev)
chip->dev = &pdev->dev;
spin_lock_init(&chip->status_change_lock);
mutex_init(&chip->die_chan_lock);
- chip->switcher_disabled = true;
+ chip->die_temp = -ENODATA;
+ platform_set_drvdata(pdev, chip);
chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
if (!chip->regmap) {
@@ -1071,6 +1064,30 @@ static int smb1390_remove(struct platform_device *pdev)
return 0;
}
+static int smb1390_suspend(struct device *dev)
+{
+ struct smb1390 *chip = dev_get_drvdata(dev);
+
+ chip->suspended = true;
+ return 0;
+}
+
+static int smb1390_resume(struct device *dev)
+{
+ struct smb1390 *chip = dev_get_drvdata(dev);
+
+ chip->suspended = false;
+ rerun_election(chip->ilim_votable);
+ rerun_election(chip->disable_votable);
+
+ return 0;
+}
+
+static const struct dev_pm_ops smb1390_pm_ops = {
+ .suspend = smb1390_suspend,
+ .resume = smb1390_resume,
+};
+
static const struct of_device_id match_table[] = {
{ .compatible = "qcom,smb1390-charger-psy", },
{ },
@@ -1079,6 +1096,7 @@ static const struct of_device_id match_table[] = {
static struct platform_driver smb1390_driver = {
.driver = {
.name = "qcom,smb1390-charger-psy",
+ .pm = &smb1390_pm_ops,
.of_match_table = match_table,
},
.probe = smb1390_probe,
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 48f0165..2a23637 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -1079,6 +1079,9 @@ static void smblib_uusb_removal(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->pl_enable_work);
+ if (chg->wa_flags & CHG_TERMINATION_WA)
+ alarm_cancel(&chg->chg_termination_alarm);
+
if (chg->wa_flags & BOOST_BACK_WA) {
data = chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data;
if (data) {
@@ -1099,6 +1102,7 @@ static void smblib_uusb_removal(struct smb_charger *chg)
is_flash_active(chg) ? SDP_CURRENT_UA : SDP_100_MA);
vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
vote(chg->usb_icl_votable, HVDCP2_ICL_VOTER, false, 0);
+ vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0);
/* Remove SW thermal regulation WA votes */
vote(chg->usb_icl_votable, SW_THERM_REGULATION_VOTER, false, 0);
@@ -1423,13 +1427,15 @@ static int smblib_set_moisture_protection(struct smb_charger *chg,
/* Set 1% duty cycle on ID detection */
rc = smblib_masked_write(chg,
- TYPEC_U_USB_WATER_PROTECTION_CFG_REG,
- EN_MICRO_USB_WATER_PROTECTION_BIT |
- MICRO_USB_DETECTION_ON_TIME_CFG_MASK |
- MICRO_USB_DETECTION_PERIOD_CFG_MASK,
- EN_MICRO_USB_WATER_PROTECTION_BIT |
- MICRO_USB_DETECTION_ON_TIME_20_MS |
- MICRO_USB_DETECTION_PERIOD_X_100);
+ ((chg->smb_version == PMI632_SUBTYPE) ?
+ PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
+ TYPEC_U_USB_WATER_PROTECTION_CFG_REG),
+ EN_MICRO_USB_WATER_PROTECTION_BIT |
+ MICRO_USB_DETECTION_ON_TIME_CFG_MASK |
+ MICRO_USB_DETECTION_PERIOD_CFG_MASK,
+ EN_MICRO_USB_WATER_PROTECTION_BIT |
+ MICRO_USB_DETECTION_ON_TIME_20_MS |
+ MICRO_USB_DETECTION_PERIOD_X_100);
if (rc < 0) {
smblib_err(chg, "Couldn't set 1 percent CC_ID duty cycle rc=%d\n",
rc);
@@ -1454,7 +1460,9 @@ static int smblib_set_moisture_protection(struct smb_charger *chg,
}
/* Disable periodic monitoring of CC_ID pin */
- rc = smblib_write(chg, TYPEC_U_USB_WATER_PROTECTION_CFG_REG, 0);
+ rc = smblib_write(chg, ((chg->smb_version == PMI632_SUBTYPE) ?
+ PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
+ TYPEC_U_USB_WATER_PROTECTION_CFG_REG), 0);
if (rc < 0) {
smblib_err(chg, "Couldn't disable 1 percent CC_ID duty cycle rc=%d\n",
rc);
@@ -1832,6 +1840,16 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
return 0;
}
+ /*
+ * If charge termination WA is active and has suspended charging, then
+ * continue reporting charging status as FULL.
+ */
+ if (is_client_vote_enabled(chg->usb_icl_votable,
+ CHG_TERMINATION_VOTER)) {
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ return 0;
+ }
+
if (val->intval != POWER_SUPPLY_STATUS_CHARGING)
return 0;
@@ -2730,6 +2748,11 @@ int smblib_get_prop_dc_present(struct smb_charger *chg,
int rc;
u8 stat;
+ if (chg->smb_version == PMI632_SUBTYPE) {
+ val->intval = 0;
+ return 0;
+ }
+
rc = smblib_read(chg, DCIN_BASE + INT_RT_STS_OFFSET, &stat);
if (rc < 0) {
smblib_err(chg, "Couldn't read DCIN_RT_STS rc=%d\n", rc);
@@ -2746,11 +2769,22 @@ int smblib_get_prop_dc_online(struct smb_charger *chg,
int rc = 0;
u8 stat;
+ if (chg->smb_version == PMI632_SUBTYPE) {
+ val->intval = 0;
+ return 0;
+ }
+
if (get_client_vote(chg->dc_suspend_votable, USER_VOTER)) {
val->intval = false;
return rc;
}
+ if (is_client_vote_enabled(chg->dc_suspend_votable,
+ CHG_TERMINATION_VOTER)) {
+ rc = smblib_get_prop_dc_present(chg, val);
+ return rc;
+ }
+
rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
if (rc < 0) {
smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
@@ -2863,6 +2897,12 @@ int smblib_get_prop_usb_online(struct smb_charger *chg,
return rc;
}
+ if (is_client_vote_enabled(chg->usb_icl_votable,
+ CHG_TERMINATION_VOTER)) {
+ rc = smblib_get_prop_usb_present(chg, val);
+ return rc;
+ }
+
rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
if (rc < 0) {
smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
@@ -4216,6 +4256,38 @@ irqreturn_t default_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
+#define CHG_TERM_WA_ENTRY_DELAY_MS 300000 /* 5 min */
+#define CHG_TERM_WA_EXIT_DELAY_MS 60000 /* 1 min */
+static void smblib_eval_chg_termination(struct smb_charger *chg, u8 batt_status)
+{
+ union power_supply_propval pval = {0, };
+ int rc = 0;
+
+ rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CAPACITY, &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read SOC value, rc=%d\n", rc);
+ return;
+ }
+
+ /*
+ * Post charge termination, switch to BSM mode triggers the risk of
+ * over charging as BATFET opening may take some time post the necessity
+ * of staying in supplemental mode, leading to unintended charging of
+ * battery. Trigger the charge termination WA once charging is completed
+ * to prevent overcharging.
+ */
+ if ((batt_status == TERMINATE_CHARGE) && (pval.intval == 100)) {
+ alarm_start_relative(&chg->chg_termination_alarm,
+ ms_to_ktime(CHG_TERM_WA_ENTRY_DELAY_MS));
+ } else if (pval.intval < 100) {
+ /*
+ * Reset CC_SOC reference value for charge termination WA once
+ * we exit the TERMINATE_CHARGE state and soc drops below 100%
+ */
+ chg->cc_soc_ref = 0;
+ }
+}
+
irqreturn_t chg_state_change_irq_handler(int irq, void *data)
{
struct smb_irq_data *irq_data = data;
@@ -4233,6 +4305,10 @@ irqreturn_t chg_state_change_irq_handler(int irq, void *data)
}
stat = stat & BATTERY_CHARGER_STATUS_MASK;
+
+ if (chg->wa_flags & CHG_TERMINATION_WA)
+ smblib_eval_chg_termination(chg, stat);
+
power_supply_changed(chg->batt_psy);
return IRQ_HANDLED;
}
@@ -4897,6 +4973,9 @@ static bool smblib_src_lpd(struct smb_charger *chg)
u8 stat;
int rc;
+ if (chg->lpd_disabled)
+ return false;
+
rc = smblib_read(chg, TYPE_C_SRC_STATUS_REG, &stat);
if (rc < 0) {
smblib_err(chg, "Couldn't read TYPE_C_SRC_STATUS_REG rc=%d\n",
@@ -5027,6 +5106,9 @@ static void typec_src_removal(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->pl_enable_work);
+ if (chg->wa_flags & CHG_TERMINATION_WA)
+ alarm_cancel(&chg->chg_termination_alarm);
+
/* reset input current limit voters */
vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
is_flash_active(chg) ? SDP_CURRENT_UA : SDP_100_MA);
@@ -5037,6 +5119,7 @@ static void typec_src_removal(struct smb_charger *chg)
vote(chg->usb_icl_votable, OTG_VOTER, false, 0);
vote(chg->usb_icl_votable, CTM_VOTER, false, 0);
vote(chg->usb_icl_votable, HVDCP2_ICL_VOTER, false, 0);
+ vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0);
/* reset usb irq voters */
vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
@@ -5139,12 +5222,35 @@ static void smblib_handle_rp_change(struct smb_charger *chg, int typec_mode)
chg->typec_mode, typec_mode);
}
+static void smblib_lpd_launch_ra_open_work(struct smb_charger *chg)
+{
+ u8 stat;
+ int rc;
+
+ if (chg->lpd_disabled)
+ return;
+
+ rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read TYPE_C_MISC_STATUS_REG rc=%d\n",
+ rc);
+ return;
+ }
+
+ if (!(stat & TYPEC_TCCDEBOUNCE_DONE_STATUS_BIT)
+ && chg->lpd_stage == LPD_STAGE_NONE) {
+ chg->lpd_stage = LPD_STAGE_FLOAT;
+ cancel_delayed_work_sync(&chg->lpd_ra_open_work);
+ vote(chg->awake_votable, LPD_VOTER, true, 0);
+ schedule_delayed_work(&chg->lpd_ra_open_work,
+ msecs_to_jiffies(300));
+ }
+}
+
irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data)
{
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
- u8 stat;
- int rc;
smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
@@ -5176,21 +5282,7 @@ irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data)
if (chg->pr_swap_in_progress || chg->pd_hard_reset)
goto out;
- rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
- if (rc < 0) {
- smblib_err(chg, "Couldn't read TYPE_C_MISC_STATUS_REG rc=%d\n",
- rc);
- goto out;
- }
-
- if (!(stat & TYPEC_TCCDEBOUNCE_DONE_STATUS_BIT)
- && chg->lpd_stage == LPD_STAGE_NONE) {
- chg->lpd_stage = LPD_STAGE_FLOAT;
- cancel_delayed_work_sync(&chg->lpd_ra_open_work);
- vote(chg->awake_votable, LPD_VOTER, true, 0);
- schedule_delayed_work(&chg->lpd_ra_open_work,
- msecs_to_jiffies(300));
- }
+ smblib_lpd_launch_ra_open_work(chg);
if (chg->usb_psy)
power_supply_changed(chg->usb_psy);
@@ -5225,6 +5317,16 @@ irqreturn_t typec_state_change_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
+static void smblib_lpd_clear_ra_open_work(struct smb_charger *chg)
+{
+ if (chg->lpd_disabled)
+ return;
+
+ cancel_delayed_work_sync(&chg->lpd_detach_work);
+ chg->lpd_stage = LPD_STAGE_FLOAT_CANCEL;
+ cancel_delayed_work_sync(&chg->lpd_ra_open_work);
+ vote(chg->awake_votable, LPD_VOTER, false, 0);
+}
irqreturn_t typec_attach_detach_irq_handler(int irq, void *data)
{
@@ -5243,10 +5345,8 @@ irqreturn_t typec_attach_detach_irq_handler(int irq, void *data)
}
if (stat & TYPEC_ATTACH_DETACH_STATE_BIT) {
- cancel_delayed_work_sync(&chg->lpd_detach_work);
- chg->lpd_stage = LPD_STAGE_FLOAT_CANCEL;
- cancel_delayed_work_sync(&chg->lpd_ra_open_work);
- vote(chg->awake_votable, LPD_VOTER, false, 0);
+
+ smblib_lpd_clear_ra_open_work(chg);
rc = smblib_read(chg, TYPE_C_MISC_STATUS_REG, &stat);
if (rc < 0) {
@@ -5779,10 +5879,18 @@ static void smblib_moisture_protection_work(struct work_struct *work)
u8 stat;
/*
+ * Hold awake votable to prevent pm_relax being called prior to
+ * completion of this work.
+ */
+ vote(chg->awake_votable, MOISTURE_VOTER, true, 0);
+
+ /*
* Disable 1% duty cycle on CC_ID pin and enable uUSB factory mode
* detection to track any change on RID, as interrupts are disable.
*/
- rc = smblib_write(chg, TYPEC_U_USB_WATER_PROTECTION_CFG_REG, 0);
+ rc = smblib_write(chg, ((chg->smb_version == PMI632_SUBTYPE) ?
+ PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG :
+ TYPEC_U_USB_WATER_PROTECTION_CFG_REG), 0);
if (rc < 0) {
smblib_err(chg, "Couldn't disable periodic monitoring of CC_ID rc=%d\n",
rc);
@@ -5840,7 +5948,7 @@ static void smblib_moisture_protection_work(struct work_struct *work)
}
out:
- pm_relax(chg->dev);
+ vote(chg->awake_votable, MOISTURE_VOTER, false, 0);
}
static enum alarmtimer_restart moisture_protection_alarm_cb(struct alarm *alarm,
@@ -5859,6 +5967,94 @@ static enum alarmtimer_restart moisture_protection_alarm_cb(struct alarm *alarm,
return ALARMTIMER_NORESTART;
}
+static void smblib_chg_termination_work(struct work_struct *work)
+{
+ union power_supply_propval pval;
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ chg_termination_work);
+ int rc, input_present, delay = CHG_TERM_WA_ENTRY_DELAY_MS;
+
+ /*
+ * Hold awake votable to prevent pm_relax being called prior to
+ * completion of this work.
+ */
+ vote(chg->awake_votable, CHG_TERMINATION_VOTER, true, 0);
+
+ rc = smblib_is_input_present(chg, &input_present);
+ if ((rc < 0) || !input_present)
+ goto out;
+
+ rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CAPACITY, &pval);
+ if ((rc < 0) || (pval.intval < 100)) {
+ vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0);
+ goto out;
+ }
+
+ rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CHARGE_FULL,
+ &pval);
+ if (rc < 0)
+ goto out;
+
+ /*
+ * On change in the value of learned capacity, re-initialize the
+ * reference cc_soc value due to change in cc_soc characteristic value
+ * at full capacity. Also, in case cc_soc_ref value is reset,
+ * re-initialize it.
+ */
+ if (pval.intval != chg->charge_full_cc || !chg->cc_soc_ref) {
+ chg->charge_full_cc = pval.intval;
+ rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CC_SOC,
+ &pval);
+ if (rc < 0)
+ goto out;
+
+ chg->cc_soc_ref = pval.intval;
+ } else {
+ rc = smblib_get_prop_from_bms(chg, POWER_SUPPLY_PROP_CC_SOC,
+ &pval);
+ if (rc < 0)
+ goto out;
+ }
+
+ /*
+ * Suspend/Unsuspend USB input to keep cc_soc within the 0.5% to 0.75%
+ * overshoot range of the cc_soc value at termination, to prevent
+ * overcharging.
+ */
+ if (pval.intval < DIV_ROUND_CLOSEST(chg->cc_soc_ref * 10050, 10000)) {
+ vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, false, 0);
+ vote(chg->dc_suspend_votable, CHG_TERMINATION_VOTER, false, 0);
+ delay = CHG_TERM_WA_ENTRY_DELAY_MS;
+ } else if (pval.intval > DIV_ROUND_CLOSEST(chg->cc_soc_ref * 10075,
+ 10000)) {
+ vote(chg->usb_icl_votable, CHG_TERMINATION_VOTER, true, 0);
+ vote(chg->dc_suspend_votable, CHG_TERMINATION_VOTER, true, 0);
+ delay = CHG_TERM_WA_EXIT_DELAY_MS;
+ }
+
+ smblib_dbg(chg, PR_MISC, "Chg Term WA readings: cc_soc: %d, cc_soc_ref: %d, delay: %d\n",
+ pval.intval, chg->cc_soc_ref, delay);
+ alarm_start_relative(&chg->chg_termination_alarm, ms_to_ktime(delay));
+out:
+ vote(chg->awake_votable, CHG_TERMINATION_VOTER, false, 0);
+}
+
+static enum alarmtimer_restart chg_termination_alarm_cb(struct alarm *alarm,
+ ktime_t now)
+{
+ struct smb_charger *chg = container_of(alarm, struct smb_charger,
+ chg_termination_alarm);
+
+ smblib_dbg(chg, PR_MISC, "Charge termination WA alarm triggered %lld\n",
+ ktime_to_ms(now));
+
+ /* Atomic context, cannot use voter */
+ pm_stay_awake(chg->dev);
+ schedule_work(&chg->chg_termination_work);
+
+ return ALARMTIMER_NORESTART;
+}
+
static void jeita_update_work(struct work_struct *work)
{
struct smb_charger *chg = container_of(work, struct smb_charger,
@@ -6230,6 +6426,19 @@ int smblib_init(struct smb_charger *chg)
smblib_thermal_regulation_work);
INIT_DELAYED_WORK(&chg->usbov_dbc_work, smblib_usbov_dbc_work);
+ if (chg->wa_flags & CHG_TERMINATION_WA) {
+ INIT_WORK(&chg->chg_termination_work,
+ smblib_chg_termination_work);
+
+ if (alarmtimer_get_rtcdev()) {
+ alarm_init(&chg->chg_termination_alarm, ALARM_BOOTTIME,
+ chg_termination_alarm_cb);
+ } else {
+ smblib_err(chg, "Couldn't get rtc device\n");
+ return -ENODEV;
+ }
+ }
+
if (chg->uusb_moisture_protection_enabled) {
INIT_WORK(&chg->moisture_protection_work,
smblib_moisture_protection_work);
@@ -6339,6 +6548,10 @@ int smblib_deinit(struct smb_charger *chg)
alarm_cancel(&chg->moisture_protection_alarm);
cancel_work_sync(&chg->moisture_protection_work);
}
+ if (chg->wa_flags & CHG_TERMINATION_WA) {
+ alarm_cancel(&chg->chg_termination_alarm);
+ cancel_work_sync(&chg->chg_termination_work);
+ }
cancel_work_sync(&chg->bms_update_work);
cancel_work_sync(&chg->jeita_update_work);
cancel_work_sync(&chg->pl_update_work);
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index b7dd7bb..154554a 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -65,6 +65,7 @@ enum print_reason {
#define HVDCP2_ICL_VOTER "HVDCP2_ICL_VOTER"
#define AICL_THRESHOLD_VOTER "AICL_THRESHOLD_VOTER"
#define USBOV_DBC_VOTER "USBOV_DBC_VOTER"
+#define CHG_TERMINATION_VOTER "CHG_TERMINATION_VOTER"
#define BOOST_BACK_STORM_COUNT 3
#define WEAK_CHG_STORM_COUNT 8
@@ -107,6 +108,7 @@ enum {
SW_THERM_REGULATION_WA = BIT(1),
WEAK_ADAPTER_WA = BIT(2),
USBIN_OV_WA = BIT(3),
+ CHG_TERMINATION_WA = BIT(4),
};
enum jeita_cfg_stat {
@@ -395,6 +397,7 @@ struct smb_charger {
struct work_struct pl_update_work;
struct work_struct jeita_update_work;
struct work_struct moisture_protection_work;
+ struct work_struct chg_termination_work;
struct delayed_work ps_change_timeout_work;
struct delayed_work clear_hdc_work;
struct delayed_work icl_change_work;
@@ -408,6 +411,7 @@ struct smb_charger {
struct alarm lpd_recheck_timer;
struct alarm moisture_protection_alarm;
+ struct alarm chg_termination_alarm;
/* secondary charger config */
bool sec_pl_present;
@@ -463,6 +467,7 @@ struct smb_charger {
int smb_temp_max;
u8 typec_try_mode;
enum lpd_stage lpd_stage;
+ bool lpd_disabled;
enum lpd_reason lpd_reason;
bool fcc_stepper_enable;
int die_temp;
@@ -487,6 +492,8 @@ struct smb_charger {
int aicl_cont_threshold_mv;
int default_aicl_cont_threshold_mv;
bool aicl_max_reached;
+ int charge_full_cc;
+ int cc_soc_ref;
/* workaround flag */
u32 wa_flags;
diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h
index aa9a000..af361fb 100644
--- a/drivers/power/supply/qcom/smb5-reg.h
+++ b/drivers/power/supply/qcom/smb5-reg.h
@@ -429,12 +429,13 @@ enum {
#define EN_MICRO_USB_FACTORY_MODE_BIT BIT(1)
#define EN_MICRO_USB_MODE_BIT BIT(0)
-#define TYPEC_U_USB_WATER_PROTECTION_CFG_REG (TYPEC_BASE + 0x73)
-#define EN_MICRO_USB_WATER_PROTECTION_BIT BIT(4)
-#define MICRO_USB_DETECTION_ON_TIME_CFG_MASK GENMASK(3, 2)
-#define MICRO_USB_DETECTION_PERIOD_CFG_MASK GENMASK(1, 0)
+#define PMI632_TYPEC_U_USB_WATER_PROTECTION_CFG_REG (TYPEC_BASE + 0x72)
+#define TYPEC_U_USB_WATER_PROTECTION_CFG_REG (TYPEC_BASE + 0x73)
+#define EN_MICRO_USB_WATER_PROTECTION_BIT BIT(4)
+#define MICRO_USB_DETECTION_ON_TIME_CFG_MASK GENMASK(3, 2)
+#define MICRO_USB_DETECTION_PERIOD_CFG_MASK GENMASK(1, 0)
-#define TYPEC_MICRO_USB_MODE_REG (TYPEC_BASE + 0x73)
+#define PMI632_TYPEC_MICRO_USB_MODE_REG (TYPEC_BASE + 0x73)
#define MICRO_USB_MODE_ONLY_BIT BIT(0)
/********************************
* MISC Peripheral Registers *
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
index 8a00403..9bd2bd8 100644
--- a/drivers/scsi/csiostor/csio_attr.c
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -594,12 +594,12 @@ csio_vport_create(struct fc_vport *fc_vport, bool disable)
}
fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+ ln->fc_vport = fc_vport;
if (csio_fcoe_alloc_vnp(hw, ln))
goto error;
*(struct csio_lnode **)fc_vport->dd_data = ln;
- ln->fc_vport = fc_vport;
if (!fc_vport->node_name)
fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
if (!fc_vport->port_name)
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index fadc99c..a1551ab 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -829,6 +829,7 @@ static struct domain_device *sas_ex_discover_end_dev(
rphy = sas_end_device_alloc(phy->port);
if (!rphy)
goto out_free;
+ rphy->identify.phy_identifier = phy_id;
child->rphy = rphy;
get_device(&rphy->dev);
@@ -856,6 +857,7 @@ static struct domain_device *sas_ex_discover_end_dev(
child->rphy = rphy;
get_device(&rphy->dev);
+ rphy->identify.phy_identifier = phy_id;
sas_fill_in_rphy(child, rphy);
list_add_tail(&child->disco_list_node, &parent->port->disco_list);
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 918ae18..ca62117 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -297,7 +297,8 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport)
lport);
/* release any threads waiting for the unreg to complete */
- complete(&lport->lport_unreg_done);
+ if (lport->vport->localport)
+ complete(lport->lport_unreg_cmp);
}
/* lpfc_nvme_remoteport_delete
@@ -2556,7 +2557,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
*/
void
lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
- struct lpfc_nvme_lport *lport)
+ struct lpfc_nvme_lport *lport,
+ struct completion *lport_unreg_cmp)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
u32 wait_tmo;
@@ -2568,8 +2570,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
*/
wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
while (true) {
- ret = wait_for_completion_timeout(&lport->lport_unreg_done,
- wait_tmo);
+ ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
if (unlikely(!ret)) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR,
"6176 Lport %p Localport %p wait "
@@ -2603,12 +2604,12 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
struct lpfc_nvme_lport *lport;
struct lpfc_nvme_ctrl_stat *cstat;
int ret;
+ DECLARE_COMPLETION_ONSTACK(lport_unreg_cmp);
if (vport->nvmei_support == 0)
return;
localport = vport->localport;
- vport->localport = NULL;
lport = (struct lpfc_nvme_lport *)localport->private;
cstat = lport->cstat;
@@ -2619,13 +2620,14 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
/* lport's rport list is clear. Unregister
* lport and release resources.
*/
- init_completion(&lport->lport_unreg_done);
+ lport->lport_unreg_cmp = &lport_unreg_cmp;
ret = nvme_fc_unregister_localport(localport);
/* Wait for completion. This either blocks
* indefinitely or succeeds
*/
- lpfc_nvme_lport_unreg_wait(vport, lport);
+ lpfc_nvme_lport_unreg_wait(vport, lport, &lport_unreg_cmp);
+ vport->localport = NULL;
kfree(cstat);
/* Regardless of the unregister upcall response, clear
diff --git a/drivers/scsi/lpfc/lpfc_nvme.h b/drivers/scsi/lpfc/lpfc_nvme.h
index cfd4719..b234d02 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.h
+++ b/drivers/scsi/lpfc/lpfc_nvme.h
@@ -50,7 +50,7 @@ struct lpfc_nvme_ctrl_stat {
/* Declare nvme-based local and remote port definitions. */
struct lpfc_nvme_lport {
struct lpfc_vport *vport;
- struct completion lport_unreg_done;
+ struct completion *lport_unreg_cmp;
/* Add stats counters here */
struct lpfc_nvme_ctrl_stat *cstat;
atomic_t fc4NvmeLsRequests;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index b766afe..e2575c8 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1003,7 +1003,8 @@ lpfc_nvmet_targetport_delete(struct nvmet_fc_target_port *targetport)
struct lpfc_nvmet_tgtport *tport = targetport->private;
/* release any threads waiting for the unreg to complete */
- complete(&tport->tport_unreg_done);
+ if (tport->phba->targetport)
+ complete(tport->tport_unreg_cmp);
}
static void
@@ -1700,6 +1701,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
struct lpfc_nvmet_tgtport *tgtp;
struct lpfc_queue *wq;
uint32_t qidx;
+ DECLARE_COMPLETION_ONSTACK(tport_unreg_cmp);
if (phba->nvmet_support == 0)
return;
@@ -1709,9 +1711,9 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
wq = phba->sli4_hba.nvme_wq[qidx];
lpfc_nvmet_wqfull_flush(phba, wq, NULL);
}
- init_completion(&tgtp->tport_unreg_done);
+ tgtp->tport_unreg_cmp = &tport_unreg_cmp;
nvmet_fc_unregister_targetport(phba->targetport);
- wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
+ wait_for_completion_timeout(&tport_unreg_cmp, 5);
lpfc_nvmet_cleanup_io_context(phba);
}
phba->targetport = NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h
index 1aaff63..0ec1082 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.h
+++ b/drivers/scsi/lpfc/lpfc_nvmet.h
@@ -34,7 +34,7 @@
/* Used for NVME Target */
struct lpfc_nvmet_tgtport {
struct lpfc_hba *phba;
- struct completion tport_unreg_done;
+ struct completion *tport_unreg_cmp;
/* Stats counters - lpfc_nvmet_unsol_ls_buffer */
atomic_t rcv_ls_req_in;
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 22bfc19..345ccf3 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -68,6 +68,15 @@
This information is exported to usespace via sysfs entries and userspace
algorithms uses info and decide when to turn on/off the cpu cores.
+config MSM_QBT_HANDLER
+ bool "Event Handler for QTI Ultrasonic Fingerprint Sensor"
+ help
+	This driver acts as an interrupt handler, where the interrupt is generated
+ by the QTI Ultrasonic Fingerprint Sensor. It queues the events for each
+ interrupt in an event queue and notifies the userspace to read the events
+ from the queue. It also creates an input device to send key events such as
+ KEY_POWER, KEY_HOME.
+
config QCOM_GSBI
tristate "QCOM General Serial Bus Interface"
depends on ARCH_QCOM
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 43ca8fa..f171fe6 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -66,6 +66,7 @@
obj-$(CONFIG_QCOM_GLINK) += glink_probe.o
obj-$(CONFIG_QCOM_GLINK_PKT) += glink_pkt.o
obj-$(CONFIG_QCOM_QDSS_BRIDGE) += qdss_bridge.o
+obj-$(CONFIG_MSM_QBT_HANDLER) += qbt_handler.o
obj-$(CONFIG_QSEE_IPC_IRQ) += qsee_ipc_irq.o
obj-$(CONFIG_QSEE_IPC_IRQ_BRIDGE) += qsee_ipc_irq_bridge.o
obj-$(CONFIG_QPNP_PBS) += qpnp-pbs.o
diff --git a/drivers/soc/qcom/dcc_v2.c b/drivers/soc/qcom/dcc_v2.c
index 7101500..c074d79 100644
--- a/drivers/soc/qcom/dcc_v2.c
+++ b/drivers/soc/qcom/dcc_v2.c
@@ -145,6 +145,7 @@ struct dcc_drvdata {
uint32_t nr_config[DCC_MAX_LINK_LIST];
uint8_t curr_list;
uint8_t cti_trig;
+ uint8_t loopoff;
};
static bool dcc_ready(struct dcc_drvdata *drvdata)
@@ -250,7 +251,6 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list)
/* write new offset = 1 to continue
* processing the list
*/
- link |= ((0x1 << 8) & BM(8, 14));
dcc_sram_writel(drvdata, link, sram_offset);
sram_offset += 4;
/* Reset link and prev_off */
@@ -283,7 +283,8 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list)
if (loop_start) {
loop = (sram_offset - loop_off) / 4;
- loop |= (loop_cnt << 13) & BM(13, 27);
+ loop |= (loop_cnt << drvdata->loopoff) &
+ BM(drvdata->loopoff, 27);
loop |= DCC_LOOP_DESCRIPTOR;
total_len += (total_len - loop_len) * loop_cnt;
@@ -315,7 +316,6 @@ static int __dcc_ll_cfg(struct dcc_drvdata *drvdata, int curr_list)
/* write new offset = 1 to continue
* processing the list
*/
- link |= ((0x1 << 8) & BM(8, 14));
dcc_sram_writel(drvdata, link, sram_offset);
sram_offset += 4;
/* Reset link and prev_off */
@@ -1624,6 +1624,8 @@ static int dcc_probe(struct platform_device *pdev)
if (ret)
return -EINVAL;
+ drvdata->loopoff = get_bitmask_order((drvdata->ram_size +
+ drvdata->ram_offset) / 4 - 1);
mutex_init(&drvdata->mutex);
for (i = 0; i < DCC_MAX_LINK_LIST; i++) {
diff --git a/drivers/soc/qcom/msm_bus/Makefile b/drivers/soc/qcom/msm_bus/Makefile
index a217fd4..a764258 100644
--- a/drivers/soc/qcom/msm_bus/Makefile
+++ b/drivers/soc/qcom/msm_bus/Makefile
@@ -8,7 +8,7 @@
ifdef CONFIG_QCOM_BUS_CONFIG_RPMH
obj-y += msm_bus_fabric_rpmh.o msm_bus_arb_rpmh.o msm_bus_rules.o \
- msm_bus_bimc_rpmh.o msm_bus_noc_rpmh.o
+ msm_bus_bimc_rpmh.o msm_bus_noc_rpmh.o msm_bus_proxy_client.o
obj-$(CONFIG_OF) += msm_bus_of_rpmh.o
else
obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o msm_bus_rules.o \
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_proxy_client.c b/drivers/soc/qcom/msm_bus/msm_bus_proxy_client.c
new file mode 100644
index 0000000..d3ca18f
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_proxy_client.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/msm-bus.h>
+
+struct proxy_client {
+ struct msm_bus_scale_pdata *pdata;
+ unsigned int client_handle;
+};
+
+static struct proxy_client proxy_client_info;
+
+static int msm_bus_device_proxy_client_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ proxy_client_info.pdata = msm_bus_cl_get_pdata(pdev);
+
+ if (!proxy_client_info.pdata)
+ return 0;
+
+ proxy_client_info.client_handle =
+ msm_bus_scale_register_client(proxy_client_info.pdata);
+
+ if (!proxy_client_info.client_handle) {
+ dev_err(&pdev->dev, "Unable to register bus client\n");
+ return -ENODEV;
+ }
+
+ ret = msm_bus_scale_client_update_request(
+ proxy_client_info.client_handle, 1);
+ if (ret)
+ dev_err(&pdev->dev, "Bandwidth update failed (%d)\n", ret);
+
+ return ret;
+}
+
+static const struct of_device_id proxy_client_match[] = {
+ {.compatible = "qcom,bus-proxy-client"},
+ {}
+};
+
+static struct platform_driver msm_bus_proxy_client_driver = {
+ .probe = msm_bus_device_proxy_client_probe,
+ .driver = {
+ .name = "msm_bus_proxy_client_device",
+ .of_match_table = proxy_client_match,
+ },
+};
+
+static int __init msm_bus_proxy_client_init_driver(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&msm_bus_proxy_client_driver);
+ if (rc) {
+ pr_err("Failed to register proxy client device driver\n");
+ return rc;
+ }
+
+ return rc;
+}
+
+static int __init msm_bus_proxy_client_unvote(void)
+{
+ int ret;
+
+ if (!proxy_client_info.pdata || !proxy_client_info.client_handle)
+ return 0;
+
+ ret = msm_bus_scale_client_update_request(
+ proxy_client_info.client_handle, 0);
+ if (ret)
+ pr_err("%s: bandwidth update request failed (%d)\n",
+ __func__, ret);
+
+ msm_bus_scale_unregister_client(proxy_client_info.client_handle);
+
+ return 0;
+}
+
+subsys_initcall_sync(msm_bus_proxy_client_init_driver);
+late_initcall_sync(msm_bus_proxy_client_unvote);
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 7f63c1f3..0b8b7d1 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -1121,6 +1121,7 @@ int pil_boot(struct pil_desc *desc)
}
trace_pil_event("before_auth_reset", desc);
+ notify_before_auth_and_reset(desc->dev);
ret = desc->ops->auth_and_reset(desc);
if (ret) {
pil_err(desc, "Failed to bring out of reset(rc:%d)\n", ret);
diff --git a/drivers/soc/qcom/qbt_handler.c b/drivers/soc/qcom/qbt_handler.c
new file mode 100644
index 0000000..0c93e1bd
--- /dev/null
+++ b/drivers/soc/qcom/qbt_handler.c
@@ -0,0 +1,959 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#define DEBUG
+#define pr_fmt(fmt) "qbt:%s: " fmt, __func__
+
+#include <linux/input.h>
+#include <linux/ktime.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/pm.h>
+#include <linux/of.h>
+#include <linux/mutex.h>
+#include <linux/atomic.h>
+#include <linux/of_gpio.h>
+#include <linux/kfifo.h>
+#include <linux/poll.h>
+#include <uapi/linux/qbt_handler.h>
+
+#define QBT_DEV "qbt"
+#define MAX_FW_EVENTS 128
+#define MINOR_NUM_FD 0
+#define MINOR_NUM_IPC 1
+#define QBT_INPUT_DEV_NAME "qbt_key_input"
+#define QBT_INPUT_DEV_VERSION 0x0100
+
+struct finger_detect_gpio {
+ int gpio;
+ int active_low;
+ int irq;
+ struct work_struct work;
+ int last_gpio_state;
+ int event_reported;
+ bool irq_enabled;
+};
+
+struct fw_event_desc {
+ enum qbt_fw_event ev;
+};
+
+struct fw_ipc_info {
+ int gpio;
+ int irq;
+ bool irq_enabled;
+ struct work_struct work;
+};
+
+struct qbt_drvdata {
+ struct class *qbt_class;
+ struct cdev qbt_fd_cdev;
+ struct cdev qbt_ipc_cdev;
+ struct input_dev *in_dev;
+ struct device *dev;
+ char *qbt_node;
+ atomic_t fd_available;
+ atomic_t ipc_available;
+ struct mutex mutex;
+ struct mutex fd_events_mutex;
+ struct mutex ipc_events_mutex;
+ struct fw_ipc_info fw_ipc;
+ struct finger_detect_gpio fd_gpio;
+ DECLARE_KFIFO(fd_events, struct fw_event_desc, MAX_FW_EVENTS);
+ DECLARE_KFIFO(ipc_events, struct fw_event_desc, MAX_FW_EVENTS);
+ wait_queue_head_t read_wait_queue_fd;
+ wait_queue_head_t read_wait_queue_ipc;
+ bool is_wuhb_connected;
+};
+
+/**
+ * qbt_open() - Function called when user space opens device.
+ * Successful if driver not currently open.
+ * @inode: ptr to inode object
+ * @file: ptr to file object
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_open(struct inode *inode, struct file *file)
+{
+ struct qbt_drvdata *drvdata = NULL;
+ int rc = 0;
+ int minor_no = -1;
+
+ if (!inode || !inode->i_cdev || !file) {
+ pr_err("NULL pointer passed\n");
+ return -EINVAL;
+ }
+ minor_no = iminor(inode);
+ if (minor_no == MINOR_NUM_FD) {
+ drvdata = container_of(inode->i_cdev,
+ struct qbt_drvdata, qbt_fd_cdev);
+ } else if (minor_no == MINOR_NUM_IPC) {
+ drvdata = container_of(inode->i_cdev,
+ struct qbt_drvdata, qbt_ipc_cdev);
+ } else {
+ pr_err("Invalid minor number\n");
+ return -EINVAL;
+ }
+
+ file->private_data = drvdata;
+
+ pr_debug("entry minor_no=%d\n", minor_no);
+
+ /* disallowing concurrent opens */
+ if (minor_no == MINOR_NUM_FD &&
+ !atomic_dec_and_test(&drvdata->fd_available)) {
+ atomic_inc(&drvdata->fd_available);
+ rc = -EBUSY;
+ } else if (minor_no == MINOR_NUM_IPC &&
+ !atomic_dec_and_test(&drvdata->ipc_available)) {
+ atomic_inc(&drvdata->ipc_available);
+ rc = -EBUSY;
+ }
+
+ pr_debug("exit : %d\n", rc);
+ return rc;
+}
+
+/**
+ * qbt_release() - Function called when user space closes device.
+ *
+ * @inode: ptr to inode object
+ * @file: ptr to file object
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_release(struct inode *inode, struct file *file)
+{
+ struct qbt_drvdata *drvdata;
+ int minor_no = -1;
+
+ if (!file || !file->private_data || !inode) {
+ pr_err("NULL pointer passed\n");
+ return -EINVAL;
+ }
+ drvdata = file->private_data;
+ minor_no = iminor(inode);
+ if (minor_no == MINOR_NUM_FD) {
+ atomic_inc(&drvdata->fd_available);
+ } else if (minor_no == MINOR_NUM_IPC) {
+ atomic_inc(&drvdata->ipc_available);
+ } else {
+ pr_err("Invalid minor number\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * qbt_ioctl() - Function called when user space calls ioctl.
+ * @file: struct file - not used
+ * @cmd: cmd identifier such as QBT_IS_WUHB_CONNECTED
+ * @arg:	ptr to relevant structure: either qbt_app or
+ * qbt_send_tz_cmd depending on which cmd is passed
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static long qbt_ioctl(
+ struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int rc = 0;
+ void __user *priv_arg = (void __user *)arg;
+ struct qbt_drvdata *drvdata;
+
+ if (!file || !file->private_data) {
+ pr_err("NULL pointer passed\n");
+ return -EINVAL;
+ }
+
+ drvdata = file->private_data;
+
+ if (IS_ERR(priv_arg)) {
+ dev_err(drvdata->dev, "%s: invalid user space pointer %lu\n",
+ __func__, arg);
+ return -EINVAL;
+ }
+
+ mutex_lock(&drvdata->mutex);
+
+ pr_debug("cmd received %d\n", cmd);
+
+ switch (cmd) {
+ case QBT_ENABLE_IPC:
+ {
+ if (!drvdata->fw_ipc.irq_enabled) {
+ enable_irq(drvdata->fw_ipc.irq);
+ drvdata->fw_ipc.irq_enabled = true;
+ pr_debug("%s: QBT_ENABLE_IPC\n", __func__);
+ }
+ break;
+ }
+ case QBT_DISABLE_IPC:
+ {
+ if (drvdata->fw_ipc.irq_enabled) {
+ disable_irq(drvdata->fw_ipc.irq);
+ drvdata->fw_ipc.irq_enabled = false;
+ pr_debug("%s: QBT_DISABLE_IPC\n", __func__);
+ }
+ break;
+ }
+ case QBT_ENABLE_FD:
+ {
+ if (drvdata->is_wuhb_connected &&
+ !drvdata->fd_gpio.irq_enabled) {
+ enable_irq(drvdata->fd_gpio.irq);
+ drvdata->fd_gpio.irq_enabled = true;
+ pr_debug("%s: QBT_ENABLE_FD\n", __func__);
+ }
+ break;
+ }
+ case QBT_DISABLE_FD:
+ {
+ if (drvdata->is_wuhb_connected &&
+ drvdata->fd_gpio.irq_enabled) {
+ disable_irq(drvdata->fd_gpio.irq);
+ drvdata->fd_gpio.irq_enabled = false;
+ pr_debug("%s: QBT_DISABLE_FD\n", __func__);
+ }
+ break;
+ }
+ case QBT_IS_WUHB_CONNECTED:
+ {
+ struct qbt_wuhb_connected_status wuhb_connected_status;
+
+ wuhb_connected_status.is_wuhb_connected =
+ drvdata->is_wuhb_connected;
+ rc = copy_to_user((void __user *)priv_arg,
+ &wuhb_connected_status,
+ sizeof(wuhb_connected_status));
+
+ if (rc != 0) {
+ pr_err("Failed to copy wuhb connected status: %d\n",
+ rc);
+ rc = -EFAULT;
+ goto end;
+ }
+
+ break;
+ }
+ case QBT_SEND_KEY_EVENT:
+ {
+ struct qbt_key_event key_event;
+
+ if (copy_from_user(&key_event, priv_arg,
+ sizeof(key_event))
+ != 0) {
+ rc = -EFAULT;
+ pr_err("failed copy from user space %d\n", rc);
+ goto end;
+ }
+
+ input_event(drvdata->in_dev, EV_KEY,
+ key_event.key, key_event.value);
+ input_sync(drvdata->in_dev);
+ break;
+ }
+ default:
+ pr_err("invalid cmd %d\n", cmd);
+ rc = -ENOIOCTLCMD;
+ goto end;
+ }
+
+end:
+ mutex_unlock(&drvdata->mutex);
+ return rc;
+}
+
+static int get_events_fifo_len_locked(
+ struct qbt_drvdata *drvdata, int minor_no)
+{
+ int len = 0;
+
+ if (minor_no == MINOR_NUM_FD) {
+ mutex_lock(&drvdata->fd_events_mutex);
+ len = kfifo_len(&drvdata->fd_events);
+ mutex_unlock(&drvdata->fd_events_mutex);
+ } else if (minor_no == MINOR_NUM_IPC) {
+ mutex_lock(&drvdata->ipc_events_mutex);
+ len = kfifo_len(&drvdata->ipc_events);
+ mutex_unlock(&drvdata->ipc_events_mutex);
+ }
+
+ return len;
+}
+
+static ssize_t qbt_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct fw_event_desc fw_event;
+ struct qbt_drvdata *drvdata;
+ wait_queue_head_t *read_wait_queue = NULL;
+ int rc = 0;
+ int minor_no = -1;
+ int fifo_len;
+
+ pr_debug("entry with numBytes = %zd, minor_no = %d\n", cnt, minor_no);
+
+ if (!filp || !filp->private_data) {
+ pr_err("NULL pointer passed\n");
+ return -EINVAL;
+ }
+ drvdata = filp->private_data;
+
+ if (cnt < sizeof(fw_event.ev)) {
+ pr_err("Num bytes to read is too small\n");
+ return -EINVAL;
+ }
+
+ minor_no = iminor(filp->f_path.dentry->d_inode);
+ if (minor_no == MINOR_NUM_FD) {
+ read_wait_queue = &drvdata->read_wait_queue_fd;
+ } else if (minor_no == MINOR_NUM_IPC) {
+ read_wait_queue = &drvdata->read_wait_queue_ipc;
+ } else {
+ pr_err("Invalid minor number\n");
+ return -EINVAL;
+ }
+
+ fifo_len = get_events_fifo_len_locked(drvdata, minor_no);
+ while (fifo_len == 0) {
+ if (filp->f_flags & O_NONBLOCK) {
+ pr_debug("fw_events fifo: empty, returning\n");
+ return -EAGAIN;
+ }
+ pr_debug("fw_events fifo: empty, waiting\n");
+ if (wait_event_interruptible(*read_wait_queue,
+ (get_events_fifo_len_locked(
+ drvdata, minor_no) > 0)))
+ return -ERESTARTSYS;
+ fifo_len = get_events_fifo_len_locked(drvdata, minor_no);
+ }
+
+ if (minor_no == MINOR_NUM_FD) {
+ mutex_lock(&drvdata->fd_events_mutex);
+ rc = kfifo_get(&drvdata->fd_events, &fw_event);
+ mutex_unlock(&drvdata->fd_events_mutex);
+ } else if (minor_no == MINOR_NUM_IPC) {
+ mutex_lock(&drvdata->ipc_events_mutex);
+ rc = kfifo_get(&drvdata->ipc_events, &fw_event);
+ mutex_unlock(&drvdata->ipc_events_mutex);
+ } else {
+ pr_err("Invalid minor number\n");
+ }
+
+ if (!rc) {
+ pr_err("fw_events fifo: unexpectedly empty\n");
+ return -EINVAL;
+ }
+
+ pr_debug("Firmware event %d at minor no %d read at time %lu uS\n",
+ (int)fw_event.ev, minor_no,
+ (unsigned long)ktime_to_us(ktime_get()));
+ return copy_to_user(ubuf, &fw_event.ev, sizeof(fw_event.ev));
+}
+
+static unsigned int qbt_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct qbt_drvdata *drvdata;
+ unsigned int mask = 0;
+ int minor_no = -1;
+
+ if (!filp || !filp->private_data) {
+ pr_err("NULL pointer passed\n");
+ return -EINVAL;
+ }
+ drvdata = filp->private_data;
+
+ minor_no = iminor(filp->f_path.dentry->d_inode);
+ if (minor_no == MINOR_NUM_FD) {
+ poll_wait(filp, &drvdata->read_wait_queue_fd, wait);
+ if (kfifo_len(&drvdata->fd_events) > 0)
+ mask |= (POLLIN | POLLRDNORM);
+ } else if (minor_no == MINOR_NUM_IPC) {
+ poll_wait(filp, &drvdata->read_wait_queue_ipc, wait);
+ if (kfifo_len(&drvdata->ipc_events) > 0)
+ mask |= (POLLIN | POLLRDNORM);
+ } else {
+ pr_err("Invalid minor number\n");
+ return -EINVAL;
+ }
+
+ return mask;
+}
+
+static const struct file_operations qbt_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = qbt_ioctl,
+ .open = qbt_open,
+ .release = qbt_release,
+ .read = qbt_read,
+ .poll = qbt_poll
+};
+
+static int qbt_dev_register(struct qbt_drvdata *drvdata)
+{
+ dev_t dev_no, major_no;
+ int ret = 0;
+ size_t node_size;
+ char *node_name = QBT_DEV;
+ struct device *dev = drvdata->dev;
+ struct device *device;
+
+ node_size = strlen(node_name) + 1;
+
+ drvdata->qbt_node = devm_kzalloc(dev, node_size, GFP_KERNEL);
+ if (!drvdata->qbt_node) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ strlcpy(drvdata->qbt_node, node_name, node_size);
+
+ ret = alloc_chrdev_region(&dev_no, 0, 2, drvdata->qbt_node);
+ if (ret) {
+ pr_err("alloc_chrdev_region failed %d\n", ret);
+ goto err_alloc;
+ }
+ major_no = MAJOR(dev_no);
+
+ cdev_init(&drvdata->qbt_fd_cdev, &qbt_fops);
+
+ drvdata->qbt_fd_cdev.owner = THIS_MODULE;
+ ret = cdev_add(&drvdata->qbt_fd_cdev,
+ MKDEV(major_no, MINOR_NUM_FD), 1);
+ if (ret) {
+ pr_err("cdev_add failed for fd %d\n", ret);
+ goto err_cdev_add;
+ }
+ cdev_init(&drvdata->qbt_ipc_cdev, &qbt_fops);
+
+ drvdata->qbt_ipc_cdev.owner = THIS_MODULE;
+ ret = cdev_add(&drvdata->qbt_ipc_cdev,
+ MKDEV(major_no, MINOR_NUM_IPC), 1);
+ if (ret) {
+ pr_err("cdev_add failed for ipc %d\n", ret);
+ goto err_cdev_add;
+ }
+
+ drvdata->qbt_class = class_create(THIS_MODULE,
+ drvdata->qbt_node);
+ if (IS_ERR(drvdata->qbt_class)) {
+ ret = PTR_ERR(drvdata->qbt_class);
+ pr_err("class_create failed %d\n", ret);
+ goto err_class_create;
+ }
+
+ device = device_create(drvdata->qbt_class, NULL,
+ drvdata->qbt_fd_cdev.dev, drvdata,
+ "%s_fd", drvdata->qbt_node);
+ if (IS_ERR(device)) {
+ ret = PTR_ERR(device);
+ pr_err("fd device_create failed %d\n", ret);
+ goto err_dev_create;
+ }
+
+ device = device_create(drvdata->qbt_class, NULL,
+ drvdata->qbt_ipc_cdev.dev, drvdata,
+ "%s_ipc", drvdata->qbt_node);
+ if (IS_ERR(device)) {
+ ret = PTR_ERR(device);
+ pr_err("ipc device_create failed %d\n", ret);
+ goto err_dev_create;
+ }
+
+ return 0;
+err_dev_create:
+ class_destroy(drvdata->qbt_class);
+err_class_create:
+ cdev_del(&drvdata->qbt_fd_cdev);
+ cdev_del(&drvdata->qbt_ipc_cdev);
+err_cdev_add:
+ unregister_chrdev_region(drvdata->qbt_fd_cdev.dev, 1);
+ unregister_chrdev_region(drvdata->qbt_ipc_cdev.dev, 1);
+err_alloc:
+ return ret;
+}
+
+/**
+ * qbt_create_input_device() - Function allocates an input
+ * device, configures it for key events and registers it
+ *
+ * @drvdata: ptr to driver data
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_create_input_device(struct qbt_drvdata *drvdata)
+{
+ int rc = 0;
+
+ drvdata->in_dev = input_allocate_device();
+ if (drvdata->in_dev == NULL) {
+ dev_err(drvdata->dev, "%s: input_allocate_device() failed\n",
+ __func__);
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ drvdata->in_dev->name = QBT_INPUT_DEV_NAME;
+ drvdata->in_dev->phys = NULL;
+ drvdata->in_dev->id.bustype = BUS_HOST;
+ drvdata->in_dev->id.vendor = 0x0001;
+ drvdata->in_dev->id.product = 0x0001;
+ drvdata->in_dev->id.version = QBT_INPUT_DEV_VERSION;
+
+ drvdata->in_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
+ drvdata->in_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ drvdata->in_dev->keybit[BIT_WORD(KEY_HOMEPAGE)] |=
+ BIT_MASK(KEY_HOMEPAGE);
+ drvdata->in_dev->keybit[BIT_WORD(KEY_VOLUMEDOWN)] |=
+ BIT_MASK(KEY_VOLUMEDOWN);
+ drvdata->in_dev->keybit[BIT_WORD(KEY_POWER)] |=
+ BIT_MASK(KEY_POWER);
+
+ input_set_abs_params(drvdata->in_dev, ABS_X,
+ 0,
+ 1000,
+ 0, 0);
+ input_set_abs_params(drvdata->in_dev, ABS_Y,
+ 0,
+ 1000,
+ 0, 0);
+
+ rc = input_register_device(drvdata->in_dev);
+ if (rc) {
+ dev_err(drvdata->dev, "%s: input_reg_dev() failed %d\n",
+ __func__, rc);
+ goto end;
+ }
+
+end:
+ if (rc)
+ input_free_device(drvdata->in_dev);
+ return rc;
+}
+
+static void qbt_fd_report_event(struct qbt_drvdata *drvdata, int state)
+{
+ struct fw_event_desc fw_event;
+
+ if (!drvdata->is_wuhb_connected) {
+ pr_err("Skipping as WUHB_INT is disconnected\n");
+ return;
+ }
+
+ if (drvdata->fd_gpio.event_reported
+ && state == drvdata->fd_gpio.last_gpio_state)
+ return;
+
+ pr_debug("gpio %d: report state %d current_time %lu uS\n",
+ drvdata->fd_gpio.gpio, state,
+ (unsigned long)ktime_to_us(ktime_get()));
+
+ drvdata->fd_gpio.event_reported = 1;
+ drvdata->fd_gpio.last_gpio_state = state;
+
+ fw_event.ev = (state ? FW_EVENT_FINGER_DOWN : FW_EVENT_FINGER_UP);
+
+ mutex_lock(&drvdata->fd_events_mutex);
+
+ kfifo_reset(&drvdata->fd_events);
+
+ if (!kfifo_put(&drvdata->fd_events, fw_event)) {
+ pr_err("FD events fifo: error adding item\n");
+ } else {
+ pr_debug("FD event %d queued at time %lu uS\n", fw_event.ev,
+ (unsigned long)ktime_to_us(ktime_get()));
+ }
+ mutex_unlock(&drvdata->fd_events_mutex);
+ wake_up_interruptible(&drvdata->read_wait_queue_fd);
+}
+
+static void qbt_gpio_work_func(struct work_struct *work)
+{
+ int state;
+ struct qbt_drvdata *drvdata;
+
+ if (!work) {
+ pr_err("NULL pointer passed\n");
+ return;
+ }
+
+ drvdata = container_of(work, struct qbt_drvdata, fd_gpio.work);
+
+ state = (__gpio_get_value(drvdata->fd_gpio.gpio) ? 1 : 0)
+ ^ drvdata->fd_gpio.active_low;
+
+ qbt_fd_report_event(drvdata, state);
+
+ pm_relax(drvdata->dev);
+}
+
+static irqreturn_t qbt_gpio_isr(int irq, void *dev_id)
+{
+ struct qbt_drvdata *drvdata = dev_id;
+
+ if (!drvdata) {
+ pr_err("NULL pointer passed\n");
+ return IRQ_HANDLED;
+ }
+
+ if (irq != drvdata->fd_gpio.irq) {
+ pr_warn("invalid irq %d (expected %d)\n",
+ irq, drvdata->fd_gpio.irq);
+ return IRQ_HANDLED;
+ }
+
+ pr_debug("FD event received at time %lu uS\n",
+ (unsigned long)ktime_to_us(ktime_get()));
+
+ pm_stay_awake(drvdata->dev);
+ schedule_work(&drvdata->fd_gpio.work);
+
+ return IRQ_HANDLED;
+}
+
+static void qbt_irq_report_event(struct work_struct *work)
+{
+ struct qbt_drvdata *drvdata;
+ struct fw_event_desc fw_ev_des;
+
+ if (!work) {
+ pr_err("NULL pointer passed\n");
+ return;
+ }
+ drvdata = container_of(work, struct qbt_drvdata, fw_ipc.work);
+
+ fw_ev_des.ev = FW_EVENT_IPC;
+ mutex_lock(&drvdata->ipc_events_mutex);
+ if (!kfifo_put(&drvdata->ipc_events, fw_ev_des)) {
+ pr_err("ipc events: fifo full, drop event %d\n",
+ (int) fw_ev_des.ev);
+ } else {
+ pr_debug("IPC event %d queued at time %lu uS\n", fw_ev_des.ev,
+ (unsigned long)ktime_to_us(ktime_get()));
+ }
+ mutex_unlock(&drvdata->ipc_events_mutex);
+ wake_up_interruptible(&drvdata->read_wait_queue_ipc);
+ pm_relax(drvdata->dev);
+}
+
+/**
+ * qbt_ipc_irq_handler() - function processes IPC
+ * interrupts on its own thread
+ * @irq: the interrupt that occurred
+ * @dev_id: pointer to the qbt_drvdata
+ *
+ * Return: IRQ_HANDLED when complete
+ */
+static irqreturn_t qbt_ipc_irq_handler(int irq, void *dev_id)
+{
+ struct qbt_drvdata *drvdata = (struct qbt_drvdata *)dev_id;
+
+ if (!drvdata) {
+ pr_err("NULL pointer passed\n");
+ return IRQ_HANDLED;
+ }
+
+ if (irq != drvdata->fw_ipc.irq) {
+ pr_warn("invalid irq %d (expected %d)\n",
+ irq, drvdata->fw_ipc.irq);
+ return IRQ_HANDLED;
+ }
+
+ pr_debug("IPC event received at time %lu uS\n",
+ (unsigned long)ktime_to_us(ktime_get()));
+
+ pm_stay_awake(drvdata->dev);
+ schedule_work(&drvdata->fw_ipc.work);
+
+ return IRQ_HANDLED;
+}
+
+static int setup_fd_gpio_irq(struct platform_device *pdev,
+ struct qbt_drvdata *drvdata)
+{
+ int rc = 0;
+ int irq;
+ const char *desc = "qbt_finger_detect";
+
+ if (!drvdata->is_wuhb_connected) {
+ pr_err("Skipping as WUHB_INT is disconnected\n");
+ goto end;
+ }
+
+ rc = devm_gpio_request_one(&pdev->dev, drvdata->fd_gpio.gpio,
+ GPIOF_IN, desc);
+ if (rc < 0) {
+ pr_err("failed to request gpio %d, error %d\n",
+ drvdata->fd_gpio.gpio, rc);
+ goto end;
+ }
+
+
+ irq = gpio_to_irq(drvdata->fd_gpio.gpio);
+ if (irq < 0) {
+ rc = irq;
+ pr_err("unable to get irq number for gpio %d, error %d\n",
+ drvdata->fd_gpio.gpio, rc);
+ goto end;
+ }
+
+
+ drvdata->fd_gpio.irq = irq;
+ INIT_WORK(&drvdata->fd_gpio.work, qbt_gpio_work_func);
+
+ rc = devm_request_any_context_irq(&pdev->dev, drvdata->fd_gpio.irq,
+ qbt_gpio_isr, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ desc, drvdata);
+
+ if (rc < 0) {
+ pr_err("unable to claim irq %d; error %d\n",
+ drvdata->fd_gpio.irq, rc);
+ goto end;
+ }
+
+end:
+ pr_debug("rc %d\n", rc);
+ return rc;
+}
+
+/*
+ * setup_ipc_irq() - request the firmware IPC GPIO and register its
+ * threaded interrupt handler.
+ * @pdev:    platform device owning the devm resources
+ * @drvdata: driver data holding the fw_ipc descriptor
+ *
+ * Return: 0 on success or a negative error code.
+ */
+static int setup_ipc_irq(struct platform_device *pdev,
+	struct qbt_drvdata *drvdata)
+{
+	int rc = 0;
+	const char *desc = "qbt_ipc";
+
+	/*
+	 * NOTE(review): the irq is mapped before the gpio is requested
+	 * below — confirm gpio_to_irq() is valid on an unrequested gpio
+	 * for this platform.
+	 */
+	drvdata->fw_ipc.irq = gpio_to_irq(drvdata->fw_ipc.gpio);
+	INIT_WORK(&drvdata->fw_ipc.work, qbt_irq_report_event);
+	pr_debug("irq %d gpio %d\n",
+		drvdata->fw_ipc.irq, drvdata->fw_ipc.gpio);
+
+	if (drvdata->fw_ipc.irq < 0) {
+		rc = drvdata->fw_ipc.irq;
+		pr_err("no irq for gpio %d, error=%d\n",
+			drvdata->fw_ipc.gpio, rc);
+		goto end;
+	}
+
+	rc = devm_gpio_request_one(&pdev->dev, drvdata->fw_ipc.gpio,
+		GPIOF_IN, desc);
+
+	if (rc < 0) {
+		pr_err("failed to request gpio %d, error %d\n",
+			drvdata->fw_ipc.gpio, rc);
+		goto end;
+	}
+
+	/*
+	 * Threaded handler only (no hard-IRQ part); IRQF_ONESHOT keeps the
+	 * line masked until qbt_ipc_irq_handler() has run.
+	 */
+	rc = devm_request_threaded_irq(&pdev->dev,
+		drvdata->fw_ipc.irq,
+		NULL,
+		qbt_ipc_irq_handler,
+		IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+		desc,
+		drvdata);
+
+	if (rc < 0) {
+		pr_err("failed to register for ipc irq %d, rc = %d\n",
+			drvdata->fw_ipc.irq, rc);
+		goto end;
+	}
+
+end:
+	return rc;
+}
+
+/**
+ * qbt_read_device_tree() - Function reads device tree
+ * properties into driver data
+ * @pdev: ptr to platform device object
+ * @drvdata: ptr to driver data
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_read_device_tree(struct platform_device *pdev,
+	struct qbt_drvdata *drvdata)
+{
+	int rc = 0;
+	int gpio;
+	enum of_gpio_flags flags;
+
+	/* read IPC gpio — mandatory; its absence fails the probe. */
+	drvdata->fw_ipc.gpio = of_get_named_gpio(pdev->dev.of_node,
+		"qcom,ipc-gpio", 0);
+	if (drvdata->fw_ipc.gpio < 0) {
+		rc = drvdata->fw_ipc.gpio;
+		pr_err("ipc gpio not found, error=%d\n", rc);
+		goto end;
+	}
+
+	/*
+	 * Finger-detect gpio is optional: if it is absent the device is
+	 * simply marked as having no WUHB connection and 0 is returned.
+	 */
+	gpio = of_get_named_gpio_flags(pdev->dev.of_node,
+		"qcom,finger-detect-gpio", 0, &flags);
+	if (gpio < 0) {
+		pr_err("failed to get gpio flags\n");
+		drvdata->is_wuhb_connected = 0;
+		goto end;
+	}
+
+	drvdata->is_wuhb_connected = 1;
+	drvdata->fd_gpio.gpio = gpio;
+	/* DT polarity flag decides the active level of the detect line. */
+	drvdata->fd_gpio.active_low = flags & OF_GPIO_ACTIVE_LOW;
+
+end:
+	return rc;
+}
+
+/**
+ * qbt_probe() - Function loads hardware config from device tree
+ * @pdev: ptr to platform device object
+ *
+ * Return: 0 on success. Error code on failure.
+ */
+static int qbt_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct qbt_drvdata *drvdata;
+	int rc = 0;
+
+	pr_debug("entry\n");
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	rc = qbt_read_device_tree(pdev, drvdata);
+	if (rc < 0)
+		goto end;
+
+	/* Both char devices start out available (single-open semantics). */
+	atomic_set(&drvdata->fd_available, 1);
+	atomic_set(&drvdata->ipc_available, 1);
+
+	mutex_init(&drvdata->mutex);
+	mutex_init(&drvdata->fd_events_mutex);
+	mutex_init(&drvdata->ipc_events_mutex);
+
+	/*
+	 * NOTE(review): error paths below do not unwind qbt_dev_register()
+	 * or the input device — confirm whether a failed probe leaks these
+	 * registrations (qbt_remove() is not called on probe failure).
+	 */
+	rc = qbt_dev_register(drvdata);
+	if (rc < 0)
+		goto end;
+	rc = qbt_create_input_device(drvdata);
+	if (rc < 0)
+		goto end;
+	INIT_KFIFO(drvdata->fd_events);
+	INIT_KFIFO(drvdata->ipc_events);
+	init_waitqueue_head(&drvdata->read_wait_queue_fd);
+	init_waitqueue_head(&drvdata->read_wait_queue_ipc);
+
+	/* IRQs are requested but kept disabled until a client enables them. */
+	rc = setup_fd_gpio_irq(pdev, drvdata);
+	if (rc < 0)
+		goto end;
+	drvdata->fd_gpio.irq_enabled = false;
+	disable_irq(drvdata->fd_gpio.irq);
+
+	rc = setup_ipc_irq(pdev, drvdata);
+	if (rc < 0)
+		goto end;
+	drvdata->fw_ipc.irq_enabled = false;
+	disable_irq(drvdata->fw_ipc.irq);
+
+	rc = device_init_wakeup(&pdev->dev, 1);
+	if (rc < 0)
+		goto end;
+
+end:
+	pr_debug("exit : %d\n", rc);
+	return rc;
+}
+
+/*
+ * qbt_remove() - tear down devices, classes and wakeup state created in
+ * probe. IRQ and GPIO resources are devm-managed and released by the core.
+ *
+ * NOTE(review): mutexes are destroyed before the char devices are removed
+ * — confirm no fops path can still take them at this point.
+ */
+static int qbt_remove(struct platform_device *pdev)
+{
+	struct qbt_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	mutex_destroy(&drvdata->mutex);
+	mutex_destroy(&drvdata->fd_events_mutex);
+	mutex_destroy(&drvdata->ipc_events_mutex);
+
+	device_destroy(drvdata->qbt_class, drvdata->qbt_fd_cdev.dev);
+	device_destroy(drvdata->qbt_class, drvdata->qbt_ipc_cdev.dev);
+
+	class_destroy(drvdata->qbt_class);
+	cdev_del(&drvdata->qbt_fd_cdev);
+	cdev_del(&drvdata->qbt_ipc_cdev);
+	unregister_chrdev_region(drvdata->qbt_fd_cdev.dev, 1);
+	unregister_chrdev_region(drvdata->qbt_ipc_cdev.dev, 1);
+
+	device_init_wakeup(&pdev->dev, 0);
+
+	return 0;
+}
+
+static int qbt_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int rc = 0;
+	struct qbt_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	/*
+	 * Refuse to suspend while the driver is busy (mutex held, e.g.
+	 * during a TZ call) so that resources the operation depends on
+	 * stay up. NOTE(review): the original comment refers to a clock
+	 * check, but only the mutex is consulted here — confirm intent.
+	 */
+	if (!mutex_trylock(&drvdata->mutex))
+		return -EBUSY;
+
+	else {
+		/* Arm wake sources so fingerprint events can resume us. */
+		if (drvdata->is_wuhb_connected)
+			enable_irq_wake(drvdata->fd_gpio.irq);
+
+		enable_irq_wake(drvdata->fw_ipc.irq);
+	}
+
+	mutex_unlock(&drvdata->mutex);
+
+	return rc;
+}
+
+/*
+ * qbt_resume() - disarm the wake sources armed in qbt_suspend().
+ * Mirrors the suspend path: finger-detect wake only when WUHB is wired.
+ */
+static int qbt_resume(struct platform_device *pdev)
+{
+	struct qbt_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	if (drvdata->is_wuhb_connected)
+		disable_irq_wake(drvdata->fd_gpio.irq);
+
+	disable_irq_wake(drvdata->fw_ipc.irq);
+
+	return 0;
+}
+
+/* Device-tree binding: matches "qcom,qbt-handler" nodes. */
+static const struct of_device_id qbt_match[] = {
+	{ .compatible = "qcom,qbt-handler" },
+	{}
+};
+
+/*
+ * NOTE(review): legacy platform-bus .suspend/.resume callbacks are used
+ * here rather than dev_pm_ops — confirm this matches the target kernel's
+ * PM conventions.
+ */
+static struct platform_driver qbt_plat_driver = {
+	.probe = qbt_probe,
+	.remove = qbt_remove,
+	.suspend = qbt_suspend,
+	.resume = qbt_resume,
+	.driver = {
+		.name = "qbt_handler",
+		.of_match_table = qbt_match,
+	},
+};
+
+module_platform_driver(qbt_plat_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. QBT HANDLER");
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
index ed2ee73..d5a1a97 100644
--- a/drivers/soc/qcom/qdss_bridge.c
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#define KMSG_COMPONENT "QDSS diag bridge"
@@ -21,14 +21,20 @@
#include "qdss_bridge.h"
#define MODULE_NAME "qdss_bridge"
-
-#define QDSS_BUF_SIZE (16*1024)
-#define MHI_CLIENT_QDSS_IN 9
+#define INIT_STATUS -1
/* Max number of objects needed */
static int poolsize = 32;
static struct class *mhi_class;
+static enum mhi_dev_state dev_state = INIT_STATUS;
+static enum mhi_ch curr_chan;
+
+static const char * const str_mhi_curr_chan[] = {
+ [QDSS] = "QDSS",
+ [QDSS_HW] = "IP_HW_QDSS",
+ [EMPTY] = "EMPTY",
+};
static const char * const str_mhi_transfer_mode[] = {
[MHI_TRANSFER_TYPE_USB] = "usb",
@@ -152,7 +158,6 @@ static void qdss_del_buf_tbl_entry(struct qdss_bridge_drvdata *drvdata,
return;
}
}
-
spin_unlock_bh(&drvdata->lock);
}
@@ -206,6 +211,14 @@ static ssize_t mode_show(struct device *dev,
str_mhi_transfer_mode[drvdata->mode]);
}
+static ssize_t curr_chan_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (curr_chan < QDSS || curr_chan > EMPTY)
+ return -EINVAL;
+ return scnprintf(buf, PAGE_SIZE, "%s\n", str_mhi_curr_chan[curr_chan]);
+}
+
static ssize_t mode_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t size)
@@ -275,6 +288,7 @@ static ssize_t mode_store(struct device *dev,
}
static DEVICE_ATTR_RW(mode);
+static DEVICE_ATTR_RO(curr_chan);
static void mhi_read_work_fn(struct work_struct *work)
{
@@ -776,6 +790,13 @@ static void qdss_mhi_remove(struct mhi_device *mhi_dev)
drvdata = mhi_dev->priv_data;
if (!drvdata)
return;
+
+ pr_debug("remove dev state: %d\n", mhi_dev->mhi_cntrl->dev_state);
+
+ dev_state = mhi_dev->mhi_cntrl->dev_state;
+ if (mhi_dev->mhi_cntrl->dev_state != MHI_STATE_RESET)
+ curr_chan = EMPTY;
+
spin_lock_bh(&drvdata->lock);
if (drvdata->opened == ENABLE) {
drvdata->opened = SSR;
@@ -823,11 +844,40 @@ static int qdss_mhi_probe(struct mhi_device *mhi_dev,
const struct mhi_device_id *id)
{
int ret;
+ bool def = false;
unsigned int baseminor = 0;
unsigned int count = 1;
struct qdss_bridge_drvdata *drvdata;
+ struct device_node *of_node = mhi_dev->dev.of_node;
dev_t dev;
+ pr_debug("probe dev state: %d chan: %s curr_chan: %d\n",
+ mhi_dev->mhi_cntrl->dev_state,
+ id->chan,
+ curr_chan);
+
+ def = of_property_read_bool(of_node, "mhi,default-channel");
+ if (dev_state == INIT_STATUS) {
+ if (!def)
+ return -EINVAL;
+ if (!strcmp(id->chan, "QDSS"))
+ curr_chan = QDSS;
+ if (!strcmp(id->chan, "QDSS_HW"))
+ curr_chan = QDSS_HW;
+ } else if (dev_state == MHI_STATE_RESET) {
+ if (strcmp(id->chan, str_mhi_curr_chan[curr_chan]))
+ return -EINVAL;
+ } else {
+ if (curr_chan != EMPTY) {
+ pr_err("Need unbind another channel before bind.\n");
+ return -EINVAL;
+ }
+ if (!strcmp(id->chan, "QDSS"))
+ curr_chan = QDSS;
+ if (!strcmp(id->chan, "QDSS_HW"))
+ curr_chan = QDSS_HW;
+ }
+
drvdata = devm_kzalloc(&mhi_dev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata) {
ret = -ENOMEM;
@@ -865,7 +915,12 @@ static int qdss_mhi_probe(struct mhi_device *mhi_dev,
ret = device_create_file(drvdata->dev, &dev_attr_mode);
if (ret) {
- pr_err("sysfs node create failed error:%d\n", ret);
+ pr_err("mode sysfs node create failed error:%d\n", ret);
+ goto exit_destroy_device;
+ }
+ ret = device_create_file(drvdata->dev, &dev_attr_curr_chan);
+ if (ret) {
+ pr_err("curr_chan sysfs node create failed error:%d\n", ret);
goto exit_destroy_device;
}
@@ -891,6 +946,7 @@ static int qdss_mhi_probe(struct mhi_device *mhi_dev,
static const struct mhi_device_id qdss_mhi_match_table[] = {
{ .chan = "QDSS", .driver_data = 0x4000 },
+ { .chan = "IP_HW_QDSS", .driver_data = 0x4000 },
{},
};
diff --git a/drivers/soc/qcom/qdss_bridge.h b/drivers/soc/qcom/qdss_bridge.h
index 0967aea..81f096f 100644
--- a/drivers/soc/qcom/qdss_bridge.h
+++ b/drivers/soc/qcom/qdss_bridge.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _QDSS_BRIDGE_H
@@ -30,6 +30,12 @@ enum open_status {
SSR,
};
+enum mhi_ch {
+ QDSS,
+ QDSS_HW,
+ EMPTY,
+};
+
struct qdss_bridge_drvdata {
int alias;
enum open_status opened;
diff --git a/drivers/soc/qcom/rq_stats.c b/drivers/soc/qcom/rq_stats.c
index c5d6f07..4906d97 100644
--- a/drivers/soc/qcom/rq_stats.c
+++ b/drivers/soc/qcom/rq_stats.c
@@ -3,248 +3,21 @@
* Copyright (c) 2010-2015, 2017, 2019, The Linux Foundation. All rights reserved.
*/
-#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/hrtimer.h>
#include <linux/cpu.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
-#include <linux/notifier.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-#include <linux/spinlock.h>
#include <linux/rq_stats.h>
-#include <linux/cpufreq.h>
-#include <linux/kernel_stat.h>
-#include <linux/tick.h>
-#include <asm/smp_plat.h>
-#include <linux/suspend.h>
#define MAX_LONG_SIZE 24
-#define DEFAULT_RQ_POLL_JIFFIES 1
#define DEFAULT_DEF_TIMER_JIFFIES 5
-struct notifier_block freq_transition;
-
-struct cpu_load_data {
- u64 prev_cpu_idle;
- u64 prev_cpu_wall;
- unsigned int avg_load_maxfreq;
- unsigned int samples;
- unsigned int window_size;
- unsigned int cur_freq;
- unsigned int policy_max;
- cpumask_var_t related_cpus;
- struct mutex cpu_load_mutex;
-};
-
-static DEFINE_PER_CPU(struct cpu_load_data, cpuload);
-
-
-static int update_average_load(unsigned int freq, unsigned int cpu)
-{
-
- struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
- u64 cur_wall_time, cur_idle_time;
- unsigned int idle_time, wall_time;
- unsigned int cur_load, load_at_max_freq;
-
- cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time, 0);
-
- wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
- pcpu->prev_cpu_wall = cur_wall_time;
-
- idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
- pcpu->prev_cpu_idle = cur_idle_time;
-
-
- if (unlikely(wall_time <= 0 || wall_time < idle_time))
- return 0;
-
- cur_load = 100 * (wall_time - idle_time) / wall_time;
-
- /* Calculate the scaled load across CPU */
- load_at_max_freq = (cur_load * freq) / pcpu->policy_max;
-
- if (!pcpu->avg_load_maxfreq) {
- /* This is the first sample in this window*/
- pcpu->avg_load_maxfreq = load_at_max_freq;
- pcpu->window_size = wall_time;
- } else {
- /*
- * The is already a sample available in this window.
- * Compute weighted average with prev entry, so that we get
- * the precise weighted load.
- */
- pcpu->avg_load_maxfreq =
- ((pcpu->avg_load_maxfreq * pcpu->window_size) +
- (load_at_max_freq * wall_time)) /
- (wall_time + pcpu->window_size);
-
- pcpu->window_size += wall_time;
- }
-
- return 0;
-}
-
-static unsigned int report_load_at_max_freq(void)
-{
- int cpu;
- struct cpu_load_data *pcpu;
- unsigned int total_load = 0;
-
- for_each_online_cpu(cpu) {
- pcpu = &per_cpu(cpuload, cpu);
- mutex_lock(&pcpu->cpu_load_mutex);
- update_average_load(pcpu->cur_freq, cpu);
- total_load += pcpu->avg_load_maxfreq;
- pcpu->avg_load_maxfreq = 0;
- mutex_unlock(&pcpu->cpu_load_mutex);
- }
- return total_load;
-}
-
-static int cpufreq_transition_handler(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- struct cpufreq_freqs *freqs = data;
- struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
- int j;
-
- switch (val) {
- case CPUFREQ_POSTCHANGE:
- for_each_cpu(j, this_cpu->related_cpus) {
- struct cpu_load_data *pcpu = &per_cpu(cpuload, j);
-
- mutex_lock(&pcpu->cpu_load_mutex);
- update_average_load(freqs->old, j);
- pcpu->cur_freq = freqs->new;
- mutex_unlock(&pcpu->cpu_load_mutex);
- }
- break;
- }
- return 0;
-}
-
-static void update_related_cpus(void)
-{
- unsigned int cpu;
-
- for_each_cpu(cpu, cpu_online_mask) {
- struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
- struct cpufreq_policy cpu_policy;
-
- cpufreq_get_policy(&cpu_policy, cpu);
- cpumask_copy(this_cpu->related_cpus, cpu_policy.cpus);
- }
-}
-
-static int cpu_online_handler(unsigned int cpu)
-{
- struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
-
- if (!this_cpu->cur_freq)
- this_cpu->cur_freq = cpufreq_quick_get(cpu);
- update_related_cpus();
- this_cpu->avg_load_maxfreq = 0;
- return 0;
-}
-
-static int system_suspend_handler(struct notifier_block *nb,
- unsigned long val, void *data)
-{
- switch (val) {
- case PM_POST_HIBERNATION:
- case PM_POST_SUSPEND:
- case PM_POST_RESTORE:
- rq_info.hotplug_disabled = 0;
- break;
- case PM_HIBERNATION_PREPARE:
- case PM_SUSPEND_PREPARE:
- rq_info.hotplug_disabled = 1;
- break;
- default:
- return NOTIFY_DONE;
- }
- return NOTIFY_OK;
-}
-
-
-static ssize_t hotplug_disable_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- unsigned int val = rq_info.hotplug_disabled;
-
- return snprintf(buf, MAX_LONG_SIZE, "%d\n", val);
-}
-
-static struct kobj_attribute hotplug_disabled_attr = __ATTR_RO(hotplug_disable);
-
static void def_work_fn(struct work_struct *work)
{
/* Notify polling threads on change of value */
sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
}
-static ssize_t run_queue_avg_show(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- unsigned int val = 0;
- unsigned long flags = 0;
-
- spin_lock_irqsave(&rq_lock, flags);
- /* rq avg currently available only on one core */
- val = rq_info.rq_avg;
- rq_info.rq_avg = 0;
- spin_unlock_irqrestore(&rq_lock, flags);
-
- return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
-}
-
-static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);
-
-static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- int ret = 0;
- unsigned long flags = 0;
-
- spin_lock_irqsave(&rq_lock, flags);
- ret = snprintf(buf, MAX_LONG_SIZE, "%u\n",
- jiffies_to_msecs(rq_info.rq_poll_jiffies));
- spin_unlock_irqrestore(&rq_lock, flags);
-
- return ret;
-}
-
-static ssize_t store_run_queue_poll_ms(struct kobject *kobj,
- struct kobj_attribute *attr,
- const char *buf, size_t count)
-{
- unsigned int val = 0;
- unsigned long flags = 0;
- static DEFINE_MUTEX(lock_poll_ms);
-
- mutex_lock(&lock_poll_ms);
-
- spin_lock_irqsave(&rq_lock, flags);
- if (kstrtouint(buf, 0, &val))
- count = -EINVAL;
- else
- rq_info.rq_poll_jiffies = msecs_to_jiffies(val);
- spin_unlock_irqrestore(&rq_lock, flags);
-
- mutex_unlock(&lock_poll_ms);
-
- return count;
-}
-
-static struct kobj_attribute run_queue_poll_ms_attr =
- __ATTR(run_queue_poll_ms, 0600, show_run_queue_poll_ms,
- store_run_queue_poll_ms);
-
static ssize_t show_def_timer_ms(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
@@ -276,22 +49,8 @@ static struct kobj_attribute def_timer_ms_attr =
__ATTR(def_timer_ms, 0600, show_def_timer_ms,
store_def_timer_ms);
-static ssize_t show_cpu_normalized_load(struct kobject *kobj,
- struct kobj_attribute *attr, char *buf)
-{
- return snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq());
-}
-
-static struct kobj_attribute cpu_normalized_load_attr =
- __ATTR(cpu_normalized_load, 0600, show_cpu_normalized_load,
- NULL);
-
static struct attribute *rq_attrs[] = {
- &cpu_normalized_load_attr.attr,
&def_timer_ms_attr.attr,
- &run_queue_avg_attr.attr,
- &run_queue_poll_ms_attr.attr,
- &hotplug_disabled_attr.attr,
NULL,
};
@@ -303,7 +62,6 @@ static int init_rq_attribs(void)
{
int err;
- rq_info.rq_avg = 0;
rq_info.attr_group = &rq_attr_group;
/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
@@ -324,8 +82,6 @@ static int init_rq_attribs(void)
static int __init msm_rq_stats_init(void)
{
int ret;
- int i;
- struct cpufreq_policy cpu_policy;
#ifndef CONFIG_SMP
/* Bail out if this is not an SMP Target */
@@ -337,44 +93,12 @@ static int __init msm_rq_stats_init(void)
WARN_ON(!rq_wq);
INIT_WORK(&rq_info.def_timer_work, def_work_fn);
spin_lock_init(&rq_lock);
- rq_info.rq_poll_jiffies = DEFAULT_RQ_POLL_JIFFIES;
rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
- rq_info.rq_poll_last_jiffy = 0;
rq_info.def_timer_last_jiffy = 0;
- rq_info.hotplug_disabled = 0;
ret = init_rq_attribs();
rq_info.init = 1;
- for_each_possible_cpu(i) {
- struct cpu_load_data *pcpu = &per_cpu(cpuload, i);
-
- mutex_init(&pcpu->cpu_load_mutex);
- cpufreq_get_policy(&cpu_policy, i);
- pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
- if (cpu_online(i))
- pcpu->cur_freq = cpufreq_quick_get(i);
- cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
- }
- freq_transition.notifier_call = cpufreq_transition_handler;
- cpufreq_register_notifier(&freq_transition,
- CPUFREQ_TRANSITION_NOTIFIER);
- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "rq_stats:online",
- cpu_online_handler, NULL);
-
return ret;
}
late_initcall(msm_rq_stats_init);
-
-static int __init msm_rq_stats_early_init(void)
-{
-#ifndef CONFIG_SMP
- /* Bail out if this is not an SMP Target */
- rq_info.init = 0;
- return -EPERM;
-#endif
-
- pm_notifier(system_suspend_handler, 0);
- return 0;
-}
-core_initcall(msm_rq_stats_early_init);
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
index d9d4b82..599a8d9 100644
--- a/drivers/soc/qcom/secure_buffer.c
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -238,10 +238,17 @@ static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
#define BATCH_MAX_SIZE SZ_2M
#define BATCH_MAX_SECTIONS 32
-int hyp_assign_table(struct sg_table *table,
+/*
+ * When -EAGAIN is returned it is safe for the caller to try to call
+ * __hyp_assign_table again.
+ *
+ * When -EADDRNOTAVAIL is returned the memory may no longer be in
+ * a usable state and should no longer be accessed by the HLOS.
+ */
+static int __hyp_assign_table(struct sg_table *table,
u32 *source_vm_list, int source_nelems,
int *dest_vmids, int *dest_perms,
- int dest_nelems)
+ int dest_nelems, bool try_lock)
{
int ret = 0;
struct scm_desc desc = {0};
@@ -271,10 +278,17 @@ int hyp_assign_table(struct sg_table *table,
&dest_vm_copy_size);
if (!dest_vm_copy) {
ret = -ENOMEM;
- goto out_free;
+ goto out_free_src;
}
- mutex_lock(&secure_buffer_mutex);
+ if (try_lock) {
+ if (!mutex_trylock(&secure_buffer_mutex)) {
+ ret = -EAGAIN;
+ goto out_free_dest;
+ }
+ } else {
+ mutex_lock(&secure_buffer_mutex);
+ }
sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
if (!sg_table_copy) {
@@ -330,6 +344,12 @@ int hyp_assign_table(struct sg_table *table,
if (ret) {
pr_info("%s: Failed to assign memory protection, ret = %d\n",
__func__, ret);
+
+ /*
+ * Make it clear to clients that the memory may no
+ * longer be in a usable state.
+ */
+ ret = -EADDRNOTAVAIL;
break;
}
batch_start = batch_end;
@@ -337,12 +357,31 @@ int hyp_assign_table(struct sg_table *table,
out_unlock:
mutex_unlock(&secure_buffer_mutex);
+out_free_dest:
kfree(dest_vm_copy);
-out_free:
+out_free_src:
kfree(source_vm_copy);
return ret;
}
+int hyp_assign_table(struct sg_table *table,
+ u32 *source_vm_list, int source_nelems,
+ int *dest_vmids, int *dest_perms,
+ int dest_nelems)
+{
+ return __hyp_assign_table(table, source_vm_list, source_nelems,
+ dest_vmids, dest_perms, dest_nelems, false);
+}
+
+int try_hyp_assign_table(struct sg_table *table,
+ u32 *source_vm_list, int source_nelems,
+ int *dest_vmids, int *dest_perms,
+ int dest_nelems)
+{
+ return __hyp_assign_table(table, source_vm_list, source_nelems,
+ dest_vmids, dest_perms, dest_nelems, true);
+}
+
int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
int source_nelems, int *dest_vmids,
int *dest_perms, int dest_nelems)
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 7a4edd7..595f0de 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -400,7 +400,7 @@ static int spcom_rx(struct spcom_channel *ch,
goto exit_err;
}
} else {
- pr_debug("pending data size [%zu], requested size [%zu], ch->txn_id %d\n",
+ pr_debug("pending data size [%zu], requested size [%u], ch->txn_id %d\n",
ch->actual_rx_size, size, ch->txn_id);
}
if (!ch->rpmsg_rx_buf) {
@@ -895,7 +895,7 @@ static int spcom_handle_lock_ion_buf_command(struct spcom_channel *ch,
}
if (cmd->arg > (unsigned int)INT_MAX) {
- pr_err("int overflow [%ld]\n", cmd->arg);
+ pr_err("int overflow [%u]\n", cmd->arg);
return -EINVAL;
}
fd = cmd->arg;
@@ -926,7 +926,7 @@ static int spcom_handle_lock_ion_buf_command(struct spcom_channel *ch,
if (ch->dmabuf_handle_table[i] == NULL) {
ch->dmabuf_handle_table[i] = dma_buf;
ch->dmabuf_fd_table[i] = fd;
- pr_debug("ch [%s] locked ion buf #%d fd [%d] dma_buf=0x%x\n",
+ pr_debug("ch [%s] locked ion buf #%d fd [%d] dma_buf=0x%pK\n",
ch->name, i,
ch->dmabuf_fd_table[i],
ch->dmabuf_handle_table[i]);
@@ -964,7 +964,7 @@ static int spcom_handle_unlock_ion_buf_command(struct spcom_channel *ch,
return -EINVAL;
}
if (cmd->arg > (unsigned int)INT_MAX) {
- pr_err("int overflow [%ld]\n", cmd->arg);
+ pr_err("int overflow [%u]\n", cmd->arg);
return -EINVAL;
}
fd = cmd->arg;
@@ -1000,7 +1000,7 @@ static int spcom_handle_unlock_ion_buf_command(struct spcom_channel *ch,
if (!ch->dmabuf_handle_table[i])
continue;
if (ch->dmabuf_handle_table[i] == dma_buf) {
- pr_debug("ch [%s] unlocked ion buf #%d fd [%d] dma_buf=0x%x\n",
+ pr_debug("ch [%s] unlocked ion buf #%d fd [%d] dma_buf=0x%pK\n",
ch->name, i,
ch->dmabuf_fd_table[i],
ch->dmabuf_handle_table[i]);
@@ -1369,7 +1369,7 @@ static int spcom_device_release(struct inode *inode, struct file *filp)
ch->is_busy = false;
ch->pid = 0;
if (ch->rpmsg_rx_buf) {
- pr_debug("ch [%s] discarting unconsumed rx packet actual_rx_size=%d\n",
+ pr_debug("ch [%s] discarting unconsumed rx packet actual_rx_size=%zd\n",
name, ch->actual_rx_size);
kfree(ch->rpmsg_rx_buf);
ch->rpmsg_rx_buf = NULL;
@@ -1884,7 +1884,7 @@ static void spcom_signal_rx_done(struct work_struct *ignored)
if (ch->rpmsg_abort) {
if (ch->rpmsg_rx_buf) {
- pr_debug("ch [%s] rx aborted free %d bytes\n",
+ pr_debug("ch [%s] rx aborted free %zd bytes\n",
ch->name, ch->actual_rx_size);
kfree(ch->rpmsg_rx_buf);
ch->actual_rx_size = 0;
@@ -1892,7 +1892,7 @@ static void spcom_signal_rx_done(struct work_struct *ignored)
goto rx_aborted;
}
if (ch->rpmsg_rx_buf) {
- pr_err("ch [%s] previous buffer not consumed %d bytes\n",
+ pr_err("ch [%s] previous buffer not consumed %zd bytes\n",
ch->name, ch->actual_rx_size);
kfree(ch->rpmsg_rx_buf);
ch->rpmsg_rx_buf = NULL;
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index dcdf907..f9ac0a7 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "subsys-restart: %s(): " fmt, __func__
@@ -1342,6 +1342,16 @@ void notify_proxy_unvote(struct device *device)
notify_each_subsys_device(&dev, 1, SUBSYS_PROXY_UNVOTE, NULL);
}
+void notify_before_auth_and_reset(struct device *device)
+{
+ struct subsys_device *dev = desc_to_subsys(device);
+
+ if (dev)
+ notify_each_subsys_device(&dev, 1,
+ SUBSYS_BEFORE_AUTH_AND_RESET, NULL);
+}
+
+
static int subsys_device_open(struct inode *inode, struct file *file)
{
struct subsys_device *device, *subsys_dev = 0;
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 36435f7..ffe21e8 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -188,7 +188,7 @@ static int get_spi_clk_cfg(u32 speed_hz, struct spi_geni_master *mas,
res_freq = (sclk_freq / (*clk_div));
- dev_dbg(mas->dev, "%s: req %u resultant %u sclk %lu, idx %d, div %d\n",
+ dev_dbg(mas->dev, "%s: req %u resultant %lu sclk %lu, idx %d, div %d\n",
__func__, speed_hz, res_freq, sclk_freq, *clk_idx, *clk_div);
ret = clk_set_rate(rsc->se_clk, sclk_freq);
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index fdcf307..4b77fa1 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -15,6 +15,7 @@
#include <linux/clk.h>
#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index f116a64..5a53ae0 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -172,8 +172,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
void ion_buffer_destroy(struct ion_buffer *buffer)
{
if (buffer->kmap_cnt > 0) {
- pr_warn_once("%s: buffer still mapped in the kernel\n",
- __func__);
+ pr_warn_ratelimited("ION client likely missing a call to dma_buf_kunmap or dma_buf_vunmap\n");
buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
}
buffer->heap->ops->free(buffer);
@@ -220,7 +219,7 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
if (buffer->kmap_cnt == 0) {
- pr_warn_ratelimited("Call dma_buf_begin_cpu_access before dma_buf_end_cpu_access, pid:%d\n",
+ pr_warn_ratelimited("ION client likely missing a call to dma_buf_kmap or dma_buf_vmap, pid:%d\n",
current->pid);
return;
}
@@ -310,9 +309,9 @@ static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
struct ion_buffer *buffer = dmabuf->priv;
mutex_lock(&buffer->lock);
- free_duped_table(a->table);
list_del(&a->list);
mutex_unlock(&buffer->lock);
+ free_duped_table(a->table);
kfree(a);
}
@@ -495,31 +494,59 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
struct ion_buffer *buffer = dmabuf->priv;
_ion_buffer_destroy(buffer);
-}
-
-static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
-{
- struct ion_buffer *buffer = dmabuf->priv;
-
- WARN(!buffer->vaddr, "Call dma_buf_begin_cpu_access before dma_buf_kmap\n");
- return buffer->vaddr + offset * PAGE_SIZE;
-}
-
-static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
- void *ptr)
-{
+ kfree(dmabuf->exp_name);
}
static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
{
struct ion_buffer *buffer = dmabuf->priv;
+ void *vaddr = ERR_PTR(-EINVAL);
- WARN(!buffer->vaddr, "Call dma_buf_begin_cpu_access before dma_buf_vmap\n");
- return buffer->vaddr;
+ if (buffer->heap->ops->map_kernel) {
+ mutex_lock(&buffer->lock);
+ vaddr = ion_buffer_kmap_get(buffer);
+ mutex_unlock(&buffer->lock);
+ } else {
+ pr_warn_ratelimited("heap %s doesn't support map_kernel\n",
+ buffer->heap->name);
+ }
+
+ return vaddr;
}
static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ if (buffer->heap->ops->map_kernel) {
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+ }
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+ /*
+ * TODO: Once clients remove their hacks where they assume kmap(ed)
+ * addresses are virtually contiguous implement this properly
+ */
+ void *vaddr = ion_dma_buf_vmap(dmabuf);
+
+ if (IS_ERR(vaddr))
+ return vaddr;
+
+ return vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+ void *ptr)
+{
+ /*
+ * TODO: Once clients remove their hacks where they assume kmap(ed)
+ * addresses are virtually contiguous implement this properly
+ */
+ ion_dma_buf_vunmap(dmabuf, ptr);
}
static int ion_sgl_sync_range(struct device *dev, struct scatterlist *sgl,
@@ -604,7 +631,6 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
bool sync_only_mapped)
{
struct ion_buffer *buffer = dmabuf->priv;
- void *vaddr;
struct ion_dma_buf_attachment *a;
int ret = 0;
@@ -617,19 +643,6 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
goto out;
}
- /*
- * TODO: Move this elsewhere because we don't always need a vaddr
- */
- if (buffer->heap->ops->map_kernel) {
- mutex_lock(&buffer->lock);
- vaddr = ion_buffer_kmap_get(buffer);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto unlock;
- }
- mutex_unlock(&buffer->lock);
- }
-
if (!(buffer->flags & ION_FLAG_CACHED)) {
trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
true, direction,
@@ -701,8 +714,6 @@ static int __ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
}
}
-
-unlock:
mutex_unlock(&buffer->lock);
out:
return ret;
@@ -725,12 +736,6 @@ static int __ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
goto out;
}
- if (buffer->heap->ops->map_kernel) {
- mutex_lock(&buffer->lock);
- ion_buffer_kmap_put(buffer);
- mutex_unlock(&buffer->lock);
- }
-
if (!(buffer->flags & ION_FLAG_CACHED)) {
trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
true, direction,
@@ -833,7 +838,6 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
unsigned int len)
{
struct ion_buffer *buffer = dmabuf->priv;
- void *vaddr;
struct ion_dma_buf_attachment *a;
int ret = 0;
@@ -846,15 +850,6 @@ static int ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
goto out;
}
- /*
- * TODO: Move this elsewhere because we don't always need a vaddr
- */
- if (buffer->heap->ops->map_kernel) {
- mutex_lock(&buffer->lock);
- vaddr = ion_buffer_kmap_get(buffer);
- mutex_unlock(&buffer->lock);
- }
-
if (!(buffer->flags & ION_FLAG_CACHED)) {
trace_ion_begin_cpu_access_cmo_skip(NULL, dmabuf->name, false,
true, dir,
@@ -934,12 +929,6 @@ static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
goto out;
}
- if (buffer->heap->ops->map_kernel) {
- mutex_lock(&buffer->lock);
- ion_buffer_kmap_put(buffer);
- mutex_unlock(&buffer->lock);
- }
-
if (!(buffer->flags & ION_FLAG_CACHED)) {
trace_ion_end_cpu_access_cmo_skip(NULL, dmabuf->name, false,
true, direction,
@@ -1038,6 +1027,7 @@ struct dma_buf *ion_alloc_dmabuf(size_t len, unsigned int heap_id_mask,
struct ion_heap *heap;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct dma_buf *dmabuf;
+ char task_comm[TASK_COMM_LEN];
pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
len, heap_id_mask, flags);
@@ -1069,14 +1059,20 @@ struct dma_buf *ion_alloc_dmabuf(size_t len, unsigned int heap_id_mask,
if (IS_ERR(buffer))
return ERR_CAST(buffer);
+ get_task_comm(task_comm, current->group_leader);
+
exp_info.ops = &dma_buf_ops;
exp_info.size = buffer->size;
exp_info.flags = O_RDWR;
exp_info.priv = buffer;
+ exp_info.exp_name = kasprintf(GFP_KERNEL, "%s-%s-%d-%s", KBUILD_MODNAME,
+ heap->name, current->tgid, task_comm);
dmabuf = dma_buf_export(&exp_info);
- if (IS_ERR(dmabuf))
+ if (IS_ERR(dmabuf)) {
_ion_buffer_destroy(buffer);
+ kfree(exp_info.exp_name);
+ }
return dmabuf;
}
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index a9aed00..8b29a76 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -3,7 +3,7 @@
* drivers/staging/android/ion/ion.h
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
*
*/
@@ -30,6 +30,7 @@
#define ION_MM_HEAP_NAME "mm"
#define ION_SPSS_HEAP_NAME "spss"
#define ION_SECURE_CARVEOUT_HEAP_NAME "secure_carveout"
+#define ION_USER_CONTIG_HEAP_NAME "user_contig"
#define ION_QSECOM_HEAP_NAME "qsecom"
#define ION_QSECOM_TA_HEAP_NAME "qsecom_ta"
#define ION_SECURE_HEAP_NAME "secure_heap"
diff --git a/drivers/staging/android/ion/ion_cma_secure_heap.c b/drivers/staging/android/ion/ion_cma_secure_heap.c
index 5a18b27..8e28ba0 100644
--- a/drivers/staging/android/ion/ion_cma_secure_heap.c
+++ b/drivers/staging/android/ion/ion_cma_secure_heap.c
@@ -343,8 +343,8 @@ static void ion_secure_cma_free_chunk(struct ion_cma_secure_heap *sheap,
kfree(chunk);
}
-static void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap,
- int max_nr)
+static unsigned long
+__ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap, int max_nr)
{
struct list_head *entry, *_n;
unsigned long drained_size = 0, skipped_size = 0;
@@ -368,6 +368,7 @@ static void __ion_secure_cma_shrink_pool(struct ion_cma_secure_heap *sheap,
}
trace_ion_secure_cma_shrink_pool_end(drained_size, skipped_size);
+ return drained_size;
}
int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
@@ -385,6 +386,7 @@ int ion_secure_cma_drain_pool(struct ion_heap *heap, void *unused)
static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
struct shrink_control *sc)
{
+ unsigned long freed;
struct ion_cma_secure_heap *sheap = container_of(shrinker,
struct ion_cma_secure_heap, shrinker);
int nr_to_scan = sc->nr_to_scan;
@@ -397,11 +399,11 @@ static unsigned long ion_secure_cma_shrinker(struct shrinker *shrinker,
if (!mutex_trylock(&sheap->chunk_lock))
return -EAGAIN;
- __ion_secure_cma_shrink_pool(sheap, nr_to_scan);
+ freed = __ion_secure_cma_shrink_pool(sheap, nr_to_scan);
mutex_unlock(&sheap->chunk_lock);
- return atomic_read(&sheap->total_pool_size);
+ return freed;
}
static unsigned long ion_secure_cma_shrinker_count(struct shrinker *shrinker,
diff --git a/drivers/staging/android/ion/ion_secure_util.c b/drivers/staging/android/ion/ion_secure_util.c
index df88427..1c1d4dd 100644
--- a/drivers/staging/android/ion/ion_secure_util.c
+++ b/drivers/staging/android/ion/ion_secure_util.c
@@ -89,7 +89,8 @@ static int populate_vm_list(unsigned long flags, unsigned int *vm_list,
}
int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
- int source_nelems, bool clear_page_private)
+ int source_nelems, bool clear_page_private,
+ bool try_lock)
{
u32 dest_vmid = VMID_HLOS;
u32 dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
@@ -103,11 +104,16 @@ int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
goto out;
}
- ret = hyp_assign_table(sgt, source_vm_list, source_nelems,
- &dest_vmid, &dest_perms, 1);
+ if (try_lock)
+ ret = try_hyp_assign_table(sgt, source_vm_list, source_nelems,
+ &dest_vmid, &dest_perms, 1);
+ else
+ ret = hyp_assign_table(sgt, source_vm_list, source_nelems,
+ &dest_vmid, &dest_perms, 1);
if (ret) {
- pr_err("%s: Unassign call failed.\n",
- __func__);
+ if (!try_lock)
+ pr_err("%s: Unassign call failed.\n",
+ __func__);
goto out;
}
if (clear_page_private)
@@ -183,7 +189,7 @@ int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
}
ret = ion_hyp_unassign_sg(sgt, source_vm_list, source_nelems,
- set_page_private);
+ set_page_private, false);
out_free_source:
kfree(source_vm_list);
diff --git a/drivers/staging/android/ion/ion_secure_util.h b/drivers/staging/android/ion/ion_secure_util.h
index 6267342..bd525e5 100644
--- a/drivers/staging/android/ion/ion_secure_util.h
+++ b/drivers/staging/android/ion/ion_secure_util.h
@@ -13,7 +13,8 @@ bool is_secure_vmid_valid(int vmid);
int ion_hyp_assign_sg(struct sg_table *sgt, int *dest_vm_list,
int dest_nelems, bool set_page_private);
int ion_hyp_unassign_sg(struct sg_table *sgt, int *source_vm_list,
- int source_nelems, bool clear_page_private);
+ int source_nelems, bool clear_page_private,
+ bool try_lock);
int ion_hyp_unassign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
bool set_page_private);
int ion_hyp_assign_sg_from_flags(struct sg_table *sgt, unsigned long flags,
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index aa781f5..9453d6f 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -3,7 +3,7 @@
* drivers/staging/android/ion/ion_system_heap.c
*
* Copyright (C) 2011 Google, Inc.
- * Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2019, The Linux Foundation. All rights reserved.
*
*/
@@ -158,6 +158,9 @@ alloc_from_pool_preferred(struct ion_system_heap *heap,
struct page_info *info;
int i;
+ if (buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)
+ goto force_alloc;
+
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return ERR_PTR(-ENOMEM);
@@ -189,6 +192,7 @@ alloc_from_pool_preferred(struct ion_system_heap *heap,
}
kfree(info);
+force_alloc:
return alloc_largest_available(heap, buffer, size, max_order);
}
@@ -325,8 +329,10 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
goto err;
table = kzalloc(sizeof(*table), GFP_KERNEL);
- if (!table)
+ if (!table) {
+ ret = -ENOMEM;
goto err_free_data_pages;
+ }
ret = sg_alloc_table(table, i, GFP_KERNEL);
if (ret)
@@ -388,7 +394,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
if (vmid > 0)
- ion_hyp_unassign_sg(table, &vmid, 1, true);
+ ion_hyp_unassign_sg(table, &vmid, 1, true, false);
for_each_sg(table->sgl, sg, table->nents, i)
free_buffer_page(sys_heap, buffer, sg_page(sg),
@@ -429,7 +435,7 @@ void ion_system_heap_free(struct ion_buffer *buffer)
if (vmid < 0)
ion_heap_buffer_zero(buffer);
} else if (vmid > 0) {
- if (ion_hyp_unassign_sg(table, &vmid, 1, true))
+ if (ion_hyp_unassign_sg(table, &vmid, 1, true, false))
return;
}
diff --git a/drivers/staging/android/ion/ion_system_secure_heap.c b/drivers/staging/android/ion/ion_system_secure_heap.c
index 53fcd55..f0d8d72 100644
--- a/drivers/staging/android/ion/ion_system_secure_heap.c
+++ b/drivers/staging/android/ion/ion_system_secure_heap.c
@@ -149,7 +149,8 @@ size_t ion_system_secure_heap_page_pool_total(struct ion_heap *heap,
return total << PAGE_SHIFT;
}
-static void process_one_shrink(struct ion_heap *sys_heap,
+static void process_one_shrink(struct ion_system_secure_heap *secure_heap,
+ struct ion_heap *sys_heap,
struct prefetch_info *info)
{
struct ion_buffer buffer;
@@ -157,7 +158,7 @@ static void process_one_shrink(struct ion_heap *sys_heap,
int ret;
memset(&buffer, 0, sizeof(struct ion_buffer));
- buffer.heap = sys_heap;
+ buffer.heap = &secure_heap->heap;
buffer.flags = info->vmid;
pool_size = ion_system_secure_heap_page_pool_total(sys_heap,
@@ -171,6 +172,7 @@ static void process_one_shrink(struct ion_heap *sys_heap,
}
buffer.private_flags = ION_PRIV_FLAG_SHRINKER_FREE;
+ buffer.heap = sys_heap;
sys_heap->ops->free(&buffer);
}
@@ -190,7 +192,7 @@ static void ion_system_secure_heap_prefetch_work(struct work_struct *work)
spin_unlock_irqrestore(&secure_heap->work_lock, flags);
if (info->shrink)
- process_one_shrink(sys_heap, info);
+ process_one_shrink(secure_heap, sys_heap, info);
else
process_one_prefetch(sys_heap, info);
@@ -205,7 +207,7 @@ static int alloc_prefetch_info(struct ion_prefetch_regions __user *
struct list_head *items)
{
struct prefetch_info *info;
- u64 __user *user_sizes;
+ u64 user_sizes;
int err;
unsigned int nr_sizes, vmid, i;
@@ -226,7 +228,7 @@ static int alloc_prefetch_info(struct ion_prefetch_regions __user *
if (!info)
return -ENOMEM;
- err = get_user(info->size, &user_sizes[i]);
+ err = get_user(info->size, ((u64 __user *)user_sizes + i));
if (err)
goto out_free;
@@ -260,7 +262,10 @@ static int __ion_system_secure_heap_resize(struct ion_heap *heap, void *ptr,
return -EINVAL;
for (i = 0; i < data->nr_regions; i++) {
- ret = alloc_prefetch_info(&data->regions[i], shrink, &items);
+ struct ion_prefetch_regions *r;
+
+ r = (struct ion_prefetch_regions *)data->regions + i;
+ ret = alloc_prefetch_info(r, shrink, &items);
if (ret)
goto out_free;
}
@@ -270,9 +275,9 @@ static int __ion_system_secure_heap_resize(struct ion_heap *heap, void *ptr,
spin_unlock_irqrestore(&secure_heap->work_lock, flags);
goto out_free;
}
- list_splice_init(&items, &secure_heap->prefetch_list);
- schedule_delayed_work(&secure_heap->prefetch_work,
- shrink ? msecs_to_jiffies(SHRINK_DELAY) : 0);
+ list_splice_tail_init(&items, &secure_heap->prefetch_list);
+ queue_delayed_work(system_unbound_wq, &secure_heap->prefetch_work,
+ shrink ? msecs_to_jiffies(SHRINK_DELAY) : 0);
spin_unlock_irqrestore(&secure_heap->work_lock, flags);
return 0;
@@ -449,7 +454,10 @@ int ion_secure_page_pool_shrink(struct ion_system_heap *sys_heap,
sg = sg_next(sg);
}
- if (ion_hyp_unassign_sg(&sgt, &vmid, 1, true))
+ ret = ion_hyp_unassign_sg(&sgt, &vmid, 1, true, true);
+ if (ret == -EADDRNOTAVAIL)
+ goto out3;
+ else if (ret < 0)
goto out2;
list_for_each_entry_safe(page, tmp, &pages, lru) {
@@ -460,6 +468,8 @@ int ion_secure_page_pool_shrink(struct ion_system_heap *sys_heap,
sg_free_table(&sgt);
return freed;
+out2:
+ sg_free_table(&sgt);
out1:
/* Restore pages to secure pool */
list_for_each_entry_safe(page, tmp, &pages, lru) {
@@ -467,7 +477,7 @@ int ion_secure_page_pool_shrink(struct ion_system_heap *sys_heap,
ion_page_pool_free(pool, page);
}
return 0;
-out2:
+out3:
/*
* The security state of the pages is unknown after a failure;
* They can neither be added back to the secure pool nor buddy system.
diff --git a/drivers/staging/android/ion/msm/msm_ion_of.c b/drivers/staging/android/ion/msm/msm_ion_of.c
index 4c313b9..a1dc3f8 100644
--- a/drivers/staging/android/ion/msm/msm_ion_of.c
+++ b/drivers/staging/android/ion/msm/msm_ion_of.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/err.h>
@@ -41,6 +41,10 @@ static struct ion_heap_desc ion_heap_meta[] = {
.name = ION_MM_HEAP_NAME,
},
{
+ .id = ION_USER_CONTIG_HEAP_ID,
+ .name = ION_USER_CONTIG_HEAP_NAME,
+ },
+ {
.id = ION_QSECOM_HEAP_ID,
.name = ION_QSECOM_HEAP_NAME,
},
@@ -161,6 +165,10 @@ static int msm_ion_get_heap_dt_data(struct device_node *node,
base = cma_get_base(dev->cma_area);
size = cma_get_size(dev->cma_area);
ret = 0;
+ } else if (dev->dma_mem) {
+ base = dma_get_device_base(dev, dev->dma_mem);
+ size = dma_get_size(dev->dma_mem);
+ ret = 0;
}
} else {
base = of_translate_address(pnode, basep);
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
index f6016c7..860ec69 100644
--- a/drivers/staging/android/uapi/msm_ion.h
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -53,6 +53,7 @@ enum ion_heap_ids {
#define ION_QSECOM_TA_HEAP_ID 19
#define ION_AUDIO_HEAP_ID 28
#define ION_CAMERA_HEAP_ID 20
+#define ION_USER_CONTIG_HEAP_ID 26
/**
* Flags to be used when allocating from the secure heap for
* content protection
@@ -100,15 +101,15 @@ enum ion_heap_ids {
#define ION_IOC_MSM_MAGIC 'M'
struct ion_prefetch_regions {
+ __u64 sizes;
__u32 vmid;
- __u64 __user *sizes;
__u32 nr_sizes;
};
struct ion_prefetch_data {
- __u32 heap_id;
__u64 len;
- struct ion_prefetch_regions __user *regions;
+ __u64 regions;
+ __u32 heap_id;
__u32 nr_regions;
};
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index bcc8dfa..9efb4dc 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -850,18 +850,18 @@ enum ieee80211_state {
#define IP_FMT "%pI4"
#define IP_ARG(x) (x)
-extern __inline int is_multicast_mac_addr(const u8 *addr)
+static inline int is_multicast_mac_addr(const u8 *addr)
{
return ((addr[0] != 0xff) && (0x01 & addr[0]));
}
-extern __inline int is_broadcast_mac_addr(const u8 *addr)
+static inline int is_broadcast_mac_addr(const u8 *addr)
{
return ((addr[0] == 0xff) && (addr[1] == 0xff) && (addr[2] == 0xff) && \
(addr[3] == 0xff) && (addr[4] == 0xff) && (addr[5] == 0xff));
}
-extern __inline int is_zero_mac_addr(const u8 *addr)
+static inline int is_zero_mac_addr(const u8 *addr)
{
return ((addr[0] == 0x00) && (addr[1] == 0x00) && (addr[2] == 0x00) && \
(addr[3] == 0x00) && (addr[4] == 0x00) && (addr[5] == 0x00));
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
index 284cf2c..8e1cf4d 100644
--- a/drivers/thermal/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -84,7 +84,12 @@ static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
struct pci_dev *pci_dev; \
struct platform_device *pdev; \
struct proc_thermal_device *proc_dev; \
-\
+ \
+ if (proc_thermal_emum_mode == PROC_THERMAL_NONE) { \
+ dev_warn(dev, "Attempted to get power limit before device was initialized!\n"); \
+ return 0; \
+ } \
+ \
if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
pdev = to_platform_device(dev); \
proc_dev = platform_get_drvdata(pdev); \
@@ -298,11 +303,6 @@ static int proc_thermal_add(struct device *dev,
*priv = proc_priv;
ret = proc_thermal_read_ppcc(proc_priv);
- if (!ret) {
- ret = sysfs_create_group(&dev->kobj,
- &power_limit_attribute_group);
-
- }
if (ret)
return ret;
@@ -316,8 +316,7 @@ static int proc_thermal_add(struct device *dev,
proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
if (IS_ERR(proc_priv->int340x_zone)) {
- ret = PTR_ERR(proc_priv->int340x_zone);
- goto remove_group;
+ return PTR_ERR(proc_priv->int340x_zone);
} else
ret = 0;
@@ -331,9 +330,6 @@ static int proc_thermal_add(struct device *dev,
remove_zone:
int340x_thermal_zone_remove(proc_priv->int340x_zone);
-remove_group:
- sysfs_remove_group(&proc_priv->dev->kobj,
- &power_limit_attribute_group);
return ret;
}
@@ -364,7 +360,10 @@ static int int3401_add(struct platform_device *pdev)
platform_set_drvdata(pdev, proc_priv);
proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
- return 0;
+ dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PLATFORM_DEV\n");
+
+ return sysfs_create_group(&pdev->dev.kobj,
+ &power_limit_attribute_group);
}
static int int3401_remove(struct platform_device *pdev)
@@ -423,7 +422,7 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
proc_priv->soc_dts = intel_soc_dts_iosf_init(
INTEL_SOC_DTS_INTERRUPT_MSI, 2, 0);
- if (proc_priv->soc_dts && pdev->irq) {
+ if (!IS_ERR(proc_priv->soc_dts) && pdev->irq) {
ret = pci_enable_msi(pdev);
if (!ret) {
ret = request_threaded_irq(pdev->irq, NULL,
@@ -441,7 +440,10 @@ static int proc_thermal_pci_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "No auxiliary DTSs enabled\n");
}
- return 0;
+ dev_info(&pdev->dev, "Creating sysfs group for PROC_THERMAL_PCI\n");
+
+ return sysfs_create_group(&pdev->dev.kobj,
+ &power_limit_attribute_group);
}
static void proc_thermal_pci_remove(struct pci_dev *pdev)
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 7d030c2..50b6746 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1695,7 +1695,7 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
}
/* ask the core to calculate the divisor */
- baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16);
+ baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 4);
spin_lock_irqsave(&sport->port.lock, flags);
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 1a74da9..0593b4f 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -1931,6 +1931,13 @@ static void msm_geni_serial_set_termios(struct uart_port *uport,
geni_serial_write_term_regs(uport, port->loopback, tx_trans_cfg,
tx_parity_cfg, rx_trans_cfg, rx_parity_cfg, bits_per_char,
stop_bit_len, ser_clk_cfg);
+
+ if (termios->c_cflag & CRTSCTS) {
+ geni_write_reg_nolog(0x0, uport->membase, SE_UART_MANUAL_RFR);
+ IPC_LOG_MSG(port->ipc_log_misc, "%s: Manual flow off\n",
+ __func__);
+ }
+
IPC_LOG_MSG(port->ipc_log_misc, "%s: baud %d\n", __func__, baud);
IPC_LOG_MSG(port->ipc_log_misc, "Tx: trans_cfg%d parity %d\n",
tx_trans_cfg, tx_parity_cfg);
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 1515074..35d1f6fa 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -221,7 +221,7 @@ static unsigned int qcom_geni_serial_get_mctrl(struct uart_port *uport)
unsigned int mctrl = TIOCM_DSR | TIOCM_CAR;
u32 geni_ios;
- if (uart_console(uport) || !uart_cts_enabled(uport)) {
+ if (uart_console(uport)) {
mctrl |= TIOCM_CTS;
} else {
geni_ios = readl_relaxed(uport->membase + SE_GENI_IOS);
@@ -237,7 +237,7 @@ static void qcom_geni_serial_set_mctrl(struct uart_port *uport,
{
u32 uart_manual_rfr = 0;
- if (uart_console(uport) || !uart_cts_enabled(uport))
+ if (uart_console(uport))
return;
if (!(mctrl & TIOCM_RTS))
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6bbea04..b57591c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2210,10 +2210,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
ret = wait_for_completion_timeout(&dwc->ep0_in_setup,
msecs_to_jiffies(DWC3_PULL_UP_TIMEOUT));
- if (ret == 0) {
+ if (ret == 0)
dev_err(dwc->dev, "timed out waiting for SETUP phase\n");
- return -ETIMEDOUT;
- }
}
spin_lock_irqsave(&dwc->lock, flags);
@@ -2423,6 +2421,7 @@ static int __dwc3_gadget_start(struct dwc3 *dwc)
/* begin to receive SETUP packets */
dwc->ep0state = EP0_SETUP_PHASE;
+ dwc->link_state = DWC3_LINK_STATE_SS_DIS;
dwc3_ep0_out_start(dwc);
dwc3_gadget_enable_irq(dwc);
@@ -3963,6 +3962,8 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
dwc3_disconnect_gadget(dwc);
__dwc3_gadget_stop(dwc);
+ synchronize_irq(dwc->irq_gadget);
+
return 0;
}
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 7c2b88d..730ba2bf 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -146,6 +146,7 @@ int config_ep_by_speed(struct usb_gadget *g,
struct usb_function *f,
struct usb_ep *_ep)
{
+ struct usb_composite_dev *cdev;
struct usb_endpoint_descriptor *chosen_desc = NULL;
struct usb_descriptor_header **speed_desc = NULL;
@@ -157,6 +158,8 @@ int config_ep_by_speed(struct usb_gadget *g,
if (!g || !f || !_ep)
return -EIO;
+ cdev = get_gadget_data(g);
+
/* select desired speed */
switch (g->speed) {
case USB_SPEED_SUPER_PLUS:
@@ -182,6 +185,13 @@ int config_ep_by_speed(struct usb_gadget *g,
default:
speed_desc = f->fs_descriptors;
}
+
+ if (!speed_desc) {
+ DBG(cdev, "%s desc not present for function %s\n",
+ usb_speed_string(g->speed), f->name);
+ return -EIO;
+ }
+
/* find descriptors */
for_each_ep_desc(speed_desc, d_spd) {
chosen_desc = (struct usb_endpoint_descriptor *)*d_spd;
@@ -225,12 +235,9 @@ int config_ep_by_speed(struct usb_gadget *g,
_ep->maxburst = comp_desc->bMaxBurst + 1;
break;
default:
- if (comp_desc->bMaxBurst != 0) {
- struct usb_composite_dev *cdev;
-
- cdev = get_gadget_data(g);
+ if (comp_desc->bMaxBurst != 0)
ERROR(cdev, "ep0 bMaxBurst must be 0\n");
- }
+
_ep->maxburst = 1;
break;
}
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index d1b0725..3309c1f 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -15,11 +15,16 @@
#include <linux/kdev_t.h>
#include <linux/usb/ch9.h>
+#ifdef CONFIG_USB_F_NCM
+#include <function/u_ncm.h>
+#endif
+
#ifdef CONFIG_USB_CONFIGFS_F_ACC
extern int acc_ctrlrequest(struct usb_composite_dev *cdev,
const struct usb_ctrlrequest *ctrl);
void acc_disconnect(void);
#endif
+
static struct class *android_class;
static struct device *android_device;
static int index;
@@ -1508,6 +1513,18 @@ static int android_setup(struct usb_gadget *gadget,
}
}
+#ifdef CONFIG_USB_F_NCM
+ if (value < 0)
+ value = ncm_ctrlrequest(cdev, c);
+
+ /*
+ * for mirror link command case, if it already been handled,
+ * do not pass to composite_setup
+ */
+ if (value == 0)
+ return value;
+#endif
+
#ifdef CONFIG_USB_CONFIGFS_F_ACC
if (value < 0)
value = acc_ctrlrequest(cdev, c);
diff --git a/drivers/usb/gadget/function/f_cdev.c b/drivers/usb/gadget/function/f_cdev.c
index f4dcbc8..6060761 100644
--- a/drivers/usb/gadget/function/f_cdev.c
+++ b/drivers/usb/gadget/function/f_cdev.c
@@ -128,6 +128,8 @@ struct f_cdev {
unsigned long nbytes_to_port_bridge;
unsigned long nbytes_from_port_bridge;
+ struct dentry *debugfs_root;
+
/* To test remote wakeup using debugfs */
u8 debugfs_rw_enable;
};
@@ -139,12 +141,6 @@ struct f_cdev_opts {
u8 port_num;
};
-struct usb_cser_debugfs {
- struct dentry *debugfs_root;
-};
-
-static struct usb_cser_debugfs debugfs;
-
static int major, minors;
struct class *fcdev_classp;
static DEFINE_IDA(chardev_ida);
@@ -157,7 +153,7 @@ static int usb_cser_connect(struct f_cdev *port);
static void usb_cser_disconnect(struct f_cdev *port);
static struct f_cdev *f_cdev_alloc(char *func_name, int portno);
static void usb_cser_free_req(struct usb_ep *ep, struct usb_request *req);
-static void usb_cser_debugfs_exit(void);
+static void usb_cser_debugfs_exit(struct f_cdev *port);
static struct usb_interface_descriptor cser_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
@@ -858,9 +854,9 @@ static void cser_free_inst(struct usb_function_instance *fi)
if (opts->port) {
device_destroy(fcdev_classp, MKDEV(major, opts->port->minor));
cdev_del(&opts->port->fcdev_cdev);
+ usb_cser_debugfs_exit(opts->port);
}
usb_cser_chardev_deinit();
- usb_cser_debugfs_exit();
kfree(opts->func_name);
kfree(opts->port);
kfree(opts);
@@ -1638,17 +1634,17 @@ static const struct file_operations cser_rem_wakeup_fops = {
static void usb_cser_debugfs_init(struct f_cdev *port)
{
- debugfs.debugfs_root = debugfs_create_dir(port->name, NULL);
- if (IS_ERR(debugfs.debugfs_root))
+ port->debugfs_root = debugfs_create_dir(port->name, NULL);
+ if (IS_ERR(port->debugfs_root))
return;
debugfs_create_file("remote_wakeup", 0600,
- debugfs.debugfs_root, port, &cser_rem_wakeup_fops);
+ port->debugfs_root, port, &cser_rem_wakeup_fops);
}
-static void usb_cser_debugfs_exit(void)
+static void usb_cser_debugfs_exit(struct f_cdev *port)
{
- debugfs_remove_recursive(debugfs.debugfs_root);
+ debugfs_remove_recursive(port->debugfs_root);
}
static struct f_cdev *f_cdev_alloc(char *func_name, int portno)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 3137125..9b1224c 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -38,6 +38,18 @@
#define FUNCTIONFS_MAGIC 0xa647361 /* Chosen by a honest dice roll ;) */
+#define NUM_PAGES 10 /* # of pages for ipc logging */
+
+#ifdef CONFIG_DYNAMIC_DEBUG
+#define ffs_log(fmt, ...) do { \
+ ipc_log_string(ffs->ipc_log, "%s: " fmt, __func__, ##__VA_ARGS__); \
+ dynamic_pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+#else
+#define ffs_log(fmt, ...) \
+ ipc_log_string(ffs->ipc_log, "%s: " fmt, __func__, ##__VA_ARGS__)
+#endif
+
/* Reference counter handling */
static void ffs_data_get(struct ffs_data *ffs);
static void ffs_data_put(struct ffs_data *ffs);
@@ -275,6 +287,9 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
spin_unlock_irq(&ffs->ev.waitq.lock);
+ ffs_log("enter: state %d setup_state %d flags %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
req->buf = data;
req->length = len;
@@ -299,11 +314,18 @@ static int __ffs_ep0_queue_wait(struct ffs_data *ffs, char *data, size_t len)
}
ffs->setup_state = FFS_NO_SETUP;
+
+ ffs_log("exit: state %d setup_state %d flags %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
return req->status ? req->status : req->actual;
}
static int __ffs_ep0_stall(struct ffs_data *ffs)
{
+ ffs_log("state %d setup_state %d flags %lu can_stall %d", ffs->state,
+ ffs->setup_state, ffs->flags, ffs->ev.can_stall);
+
if (ffs->ev.can_stall) {
pr_vdebug("ep0 stall\n");
usb_ep_set_halt(ffs->gadget->ep0);
@@ -324,6 +346,9 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
ENTER();
+ ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
+ ffs->state, ffs->setup_state, ffs->flags);
+
/* Fast check if setup was canceled */
if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
return -EIDRM;
@@ -452,6 +477,9 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
break;
}
+ ffs_log("exit:ret %zd state %d setup_state %d flags %lu", ret,
+ ffs->state, ffs->setup_state, ffs->flags);
+
mutex_unlock(&ffs->mutex);
return ret;
}
@@ -486,6 +514,10 @@ static ssize_t __ffs_ep0_read_events(struct ffs_data *ffs, char __user *buf,
ffs->ev.count * sizeof *ffs->ev.types);
spin_unlock_irq(&ffs->ev.waitq.lock);
+
+ ffs_log("state %d setup_state %d flags %lu #evt %zu", ffs->state,
+ ffs->setup_state, ffs->flags, n);
+
mutex_unlock(&ffs->mutex);
return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
@@ -501,6 +533,9 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
ENTER();
+ ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
+ ffs->state, ffs->setup_state, ffs->flags);
+
/* Fast check if setup was canceled */
if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
return -EIDRM;
@@ -590,8 +625,12 @@ static ssize_t ffs_ep0_read(struct file *file, char __user *buf,
spin_unlock_irq(&ffs->ev.waitq.lock);
done_mutex:
+ ffs_log("exit:ret %d state %d setup_state %d flags %lu", ret,
+ ffs->state, ffs->setup_state, ffs->flags);
+
mutex_unlock(&ffs->mutex);
kfree(data);
+
return ret;
}
@@ -601,6 +640,9 @@ static int ffs_ep0_open(struct inode *inode, struct file *file)
ENTER();
+ ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
if (unlikely(ffs->state == FFS_CLOSING))
return -EBUSY;
@@ -616,6 +658,9 @@ static int ffs_ep0_release(struct inode *inode, struct file *file)
ENTER();
+ ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
ffs_data_closed(ffs);
return 0;
@@ -629,6 +674,9 @@ static long ffs_ep0_ioctl(struct file *file, unsigned code, unsigned long value)
ENTER();
+ ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
if (code == FUNCTIONFS_INTERFACE_REVMAP) {
struct ffs_function *func = ffs->func;
ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
@@ -647,6 +695,9 @@ static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
__poll_t mask = EPOLLWRNORM;
int ret;
+ ffs_log("enter:state %d setup_state %d flags %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
poll_wait(file, &ffs->ev.waitq, wait);
ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
@@ -677,6 +728,8 @@ static __poll_t ffs_ep0_poll(struct file *file, poll_table *wait)
break;
}
+ ffs_log("exit: mask %u", mask);
+
mutex_unlock(&ffs->mutex);
return mask;
@@ -753,10 +806,13 @@ static void ffs_user_copy_worker(struct work_struct *work)
{
struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
work);
+ struct ffs_data *ffs = io_data->ffs;
int ret = io_data->req->status ? io_data->req->status :
io_data->req->actual;
bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
+ ffs_log("enter: ret %d for %s", ret, io_data->read ? "read" : "write");
+
if (io_data->read && ret > 0) {
mm_segment_t oldfs = get_fs();
@@ -778,6 +834,8 @@ static void ffs_user_copy_worker(struct work_struct *work)
kfree(io_data->to_free);
kfree(io_data->buf);
kfree(io_data);
+
+ ffs_log("exit");
}
static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
@@ -788,6 +846,8 @@ static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
ENTER();
+ ffs_log("enter");
+
INIT_WORK(&io_data->work, ffs_user_copy_worker);
queue_work(ffs->io_completion_wq, &io_data->work);
}
@@ -877,12 +937,15 @@ static ssize_t __ffs_epfile_read_data(struct ffs_epfile *epfile,
static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
{
struct ffs_epfile *epfile = file->private_data;
+ struct ffs_data *ffs = epfile->ffs;
struct usb_request *req;
struct ffs_ep *ep;
char *data = NULL;
ssize_t ret, data_len = -EINVAL;
int halt;
+ ffs_log("enter: %s", epfile->name);
+
/* Are we still active? */
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
@@ -1000,6 +1063,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
spin_unlock_irq(&epfile->ffs->eps_lock);
+ ffs_log("queued %d bytes on %s", data_len, epfile->name);
+
if (unlikely(wait_for_completion_interruptible(&done))) {
/*
* To avoid race condition with ffs_epfile_io_complete,
@@ -1011,6 +1076,9 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
interrupted = ep->status < 0;
}
+ ffs_log("%s:ep status %d for req %pK", epfile->name, ep->status,
+ req);
+
if (interrupted)
ret = -EINTR;
else if (io_data->read && ep->status > 0)
@@ -1039,6 +1107,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
goto error_lock;
}
+ ffs_log("queued %d bytes on %s", data_len, epfile->name);
+
ret = -EIOCBQUEUED;
/*
* Do not kfree the buffer in this function. It will be freed
@@ -1053,6 +1123,9 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
mutex_unlock(&epfile->mutex);
error:
kfree(data);
+
+ ffs_log("exit: %s ret %zd", epfile->name, ret);
+
return ret;
}
@@ -1060,9 +1133,14 @@ static int
ffs_epfile_open(struct inode *inode, struct file *file)
{
struct ffs_epfile *epfile = inode->i_private;
+ struct ffs_data *ffs = epfile->ffs;
ENTER();
+ ffs_log("%s: state %d setup_state %d flag %lu", epfile->name,
+ epfile->ffs->state, epfile->ffs->setup_state,
+ epfile->ffs->flags);
+
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
@@ -1076,10 +1154,14 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
{
struct ffs_io_data *io_data = kiocb->private;
struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+ struct ffs_data *ffs = epfile->ffs;
int value;
ENTER();
+ ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
spin_lock_irq(&epfile->ffs->eps_lock);
if (likely(io_data && io_data->ep && io_data->req))
@@ -1089,16 +1171,22 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
spin_unlock_irq(&epfile->ffs->eps_lock);
+ ffs_log("exit: value %d", value);
+
return value;
}
static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
+ struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+ struct ffs_data *ffs = epfile->ffs;
struct ffs_io_data io_data, *p = &io_data;
ssize_t res;
ENTER();
+ ffs_log("enter");
+
if (!is_sync_kiocb(kiocb)) {
p = kmalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
@@ -1125,16 +1213,23 @@ static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from)
kfree(p);
else
*from = p->data;
+
+ ffs_log("exit: ret %zd", res);
+
return res;
}
static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
{
+ struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+ struct ffs_data *ffs = epfile->ffs;
struct ffs_io_data io_data, *p = &io_data;
ssize_t res;
ENTER();
+ ffs_log("enter");
+
if (!is_sync_kiocb(kiocb)) {
p = kmalloc(sizeof(io_data), GFP_KERNEL);
if (unlikely(!p))
@@ -1173,6 +1268,9 @@ static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to)
} else {
*to = p->data;
}
+
+ ffs_log("exit: ret %zd", res);
+
return res;
}
@@ -1180,10 +1278,15 @@ static int
ffs_epfile_release(struct inode *inode, struct file *file)
{
struct ffs_epfile *epfile = inode->i_private;
+ struct ffs_data *ffs = epfile->ffs;
ENTER();
__ffs_epfile_read_buffer_free(epfile);
+ ffs_log("%s: state %d setup_state %d flag %lu", epfile->name,
+ epfile->ffs->state, epfile->ffs->setup_state,
+ epfile->ffs->flags);
+
ffs_data_closed(epfile->ffs);
return 0;
@@ -1193,11 +1296,16 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
unsigned long value)
{
struct ffs_epfile *epfile = file->private_data;
+ struct ffs_data *ffs = epfile->ffs;
struct ffs_ep *ep;
int ret;
ENTER();
+ ffs_log("%s: code 0x%08x value %#lx state %d setup_state %d flag %lu",
+ epfile->name, code, value, epfile->ffs->state,
+ epfile->ffs->setup_state, epfile->ffs->flags);
+
if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
return -ENODEV;
@@ -1263,6 +1371,8 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
}
spin_unlock_irq(&epfile->ffs->eps_lock);
+ ffs_log("exit: %s: ret %d\n", epfile->name, ret);
+
return ret;
}
@@ -1301,10 +1411,13 @@ ffs_sb_make_inode(struct super_block *sb, void *data,
const struct inode_operations *iops,
struct ffs_file_perms *perms)
{
+ struct ffs_data *ffs = sb->s_fs_info;
struct inode *inode;
ENTER();
+ ffs_log("enter");
+
inode = new_inode(sb);
if (likely(inode)) {
@@ -1338,6 +1451,8 @@ static struct dentry *ffs_sb_create_file(struct super_block *sb,
ENTER();
+ ffs_log("enter");
+
dentry = d_alloc_name(sb->s_root, name);
if (unlikely(!dentry))
return NULL;
@@ -1349,6 +1464,7 @@ static struct dentry *ffs_sb_create_file(struct super_block *sb,
}
d_add(dentry, inode);
+
return dentry;
}
@@ -1374,6 +1490,8 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
ENTER();
+ ffs_log("enter");
+
ffs->sb = sb;
data->ffs_data = NULL;
sb->s_fs_info = ffs;
@@ -1541,6 +1659,7 @@ ffs_fs_mount(struct file_system_type *t, int flags,
ffs_release_dev(data.ffs_data);
ffs_data_put(data.ffs_data);
}
+
return rv;
}
@@ -1600,6 +1719,8 @@ static void ffs_data_get(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("ref %u", refcount_read(&ffs->ref));
+
refcount_inc(&ffs->ref);
}
@@ -1607,6 +1728,10 @@ static void ffs_data_opened(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu opened %d ref %d",
+ ffs->state, ffs->setup_state, ffs->flags,
+ atomic_read(&ffs->opened), refcount_read(&ffs->ref));
+
refcount_inc(&ffs->ref);
if (atomic_add_return(1, &ffs->opened) == 1 &&
ffs->state == FFS_DEACTIVATED) {
@@ -1619,6 +1744,8 @@ static void ffs_data_put(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("ref %u", refcount_read(&ffs->ref));
+
if (unlikely(refcount_dec_and_test(&ffs->ref))) {
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
@@ -1626,6 +1753,7 @@ static void ffs_data_put(struct ffs_data *ffs)
waitqueue_active(&ffs->ep0req_completion.wait) ||
waitqueue_active(&ffs->wait));
destroy_workqueue(ffs->io_completion_wq);
+ ipc_log_context_destroy(ffs->ipc_log);
kfree(ffs->dev_name);
kfree(ffs);
}
@@ -1635,6 +1763,9 @@ static void ffs_data_closed(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("state %d setup_state %d flag %lu opened %d", ffs->state,
+ ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
if (atomic_dec_and_test(&ffs->opened)) {
if (ffs->no_disconnect) {
ffs->state = FFS_DEACTIVATED;
@@ -1660,6 +1791,7 @@ static void ffs_data_closed(struct ffs_data *ffs)
static struct ffs_data *ffs_data_new(const char *dev_name)
{
+ char ipcname[24] = "usb_ffs_";
struct ffs_data *ffs = kzalloc(sizeof *ffs, GFP_KERNEL);
if (unlikely(!ffs))
return NULL;
@@ -1684,6 +1816,11 @@ static struct ffs_data *ffs_data_new(const char *dev_name)
/* XXX REVISIT need to update it in some places, or do we? */
ffs->ev.can_stall = 1;
+ strlcat(ipcname, dev_name, sizeof(ipcname));
+ ffs->ipc_log = ipc_log_context_create(NUM_PAGES, ipcname, 0);
+ if (IS_ERR_OR_NULL(ffs->ipc_log))
+ ffs->ipc_log = NULL;
+
return ffs;
}
@@ -1691,6 +1828,11 @@ static void ffs_data_clear(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
+ pr_debug("%s: ffs->gadget= %pK, ffs->flags= %lu\n",
+ __func__, ffs->gadget, ffs->flags);
ffs_closed(ffs);
BUG_ON(ffs->gadget);
@@ -1710,6 +1852,9 @@ static void ffs_data_reset(struct ffs_data *ffs)
{
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
ffs_data_clear(ffs);
ffs->epfiles = NULL;
@@ -1742,6 +1887,9 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
if (WARN_ON(ffs->state != FFS_ACTIVE
|| test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
return -EBADFD;
@@ -1767,6 +1915,7 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
}
ffs->gadget = cdev->gadget;
+
ffs_data_get(ffs);
return 0;
}
@@ -1780,6 +1929,8 @@ static void functionfs_unbind(struct ffs_data *ffs)
ffs->ep0req = NULL;
ffs->gadget = NULL;
clear_bit(FFS_FL_BOUND, &ffs->flags);
+ ffs_log("state %d setup_state %d flag %lu gadget %pK\n",
+ ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
ffs_data_put(ffs);
}
}
@@ -1791,6 +1942,9 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
ENTER();
+ ffs_log("enter: eps_count %u state %d setup_state %d flag %lu",
+ ffs->eps_count, ffs->state, ffs->setup_state, ffs->flags);
+
count = ffs->eps_count;
epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
if (!epfiles)
@@ -1814,15 +1968,19 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
}
ffs->epfiles = epfiles;
+
return 0;
}
static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
{
struct ffs_epfile *epfile = epfiles;
+ struct ffs_data *ffs = epfiles->ffs;
ENTER();
+ ffs_log("enter: count %u", count);
+
for (; count; --count, ++epfile) {
BUG_ON(mutex_is_locked(&epfile->mutex));
if (epfile->dentry) {
@@ -1838,10 +1996,14 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count)
static void ffs_func_eps_disable(struct ffs_function *func)
{
struct ffs_ep *ep = func->eps;
+ struct ffs_data *ffs = func->ffs;
struct ffs_epfile *epfile = func->ffs->epfiles;
unsigned count = func->ffs->eps_count;
unsigned long flags;
+ ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
+ func->ffs->setup_state, func->ffs->flags);
+
spin_lock_irqsave(&func->ffs->eps_lock, flags);
while (count--) {
/* pending requests get nuked */
@@ -1867,6 +2029,9 @@ static int ffs_func_eps_enable(struct ffs_function *func)
unsigned long flags;
int ret = 0;
+ ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
+ func->ffs->setup_state, func->ffs->flags);
+
spin_lock_irqsave(&func->ffs->eps_lock, flags);
while(count--) {
ep->ep->driver_data = ep;
@@ -1883,7 +2048,9 @@ static int ffs_func_eps_enable(struct ffs_function *func)
epfile->ep = ep;
epfile->in = usb_endpoint_dir_in(ep->ep->desc);
epfile->isoc = usb_endpoint_xfer_isoc(ep->ep->desc);
+ ffs_log("usb_ep_enable %s", ep->ep->name);
} else {
+ ffs_log("usb_ep_enable %s ret %d", ep->ep->name, ret);
break;
}
@@ -1924,7 +2091,8 @@ typedef int (*ffs_os_desc_callback)(enum ffs_os_desc_type entity,
struct usb_os_desc_header *h, void *data,
unsigned len, void *priv);
-static int __must_check ffs_do_single_desc(char *data, unsigned len,
+static int __must_check ffs_do_single_desc(struct ffs_data *ffs,
+ char *data, unsigned int len,
ffs_entity_callback entity,
void *priv)
{
@@ -1934,6 +2102,8 @@ static int __must_check ffs_do_single_desc(char *data, unsigned len,
ENTER();
+ ffs_log("enter: len %u", len);
+
/* At least two bytes are required: length and type */
if (len < 2) {
pr_vdebug("descriptor too short\n");
@@ -2050,10 +2220,13 @@ static int __must_check ffs_do_single_desc(char *data, unsigned len,
#undef __entity_check_STRING
#undef __entity_check_ENDPOINT
+ ffs_log("exit: desc type %d length %d", _ds->bDescriptorType, length);
+
return length;
}
-static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
+static int __must_check ffs_do_descs(struct ffs_data *ffs, unsigned int count,
+ char *data, unsigned int len,
ffs_entity_callback entity, void *priv)
{
const unsigned _len = len;
@@ -2061,6 +2234,8 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
ENTER();
+ ffs_log("enter: len %u", len);
+
for (;;) {
int ret;
@@ -2078,7 +2253,7 @@ static int __must_check ffs_do_descs(unsigned count, char *data, unsigned len,
if (!data)
return _len - len;
- ret = ffs_do_single_desc(data, len, entity, priv);
+ ret = ffs_do_single_desc(ffs, data, len, entity, priv);
if (unlikely(ret < 0)) {
pr_debug("%s returns %d\n", __func__, ret);
return ret;
@@ -2095,10 +2270,13 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
void *priv)
{
struct ffs_desc_helper *helper = priv;
+ struct ffs_data *ffs = helper->ffs;
struct usb_endpoint_descriptor *d;
ENTER();
+ ffs_log("enter: type %u", type);
+
switch (type) {
case FFS_DESCRIPTOR:
break;
@@ -2140,12 +2318,15 @@ static int __ffs_data_do_entity(enum ffs_entity_type type,
return 0;
}
-static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
+static int __ffs_do_os_desc_header(struct ffs_data *ffs,
+ enum ffs_os_desc_type *next_type,
struct usb_os_desc_header *desc)
{
u16 bcd_version = le16_to_cpu(desc->bcdVersion);
u16 w_index = le16_to_cpu(desc->wIndex);
+ ffs_log("enter: bcd:%x w_index:%d", bcd_version, w_index);
+
if (bcd_version != 1) {
pr_vdebug("unsupported os descriptors version: %d",
bcd_version);
@@ -2170,7 +2351,8 @@ static int __ffs_do_os_desc_header(enum ffs_os_desc_type *next_type,
* Process all extended compatibility/extended property descriptors
* of a feature descriptor
*/
-static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
+static int __must_check ffs_do_single_os_desc(struct ffs_data *ffs,
+ char *data, unsigned int len,
enum ffs_os_desc_type type,
u16 feature_count,
ffs_os_desc_callback entity,
@@ -2182,22 +2364,27 @@ static int __must_check ffs_do_single_os_desc(char *data, unsigned len,
ENTER();
+ ffs_log("enter: len %u os desc type %d", len, type);
+
/* loop over all ext compat/ext prop descriptors */
while (feature_count--) {
ret = entity(type, h, data, len, priv);
if (unlikely(ret < 0)) {
- pr_debug("bad OS descriptor, type: %d\n", type);
+ ffs_log("bad OS descriptor, type: %d\n", type);
return ret;
}
data += ret;
len -= ret;
}
+
+
return _len - len;
}
/* Process a number of complete Feature Descriptors (Ext Compat or Ext Prop) */
-static int __must_check ffs_do_os_descs(unsigned count,
- char *data, unsigned len,
+static int __must_check ffs_do_os_descs(struct ffs_data *ffs,
+ unsigned int count, char *data,
+ unsigned int len,
ffs_os_desc_callback entity, void *priv)
{
const unsigned _len = len;
@@ -2205,6 +2392,8 @@ static int __must_check ffs_do_os_descs(unsigned count,
ENTER();
+ ffs_log("enter: len %u", len);
+
for (num = 0; num < count; ++num) {
int ret;
enum ffs_os_desc_type type;
@@ -2224,9 +2413,9 @@ static int __must_check ffs_do_os_descs(unsigned count,
if (le32_to_cpu(desc->dwLength) > len)
return -EINVAL;
- ret = __ffs_do_os_desc_header(&type, desc);
+ ret = __ffs_do_os_desc_header(ffs, &type, desc);
if (unlikely(ret < 0)) {
- pr_debug("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
+ ffs_log("entity OS_DESCRIPTOR(%02lx); ret = %d\n",
num, ret);
return ret;
}
@@ -2244,16 +2433,17 @@ static int __must_check ffs_do_os_descs(unsigned count,
* Process all function/property descriptors
* of this Feature Descriptor
*/
- ret = ffs_do_single_os_desc(data, len, type,
+ ret = ffs_do_single_os_desc(ffs, data, len, type,
feature_count, entity, priv, desc);
if (unlikely(ret < 0)) {
- pr_debug("%s returns %d\n", __func__, ret);
+ ffs_log("%s returns %d\n", __func__, ret);
return ret;
}
len -= ret;
data += ret;
}
+
return _len - len;
}
@@ -2269,6 +2459,8 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
ENTER();
+ ffs_log("enter: type %d len %u", type, len);
+
switch (type) {
case FFS_OS_DESC_EXT_COMPAT: {
struct usb_ext_compat_desc *d = data;
@@ -2333,6 +2525,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
pr_vdebug("unknown descriptor: %d\n", type);
return -EINVAL;
}
+
return length;
}
@@ -2346,6 +2539,8 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
ENTER();
+ ffs_log("enter: len %zu", len);
+
if (get_unaligned_le32(data + 4) != len)
goto error;
@@ -2419,7 +2614,7 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
continue;
helper.interfaces_count = 0;
helper.eps_count = 0;
- ret = ffs_do_descs(counts[i], data, len,
+ ret = ffs_do_descs(ffs, counts[i], data, len,
__ffs_data_do_entity, &helper);
if (ret < 0)
goto error;
@@ -2440,7 +2635,7 @@ static int __ffs_data_got_descs(struct ffs_data *ffs,
len -= ret;
}
if (os_descs_count) {
- ret = ffs_do_os_descs(os_descs_count, data, len,
+ ret = ffs_do_os_descs(ffs, os_descs_count, data, len,
__ffs_data_do_os_desc, ffs);
if (ret < 0)
goto error;
@@ -2478,6 +2673,8 @@ static int __ffs_data_got_strings(struct ffs_data *ffs,
ENTER();
+ ffs_log("enter: len %zu", len);
+
if (unlikely(len < 16 ||
get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
get_unaligned_le32(data + 4) != len))
@@ -2610,6 +2807,9 @@ static void __ffs_event_add(struct ffs_data *ffs,
enum usb_functionfs_event_type rem_type1, rem_type2 = type;
int neg = 0;
+ ffs_log("enter: type %d state %d setup_state %d flag %lu", type,
+ ffs->state, ffs->setup_state, ffs->flags);
+
/*
* Abort any unhandled setup
*
@@ -2698,11 +2898,14 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep,
{
struct usb_endpoint_descriptor *ds = (void *)desc;
struct ffs_function *func = priv;
+ struct ffs_data *ffs = func->ffs;
struct ffs_ep *ffs_ep;
unsigned ep_desc_id;
int idx;
static const char *speed_names[] = { "full", "high", "super" };
+ ffs_log("enter");
+
if (type != FFS_DESCRIPTOR)
return 0;
@@ -2786,9 +2989,12 @@ static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
void *priv)
{
struct ffs_function *func = priv;
+ struct ffs_data *ffs = func->ffs;
unsigned idx;
u8 newValue;
+ ffs_log("enter: type %d", type);
+
switch (type) {
default:
case FFS_DESCRIPTOR:
@@ -2833,6 +3039,9 @@ static int __ffs_func_bind_do_nums(enum ffs_entity_type type, u8 *valuep,
pr_vdebug("%02x -> %02x\n", *valuep, newValue);
*valuep = newValue;
+
+ ffs_log("exit: newValue %d", newValue);
+
return 0;
}
@@ -2841,8 +3050,11 @@ static int __ffs_func_bind_do_os_desc(enum ffs_os_desc_type type,
unsigned len, void *priv)
{
struct ffs_function *func = priv;
+ struct ffs_data *ffs = func->ffs;
u8 length = 0;
+ ffs_log("enter: type %d", type);
+
switch (type) {
case FFS_OS_DESC_EXT_COMPAT: {
struct usb_ext_compat_desc *desc = data;
@@ -2921,6 +3133,7 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
struct ffs_function *func = ffs_func_from_usb(f);
struct f_fs_opts *ffs_opts =
container_of(f->fi, struct f_fs_opts, func_inst);
+ struct ffs_data *ffs = ffs_opts->dev->ffs_data;
int ret;
ENTER();
@@ -2953,8 +3166,10 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
*/
if (!ffs_opts->refcnt) {
ret = functionfs_bind(func->ffs, c->cdev);
- if (ret)
+ if (ret) {
+ ffs_log("functionfs_bind returned %d", ret);
return ERR_PTR(ret);
+ }
}
ffs_opts->refcnt++;
func->function.strings = func->ffs->stringtabs;
@@ -3002,6 +3217,9 @@ static int _ffs_func_bind(struct usb_configuration *c,
ENTER();
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
/* Has descriptors only for speeds gadget does not support */
if (unlikely(!(full | high | super)))
return -ENOTSUPP;
@@ -3039,7 +3257,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
*/
if (likely(full)) {
func->function.fs_descriptors = vla_ptr(vlabuf, d, fs_descs);
- fs_len = ffs_do_descs(ffs->fs_descs_count,
+ fs_len = ffs_do_descs(ffs, ffs->fs_descs_count,
vla_ptr(vlabuf, d, raw_descs),
d_raw_descs__sz,
__ffs_func_bind_do_descs, func);
@@ -3053,7 +3271,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
if (likely(high)) {
func->function.hs_descriptors = vla_ptr(vlabuf, d, hs_descs);
- hs_len = ffs_do_descs(ffs->hs_descs_count,
+ hs_len = ffs_do_descs(ffs, ffs->hs_descs_count,
vla_ptr(vlabuf, d, raw_descs) + fs_len,
d_raw_descs__sz - fs_len,
__ffs_func_bind_do_descs, func);
@@ -3067,7 +3285,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
if (likely(super)) {
func->function.ss_descriptors = vla_ptr(vlabuf, d, ss_descs);
- ss_len = ffs_do_descs(ffs->ss_descs_count,
+ ss_len = ffs_do_descs(ffs, ffs->ss_descs_count,
vla_ptr(vlabuf, d, raw_descs) + fs_len + hs_len,
d_raw_descs__sz - fs_len - hs_len,
__ffs_func_bind_do_descs, func);
@@ -3085,7 +3303,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
* endpoint numbers rewriting. We can do that in one go
* now.
*/
- ret = ffs_do_descs(ffs->fs_descs_count +
+ ret = ffs_do_descs(ffs, ffs->fs_descs_count +
(high ? ffs->hs_descs_count : 0) +
(super ? ffs->ss_descs_count : 0),
vla_ptr(vlabuf, d, raw_descs), d_raw_descs__sz,
@@ -3105,7 +3323,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
vla_ptr(vlabuf, d, ext_compat) + i * 16;
INIT_LIST_HEAD(&desc->ext_prop);
}
- ret = ffs_do_os_descs(ffs->ms_os_descs_count,
+ ret = ffs_do_os_descs(ffs, ffs->ms_os_descs_count,
vla_ptr(vlabuf, d, raw_descs) +
fs_len + hs_len + ss_len,
d_raw_descs__sz - fs_len - hs_len -
@@ -3119,10 +3337,12 @@ static int _ffs_func_bind(struct usb_configuration *c,
/* And we're done */
ffs_event_add(ffs, FUNCTIONFS_BIND);
+
return 0;
error:
/* XXX Do we need to release all claimed endpoints here? */
+ ffs_log("exit: ret %d", ret);
return ret;
}
@@ -3131,11 +3351,14 @@ static int ffs_func_bind(struct usb_configuration *c,
{
struct f_fs_opts *ffs_opts = ffs_do_functionfs_bind(f, c);
struct ffs_function *func = ffs_func_from_usb(f);
+ struct ffs_data *ffs = func->ffs;
int ret;
if (IS_ERR(ffs_opts))
return PTR_ERR(ffs_opts);
+ ffs_log("enter");
+
ret = _ffs_func_bind(c, f);
if (ret && !--ffs_opts->refcnt)
functionfs_unbind(func->ffs);
@@ -3150,6 +3373,9 @@ static void ffs_reset_work(struct work_struct *work)
{
struct ffs_data *ffs = container_of(work,
struct ffs_data, reset_work);
+
+ ffs_log("enter");
+
ffs_data_reset(ffs);
}
@@ -3160,6 +3386,8 @@ static int ffs_func_set_alt(struct usb_function *f,
struct ffs_data *ffs = func->ffs;
int ret = 0, intf;
+ ffs_log("enter: alt %d", (int)alt);
+
if (alt != (unsigned)-1) {
intf = ffs_func_revmap_intf(func, interface);
if (unlikely(intf < 0))
@@ -3189,11 +3417,16 @@ static int ffs_func_set_alt(struct usb_function *f,
ret = ffs_func_eps_enable(func);
if (likely(ret >= 0))
ffs_event_add(ffs, FUNCTIONFS_ENABLE);
+
return ret;
}
static void ffs_func_disable(struct usb_function *f)
{
+ struct ffs_function *func = ffs_func_from_usb(f);
+ struct ffs_data *ffs = func->ffs;
+
+ ffs_log("enter");
ffs_func_set_alt(f, 0, (unsigned)-1);
}
@@ -3213,6 +3446,11 @@ static int ffs_func_setup(struct usb_function *f,
pr_vdebug("creq->wIndex = %04x\n", le16_to_cpu(creq->wIndex));
pr_vdebug("creq->wLength = %04x\n", le16_to_cpu(creq->wLength));
+ ffs_log("enter: state %d reqtype=%02x req=%02x wv=%04x wi=%04x wl=%04x",
+ ffs->state, creq->bRequestType, creq->bRequest,
+ le16_to_cpu(creq->wValue), le16_to_cpu(creq->wIndex),
+ le16_to_cpu(creq->wLength));
+
/*
* Most requests directed to interface go through here
* (notable exceptions are set/get interface) so we need to
@@ -3281,13 +3519,23 @@ static bool ffs_func_req_match(struct usb_function *f,
+/* Forward a USB bus suspend to userspace as a FUNCTIONFS_SUSPEND event. */
static void ffs_func_suspend(struct usb_function *f)
{
+	/* 'ffs' exists only to feed the ffs_log() IPC trace below. */
+	struct ffs_data *ffs = ffs_func_from_usb(f)->ffs;
+
	ENTER();
+
+	ffs_log("enter");
+
	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
}
static void ffs_func_resume(struct usb_function *f)
{
+ struct ffs_data *ffs = ffs_func_from_usb(f)->ffs;
+
ENTER();
+
+ ffs_log("enter");
+
ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
}
@@ -3360,7 +3608,9 @@ static struct ffs_dev *_ffs_find_dev(const char *name)
if (dev)
return dev;
- return _ffs_do_find_dev(name);
+ dev = _ffs_do_find_dev(name);
+
+ return dev;
}
/* Configfs support *********************************************************/
@@ -3451,6 +3701,10 @@ static void ffs_func_unbind(struct usb_configuration *c,
unsigned long flags;
ENTER();
+
+ ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
+
if (ffs->func == func) {
ffs_func_eps_disable(func);
ffs->func = NULL;
@@ -3481,6 +3735,9 @@ static void ffs_func_unbind(struct usb_configuration *c,
func->interfaces_nums = NULL;
ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+
+ ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+ ffs->setup_state, ffs->flags);
}
static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
@@ -3568,6 +3825,7 @@ int ffs_single_dev(struct ffs_dev *dev)
dev->single = true;
ffs_dev_unlock();
+
return ret;
}
EXPORT_SYMBOL_GPL(ffs_single_dev);
@@ -3593,6 +3851,7 @@ static void *ffs_acquire_dev(const char *dev_name)
struct ffs_dev *ffs_dev;
ENTER();
+
ffs_dev_lock();
ffs_dev = _ffs_find_dev(dev_name);
@@ -3607,6 +3866,7 @@ static void *ffs_acquire_dev(const char *dev_name)
ffs_dev->mounted = true;
ffs_dev_unlock();
+
return ffs_dev;
}
@@ -3615,6 +3875,7 @@ static void ffs_release_dev(struct ffs_data *ffs_data)
struct ffs_dev *ffs_dev;
ENTER();
+
ffs_dev_lock();
ffs_dev = ffs_data->private_data;
@@ -3634,6 +3895,9 @@ static int ffs_ready(struct ffs_data *ffs)
int ret = 0;
ENTER();
+
+ ffs_log("enter");
+
ffs_dev_lock();
ffs_obj = ffs->private_data;
@@ -3658,6 +3922,9 @@ static int ffs_ready(struct ffs_data *ffs)
set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
done:
ffs_dev_unlock();
+
+ ffs_log("exit: ret %d", ret);
+
return ret;
}
@@ -3668,6 +3935,9 @@ static void ffs_closed(struct ffs_data *ffs)
struct config_item *ci;
ENTER();
+
+ ffs_log("enter");
+
ffs_dev_lock();
ffs_obj = ffs->private_data;
@@ -3693,11 +3963,16 @@ static void ffs_closed(struct ffs_data *ffs)
ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
ffs_dev_unlock();
- if (test_bit(FFS_FL_BOUND, &ffs->flags))
+ if (test_bit(FFS_FL_BOUND, &ffs->flags)) {
unregister_gadget_item(ci);
+ ffs_log("unreg gadget done");
+ }
+
return;
done:
ffs_dev_unlock();
+
+ ffs_log("exit error");
}
/* Misc helper functions ****************************************************/
diff --git a/drivers/usb/gadget/function/f_gsi.h b/drivers/usb/gadget/function/f_gsi.h
index b9bf791..3708033 100644
--- a/drivers/usb/gadget/function/f_gsi.h
+++ b/drivers/usb/gadget/function/f_gsi.h
@@ -23,7 +23,7 @@
#define GSI_MBIM_CTRL_NAME "android_mbim"
#define GSI_DPL_CTRL_NAME "dpl_ctrl"
#define GSI_CTRL_NAME_LEN (sizeof(GSI_MBIM_CTRL_NAME)+2)
-#define GSI_MAX_CTRL_PKT_SIZE 4096
+#define GSI_MAX_CTRL_PKT_SIZE 8192
#define GSI_CTRL_DTR (1 << 0)
#define GSI_NUM_IN_RNDIS_BUFFERS 50
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 19556f0..97cce3b 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1580,10 +1580,58 @@ static const struct config_item_type ncm_func_type = {
.ct_owner = THIS_MODULE,
};
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+
+/*
+ * Deferred-uevent context for the MirrorLink vendor handshake: the control
+ * request handler records the negotiated version here and the work item
+ * reports it to userspace via a uevent.
+ */
+struct ncm_setup_desc {
+ struct work_struct work;
+ struct device *device;
+ uint8_t major; // Mirror Link major version
+ uint8_t minor; // Mirror Link minor version
+};
+
+/* Single global instance; allocated in ncm_alloc_inst(), freed in ncm_free_inst(). */
+static struct ncm_setup_desc *_ncm_setup_desc;
+
+#define MIRROR_LINK_STRING_LENGTH_MAX 32
+/*
+ * Work handler: emit a "MirrorLink=V<major>.<minor>" KOBJ_CHANGE uevent on
+ * the f_ncm function device so userspace learns the negotiated version.
+ *
+ * NOTE(review): dereferences the global _ncm_setup_desc without locking;
+ * lifetime appears to rely on ncm_free_inst()'s cancel_work_sync() running
+ * before the structure is freed -- confirm that ordering holds.
+ */
+static void ncm_setup_work(struct work_struct *data)
+{
+ char mirror_link_string[MIRROR_LINK_STRING_LENGTH_MAX];
+ char *envp[2] = { mirror_link_string, NULL };
+
+ snprintf(mirror_link_string, MIRROR_LINK_STRING_LENGTH_MAX,
+ "MirrorLink=V%d.%d",
+ _ncm_setup_desc->major, _ncm_setup_desc->minor);
+ kobject_uevent_env(&_ncm_setup_desc->device->kobj, KOBJ_CHANGE, envp);
+}
+
+/*
+ * Handle the MirrorLink vendor control request (bRequestType 0x40 =
+ * vendor/host-to-device, bRequest 0xF0).  wValue carries the version:
+ * low byte = major, high byte = minor (per the masking below).  Records
+ * the version and schedules the uevent work.  Returns 0 when handled,
+ * -EOPNOTSUPP otherwise so the composite core keeps dispatching.
+ *
+ * NOTE(review): ctrl->wValue is a le16 but is used without le16_to_cpu();
+ * byte order would be wrong on a big-endian host -- confirm intent.
+ */
+int ncm_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl)
+{
+ int value = -EOPNOTSUPP;
+
+ if (ctrl->bRequestType == 0x40 && ctrl->bRequest == 0xF0
+ && _ncm_setup_desc) {
+ _ncm_setup_desc->minor = (uint8_t)(ctrl->wValue >> 8);
+ _ncm_setup_desc->major = (uint8_t)(ctrl->wValue & 0xFF);
+ schedule_work(&_ncm_setup_desc->work);
+ value = 0;
+ }
+
+ return value;
+}
+#endif
+
static void ncm_free_inst(struct usb_function_instance *f)
{
struct f_ncm_opts *opts;
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+ cancel_work_sync(&_ncm_setup_desc->work);
+ /* release _ncm_setup_desc related resource */
+ device_destroy(_ncm_setup_desc->device->class,
+ _ncm_setup_desc->device->devt);
+ kfree(_ncm_setup_desc);
+#endif
+
opts = container_of(f, struct f_ncm_opts, func_inst);
if (opts->bound)
gether_cleanup(netdev_priv(opts->net));
@@ -1602,6 +1650,14 @@ static struct usb_function_instance *ncm_alloc_inst(void)
config_group_init_type_name(&opts->func_inst.group, "", &ncm_func_type);
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	/* Allocate the singleton MirrorLink uevent context for this instance. */
+	_ncm_setup_desc = kzalloc(sizeof(*_ncm_setup_desc), GFP_KERNEL);
+	/*
+	 * NOTE(review): returning here leaks the allocations made earlier in
+	 * this function (opts and its net device are not visible in this hunk
+	 * but are set up before this point) -- confirm and add cleanup.
+	 */
+	if (!_ncm_setup_desc)
+		return ERR_PTR(-ENOMEM);
+	INIT_WORK(&_ncm_setup_desc->work, ncm_setup_work);
+	_ncm_setup_desc->device = create_function_device("f_ncm");
+#endif
+
return &opts->func_inst;
}
@@ -1626,6 +1682,8 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
DBG(c->cdev, "ncm unbind\n");
+ opts->bound = false;
+
hrtimer_cancel(&ncm->task_timer);
ncm_string_defs[0].id = 0;
@@ -1635,7 +1693,6 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
usb_ep_free_request(ncm->notify, ncm->notify_req);
gether_cleanup(netdev_priv(opts->net));
- opts->bound = false;
}
static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
index 9cdef10..ed68a48 100644
--- a/drivers/usb/gadget/function/f_sourcesink.c
+++ b/drivers/usb/gadget/function/f_sourcesink.c
@@ -838,7 +838,7 @@ static struct usb_function *source_sink_alloc_func(
ss = kzalloc(sizeof(*ss), GFP_KERNEL);
if (!ss)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ss_opts = container_of(fi, struct f_ss_opts, func_inst);
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index b0cf25c..959f666 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -32,6 +32,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int result; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
result = gether_get_dev_addr(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
@@ -45,6 +50,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
mutex_unlock(&opts->lock); \
@@ -67,6 +77,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int result; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
result = gether_get_host_addr(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
@@ -80,6 +95,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
mutex_unlock(&opts->lock); \
@@ -102,6 +122,11 @@
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
unsigned qmult; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
qmult = gether_get_qmult(opts->net); \
mutex_unlock(&opts->lock); \
@@ -115,6 +140,11 @@
u8 val; \
int ret; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
if (opts->refcnt) { \
ret = -EBUSY; \
@@ -141,6 +171,11 @@ out: \
struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
int ret; \
\
+ if (opts->bound == false) { \
+ pr_err("Gadget function do not bind yet.\n"); \
+ return -ENODEV; \
+ } \
+ \
mutex_lock(&opts->lock); \
ret = gether_get_ifname(opts->net, page, PAGE_SIZE); \
mutex_unlock(&opts->lock); \
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h
index c3aba4d..0856ca3 100644
--- a/drivers/usb/gadget/function/u_fs.h
+++ b/drivers/usb/gadget/function/u_fs.h
@@ -18,6 +18,7 @@
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/refcount.h>
+#include <linux/ipc_logging.h>
#ifdef VERBOSE_DEBUG
#ifndef pr_vdebug
@@ -285,6 +286,8 @@ struct ffs_data {
* destroyed by ffs_epfiles_destroy().
*/
struct ffs_epfile *epfiles;
+
+ void *ipc_log;
};
diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
index 67324f9..785bda0 100644
--- a/drivers/usb/gadget/function/u_ncm.h
+++ b/drivers/usb/gadget/function/u_ncm.h
@@ -30,4 +30,8 @@ struct f_ncm_opts {
int refcnt;
};
+extern struct device *create_function_device(char *name);
+int ncm_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl);
+
#endif /* U_NCM_H */
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 53ba123..1dcd800 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -720,6 +720,7 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
unsigned long fs_count; /* Number of filesystem-sized blocks */
int create;
unsigned int i_blkbits = sdio->blkbits + sdio->blkfactor;
+ loff_t i_size;
/*
* If there was a memory error and we've overwritten all the
@@ -749,8 +750,8 @@ static int get_more_blocks(struct dio *dio, struct dio_submit *sdio,
*/
create = dio->op == REQ_OP_WRITE;
if (dio->flags & DIO_SKIP_HOLES) {
- if (fs_startblk <= ((i_size_read(dio->inode) - 1) >>
- i_blkbits))
+ i_size = i_size_read(dio->inode);
+ if (i_size && fs_startblk <= (i_size - 1) >> i_blkbits)
create = 0;
}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 471d863..82ce6d4 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -331,11 +331,22 @@ struct inode_switch_wbs_context {
struct work_struct work;
};
+/*
+ * Writer side of bdi->wb_switch_rwsem: taken by sync_inodes_sb() around
+ * bdi_split_work_to_wbs() to fence out in-flight inode wb switches (the
+ * reader side is held by inode_switch_wbs_work_fn() / inode_switch_wbs()).
+ */
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+	down_write(&bdi->wb_switch_rwsem);
+}
+
+/* Release the writer side taken by bdi_down_write_wb_switch_rwsem(). */
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
+{
+	up_write(&bdi->wb_switch_rwsem);
+}
+
static void inode_switch_wbs_work_fn(struct work_struct *work)
{
struct inode_switch_wbs_context *isw =
container_of(work, struct inode_switch_wbs_context, work);
struct inode *inode = isw->inode;
+ struct backing_dev_info *bdi = inode_to_bdi(inode);
struct address_space *mapping = inode->i_mapping;
struct bdi_writeback *old_wb = inode->i_wb;
struct bdi_writeback *new_wb = isw->new_wb;
@@ -344,6 +355,12 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
void **slot;
/*
+ * If @inode switches cgwb membership while sync_inodes_sb() is
+ * being issued, sync_inodes_sb() might miss it. Synchronize.
+ */
+ down_read(&bdi->wb_switch_rwsem);
+
+ /*
* By the time control reaches here, RCU grace period has passed
* since I_WB_SWITCH assertion and all wb stat update transactions
* between unlocked_inode_to_wb_begin/end() are guaranteed to be
@@ -435,6 +452,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
spin_unlock(&new_wb->list_lock);
spin_unlock(&old_wb->list_lock);
+ up_read(&bdi->wb_switch_rwsem);
+
if (switched) {
wb_wakeup(new_wb);
wb_put(old_wb);
@@ -475,9 +494,18 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
if (inode->i_state & I_WB_SWITCH)
return;
+ /*
+ * Avoid starting new switches while sync_inodes_sb() is in
+ * progress. Otherwise, if the down_write protected issue path
+ * blocks heavily, we might end up starting a large number of
+ * switches which will block on the rwsem.
+ */
+ if (!down_read_trylock(&bdi->wb_switch_rwsem))
+ return;
+
isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
if (!isw)
- return;
+ goto out_unlock;
/* find and pin the new wb */
rcu_read_lock();
@@ -511,12 +539,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
* Let's continue after I_WB_SWITCH is guaranteed to be visible.
*/
call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
- return;
+ goto out_unlock;
out_free:
if (isw->new_wb)
wb_put(isw->new_wb);
kfree(isw);
+out_unlock:
+ up_read(&bdi->wb_switch_rwsem);
}
/**
@@ -894,6 +924,9 @@ fs_initcall(cgroup_writeback_init);
#else /* CONFIG_CGROUP_WRITEBACK */
+static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
+
static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
__releases(&inode->i_lock)
@@ -2420,8 +2453,11 @@ void sync_inodes_sb(struct super_block *sb)
return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
+ /* protect against inode wb switch, see inode_switch_wbs_work_fn() */
+ bdi_down_write_wb_switch_rwsem(bdi);
bdi_split_work_to_wbs(bdi, &work, false);
wb_wait_for_completion(bdi, &done);
+ bdi_up_write_wb_switch_rwsem(bdi);
wait_sb_inodes(sb);
}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 1978581..b0eef00 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -859,6 +859,18 @@ static int hugetlbfs_migrate_page(struct address_space *mapping,
rc = migrate_huge_page_move_mapping(mapping, newpage, page);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
+
+ /*
+ * page_private is subpool pointer in hugetlb pages. Transfer to
+ * new page. PagePrivate is not associated with page_private for
+ * hugetlb pages and can not be set here as only page_huge_active
+ * pages can be migrated.
+ */
+ if (page_private(page)) {
+ set_page_private(newpage, page_private(page));
+ set_page_private(page, 0);
+ }
+
if (mode != MIGRATE_SYNC_NO_COPY)
migrate_page_copy(newpage, page);
else
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index dbf5bc2..2d8b91f 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -832,26 +832,35 @@ void kernfs_drain_open_files(struct kernfs_node *kn)
* to see if it supports poll (Neither 'poll' nor 'select' return
* an appropriate error code). When in doubt, set a suitable timeout value.
*/
+__poll_t kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait)
+{
+ struct kernfs_node *kn = kernfs_dentry_node(of->file->f_path.dentry);
+ struct kernfs_open_node *on = kn->attr.open;
+
+ poll_wait(of->file, &on->poll, wait);
+
+ if (of->event != atomic_read(&on->event))
+ return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
+
+ return DEFAULT_POLLMASK;
+}
+
static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait)
{
struct kernfs_open_file *of = kernfs_of(filp);
struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry);
- struct kernfs_open_node *on = kn->attr.open;
+ __poll_t ret;
if (!kernfs_get_active(kn))
- goto trigger;
+ return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
- poll_wait(filp, &on->poll, wait);
+ if (kn->attr.ops->poll)
+ ret = kn->attr.ops->poll(of, wait);
+ else
+ ret = kernfs_generic_poll(of, wait);
kernfs_put_active(kn);
-
- if (of->event != atomic_read(&on->event))
- goto trigger;
-
- return DEFAULT_POLLMASK;
-
- trigger:
- return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;
+ return ret;
}
static void kernfs_notify_workfn(struct work_struct *work)
diff --git a/include/dt-bindings/clock/qcom,dispcc-kona.h b/include/dt-bindings/clock/qcom,dispcc-kona.h
index f48b27a..60b8d4a 100644
--- a/include/dt-bindings/clock/qcom,dispcc-kona.h
+++ b/include/dt-bindings/clock/qcom,dispcc-kona.h
@@ -21,57 +21,53 @@
#define DISP_CC_MDSS_DP_AUX1_CLK_SRC 11
#define DISP_CC_MDSS_DP_AUX_CLK 12
#define DISP_CC_MDSS_DP_AUX_CLK_SRC 13
-#define DISP_CC_MDSS_DP_CRYPTO1_CLK 14
-#define DISP_CC_MDSS_DP_CRYPTO1_CLK_SRC 15
-#define DISP_CC_MDSS_DP_CRYPTO_CLK 16
-#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 17
-#define DISP_CC_MDSS_DP_LINK1_CLK 18
-#define DISP_CC_MDSS_DP_LINK1_CLK_SRC 19
-#define DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC 20
-#define DISP_CC_MDSS_DP_LINK1_INTF_CLK 21
-#define DISP_CC_MDSS_DP_LINK_CLK 22
-#define DISP_CC_MDSS_DP_LINK_CLK_SRC 23
-#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 24
-#define DISP_CC_MDSS_DP_LINK_INTF_CLK 25
-#define DISP_CC_MDSS_DP_PIXEL1_CLK 26
-#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 27
-#define DISP_CC_MDSS_DP_PIXEL2_CLK 28
-#define DISP_CC_MDSS_DP_PIXEL2_CLK_SRC 29
-#define DISP_CC_MDSS_DP_PIXEL_CLK 30
-#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 31
-#define DISP_CC_MDSS_EDP_AUX_CLK 32
-#define DISP_CC_MDSS_EDP_AUX_CLK_SRC 33
-#define DISP_CC_MDSS_EDP_GTC_CLK 34
-#define DISP_CC_MDSS_EDP_GTC_CLK_SRC 35
-#define DISP_CC_MDSS_EDP_LINK_CLK 36
-#define DISP_CC_MDSS_EDP_LINK_CLK_SRC 37
-#define DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC 38
-#define DISP_CC_MDSS_EDP_LINK_INTF_CLK 39
-#define DISP_CC_MDSS_EDP_PIXEL_CLK 40
-#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC 41
-#define DISP_CC_MDSS_ESC0_CLK 42
-#define DISP_CC_MDSS_ESC0_CLK_SRC 43
-#define DISP_CC_MDSS_ESC1_CLK 44
-#define DISP_CC_MDSS_ESC1_CLK_SRC 45
-#define DISP_CC_MDSS_MDP_CLK 46
-#define DISP_CC_MDSS_MDP_CLK_SRC 47
-#define DISP_CC_MDSS_MDP_LUT_CLK 48
-#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 49
-#define DISP_CC_MDSS_PCLK0_CLK 50
-#define DISP_CC_MDSS_PCLK0_CLK_SRC 51
-#define DISP_CC_MDSS_PCLK1_CLK 52
-#define DISP_CC_MDSS_PCLK1_CLK_SRC 53
-#define DISP_CC_MDSS_ROT_CLK 54
-#define DISP_CC_MDSS_ROT_CLK_SRC 55
-#define DISP_CC_MDSS_RSCC_AHB_CLK 56
-#define DISP_CC_MDSS_RSCC_VSYNC_CLK 57
-#define DISP_CC_MDSS_VSYNC_CLK 58
-#define DISP_CC_MDSS_VSYNC_CLK_SRC 59
-#define DISP_CC_PLL0 60
-#define DISP_CC_PLL1 61
-#define DISP_CC_SLEEP_CLK 62
-#define DISP_CC_SLEEP_CLK_SRC 63
-#define DISP_CC_XO_CLK 64
+#define DISP_CC_MDSS_DP_LINK1_CLK 14
+#define DISP_CC_MDSS_DP_LINK1_CLK_SRC 15
+#define DISP_CC_MDSS_DP_LINK1_DIV_CLK_SRC 16
+#define DISP_CC_MDSS_DP_LINK1_INTF_CLK 17
+#define DISP_CC_MDSS_DP_LINK_CLK 18
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 19
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 20
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 21
+#define DISP_CC_MDSS_DP_PIXEL1_CLK 22
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 23
+#define DISP_CC_MDSS_DP_PIXEL2_CLK 24
+#define DISP_CC_MDSS_DP_PIXEL2_CLK_SRC 25
+#define DISP_CC_MDSS_DP_PIXEL_CLK 26
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 27
+#define DISP_CC_MDSS_EDP_AUX_CLK 28
+#define DISP_CC_MDSS_EDP_AUX_CLK_SRC 29
+#define DISP_CC_MDSS_EDP_GTC_CLK 30
+#define DISP_CC_MDSS_EDP_GTC_CLK_SRC 31
+#define DISP_CC_MDSS_EDP_LINK_CLK 32
+#define DISP_CC_MDSS_EDP_LINK_CLK_SRC 33
+#define DISP_CC_MDSS_EDP_LINK_DIV_CLK_SRC 34
+#define DISP_CC_MDSS_EDP_LINK_INTF_CLK 35
+#define DISP_CC_MDSS_EDP_PIXEL_CLK 36
+#define DISP_CC_MDSS_EDP_PIXEL_CLK_SRC 37
+#define DISP_CC_MDSS_ESC0_CLK 38
+#define DISP_CC_MDSS_ESC0_CLK_SRC 39
+#define DISP_CC_MDSS_ESC1_CLK 40
+#define DISP_CC_MDSS_ESC1_CLK_SRC 41
+#define DISP_CC_MDSS_MDP_CLK 42
+#define DISP_CC_MDSS_MDP_CLK_SRC 43
+#define DISP_CC_MDSS_MDP_LUT_CLK 44
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 45
+#define DISP_CC_MDSS_PCLK0_CLK 46
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 47
+#define DISP_CC_MDSS_PCLK1_CLK 48
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 49
+#define DISP_CC_MDSS_ROT_CLK 50
+#define DISP_CC_MDSS_ROT_CLK_SRC 51
+#define DISP_CC_MDSS_RSCC_AHB_CLK 52
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 53
+#define DISP_CC_MDSS_VSYNC_CLK 54
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 55
+#define DISP_CC_PLL0 56
+#define DISP_CC_PLL1 57
+#define DISP_CC_SLEEP_CLK 58
+#define DISP_CC_SLEEP_CLK_SRC 59
+#define DISP_CC_XO_CLK 60
/* DISP_CC resets */
#define DISP_CC_MDSS_CORE_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gpucc-lito.h b/include/dt-bindings/clock/qcom,gpucc-lito.h
index 883c55d..e998b25 100644
--- a/include/dt-bindings/clock/qcom,gpucc-lito.h
+++ b/include/dt-bindings/clock/qcom,gpucc-lito.h
@@ -1,29 +1,27 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. */
#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_LITO_H
#define _DT_BINDINGS_CLK_QCOM_GPU_CC_LITO_H
-#define GPU_CC_PLL0 0
-#define GPU_CC_PLL0_OUT_EVEN 1
-#define GPU_CC_PLL1 2
-#define GPU_CC_CX_GFX3D_CLK 3
-#define GPU_CC_CX_GFX3D_SLV_CLK 4
-#define GPU_CC_CX_GMU_CLK 5
-#define GPU_CC_CX_SNOC_DVM_CLK 6
-#define GPU_CC_CXO_AON_CLK 7
-#define GPU_CC_CXO_CLK 8
-#define GPU_CC_GMU_CLK_SRC 9
-#define GPU_CC_GX_CXO_CLK 10
-#define GPU_CC_GX_GFX3D_CLK 11
-#define GPU_CC_GX_GFX3D_CLK_SRC 12
-#define GPU_CC_GX_GMU_CLK 13
-#define GPU_CC_GX_VSENSE_CLK 14
-#define GPU_CC_AHB_CLK 15
-#define GPU_CC_CRC_AHB_CLK 16
-#define GPU_CC_CX_APB_CLK 17
-#define GPU_CC_RBCPR_AHB_CLK 18
-#define GPU_CC_RBCPR_CLK 19
-#define GPU_CC_RBCPR_CLK_SRC 20
+#define MEASURE_ONLY_GPU_CC_CX_GFX3D_CLK 0
+#define MEASURE_ONLY_GPU_CC_CX_GFX3D_SLV_CLK 1
+#define MEASURE_ONLY_GPU_CC_GX_GFX3D_CLK 2
+#define GPU_CC_PLL1 3
+#define GPU_CC_CX_GMU_CLK 4
+#define GPU_CC_CX_SNOC_DVM_CLK 5
+#define GPU_CC_CXO_AON_CLK 6
+#define GPU_CC_CXO_CLK 7
+#define GPU_CC_GMU_CLK_SRC 8
+#define GPU_CC_GX_CXO_CLK 9
+#define GPU_CC_GX_GMU_CLK 10
+#define GPU_CC_GX_VSENSE_CLK 11
+#define GPU_CC_AHB_CLK 12
+#define GPU_CC_CRC_AHB_CLK 13
+#define GPU_CC_CX_APB_CLK 14
+#define GPU_CC_RBCPR_AHB_CLK 15
+#define GPU_CC_RBCPR_CLK 16
+#define GPU_CC_RBCPR_CLK_SRC 17
+#define GPU_CC_SLEEP_CLK 18
#endif
diff --git a/include/dt-bindings/clock/qcom,rpmh.h b/include/dt-bindings/clock/qcom,rpmh.h
index 2b122c1..d6c1dff 100644
--- a/include/dt-bindings/clock/qcom,rpmh.h
+++ b/include/dt-bindings/clock/qcom,rpmh.h
@@ -23,5 +23,7 @@
#define RPMH_RF_CLKD3_A 15
#define RPMH_RF_CLKD4 16
#define RPMH_RF_CLKD4_A 17
+#define RPMH_RF_CLK4 18
+#define RPMH_RF_CLK4_A 19
#endif
diff --git a/include/dt-bindings/sound/qcom,bolero-clk-rsc.h b/include/dt-bindings/sound/qcom,bolero-clk-rsc.h
new file mode 100644
index 0000000..038c066
--- /dev/null
+++ b/include/dt-bindings/sound/qcom,bolero-clk-rsc.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __BOLERO_CODEC_CLK_RSC_H
+#define __BOLERO_CODEC_CLK_RSC_H
+
+/* Bolero clock types */
+#define TX_CORE_CLK 0
+#define RX_CORE_CLK 1
+#define WSA_CORE_CLK 2
+#define VA_CORE_CLK 3
+#define TX_NPL_CLK 4
+#define RX_NPL_CLK 5
+#define WSA_NPL_CLK 6
+#define VA_NPL_CLK 7
+#define MAX_CLK 8
+
+#endif /* __BOLERO_CODEC_CLK_RSC_H */
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index c311571..07e02d6 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -190,6 +190,7 @@ struct backing_dev_info {
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
struct rb_root cgwb_congested_tree; /* their congested states */
struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
+ struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#else
struct bdi_writeback_congested *wb_congested;
#endif
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 5e1694f..6f9ea86 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -32,6 +32,7 @@ struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;
+struct poll_table_struct;
#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
@@ -573,6 +574,9 @@ struct cftype {
ssize_t (*write)(struct kernfs_open_file *of,
char *buf, size_t nbytes, loff_t off);
+ __poll_t (*poll)(struct kernfs_open_file *of,
+ struct poll_table_struct *pt);
+
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lock_class_key lockdep_key;
#endif
diff --git a/include/linux/dma-buf-ref.h b/include/linux/dma-buf-ref.h
new file mode 100644
index 0000000..5bdf1f2
--- /dev/null
+++ b/include/linux/dma-buf-ref.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DMA_BUF_REF_H
+#define _DMA_BUF_REF_H
+
+struct dma_buf;
+struct seq_file;
+
+#ifdef CONFIG_DEBUG_DMA_BUF_REF
+void dma_buf_ref_init(struct dma_buf *b);
+void dma_buf_ref_destroy(struct dma_buf *b);
+void dma_buf_ref_mod(struct dma_buf *b, int nr);
+int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf);
+
+#else
+static inline void dma_buf_ref_init(struct dma_buf *b) {}
+static inline void dma_buf_ref_destroy(struct dma_buf *b) {}
+static inline void dma_buf_ref_mod(struct dma_buf *b, int nr) {}
+static inline int dma_buf_ref_show(struct seq_file *s, struct dma_buf *dmabuf)
+{
+ return -ENOMEM;
+}
+#endif
+
+
+#endif /* _DMA_BUF_REF_H */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 38ebfdc..2ba99cc 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -31,6 +31,7 @@
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/dma-fence.h>
+#include <linux/dma-buf-ref.h>
#include <linux/wait.h>
struct device;
@@ -381,6 +382,7 @@ struct dma_buf_ops {
* @vmap_ptr: the current vmap ptr if vmapping_counter > 0
* @exp_name: name of the exporter; useful for debugging.
* @name: unique name for the buffer
+ * @ktime: time (in jiffies) at which the buffer was born
* @owner: pointer to exporter module; used for refcounting when exporter is a
* kernel module.
* @list_node: node for dma_buf accounting and debugging.
@@ -409,6 +411,7 @@ struct dma_buf {
void *vmap_ptr;
const char *exp_name;
char *name;
+ ktime_t ktime;
struct module *owner;
struct list_head list_node;
void *priv;
@@ -423,6 +426,8 @@ struct dma_buf {
__poll_t active;
} cb_excl, cb_shared;
+
+ struct list_head refs;
};
/**
@@ -495,6 +500,7 @@ struct dma_buf_export_info {
static inline void get_dma_buf(struct dma_buf *dmabuf)
{
get_file(dmabuf->file);
+ dma_buf_ref_mod(dmabuf, 1);
}
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
diff --git a/include/linux/ipa_wdi3.h b/include/linux/ipa_wdi3.h
index aca7fba..3c8a72c 100644
--- a/include/linux/ipa_wdi3.h
+++ b/include/linux/ipa_wdi3.h
@@ -97,10 +97,12 @@ struct ipa_wdi_reg_intf_in_params {
* @transfer_ring_size: size of the transfer ring
* @transfer_ring_doorbell_pa: physical address of the doorbell that
IPA uC will update the tailpointer of the transfer ring
 * @is_txr_rn_db_pcie_addr: Bool indicating whether the txr ring DB is a pcie address
* @event_ring_base_pa: physical address of the base of the event ring
* @event_ring_size: event ring size
* @event_ring_doorbell_pa: physical address of the doorbell that IPA uC
will update the headpointer of the event ring
 * @is_evt_rn_db_pcie_addr: Bool indicating whether the evt ring DB is a pcie address
* @num_pkt_buffers: Number of pkt buffers allocated. The size of the event
ring and the transfer ring has to be atleast ( num_pkt_buffers + 1)
* @pkt_offset: packet offset (wdi header length)
@@ -113,10 +115,12 @@ struct ipa_wdi_pipe_setup_info {
phys_addr_t transfer_ring_base_pa;
u32 transfer_ring_size;
phys_addr_t transfer_ring_doorbell_pa;
+ bool is_txr_rn_db_pcie_addr;
phys_addr_t event_ring_base_pa;
u32 event_ring_size;
phys_addr_t event_ring_doorbell_pa;
+ bool is_evt_rn_db_pcie_addr;
u16 num_pkt_buffers;
u16 pkt_offset;
@@ -132,10 +136,12 @@ struct ipa_wdi_pipe_setup_info {
* @transfer_ring_size: size of the transfer ring
* @transfer_ring_doorbell_pa: physical address of the doorbell that
IPA uC will update the tailpointer of the transfer ring
 * @is_txr_rn_db_pcie_addr: Bool indicating whether the txr ring DB is a pcie address
* @event_ring_base_pa: physical address of the base of the event ring
* @event_ring_size: event ring size
* @event_ring_doorbell_pa: physical address of the doorbell that IPA uC
will update the headpointer of the event ring
 * @is_evt_rn_db_pcie_addr: Bool indicating whether the evt ring DB is a pcie address
* @num_pkt_buffers: Number of pkt buffers allocated. The size of the event
ring and the transfer ring has to be atleast ( num_pkt_buffers + 1)
* @pkt_offset: packet offset (wdi header length)
@@ -148,10 +154,12 @@ struct ipa_wdi_pipe_setup_info_smmu {
struct sg_table transfer_ring_base;
u32 transfer_ring_size;
phys_addr_t transfer_ring_doorbell_pa;
+ bool is_txr_rn_db_pcie_addr;
struct sg_table event_ring_base;
u32 event_ring_size;
phys_addr_t event_ring_doorbell_pa;
+ bool is_evt_rn_db_pcie_addr;
u16 num_pkt_buffers;
u16 pkt_offset;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 201de12..c9bffda 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m);
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+ unsigned int *mapped_cpu);
void irq_matrix_reserve(struct irq_matrix *m);
void irq_matrix_remove_reserved(struct irq_matrix *m);
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 814643f..444869d 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -25,6 +25,7 @@ struct seq_file;
struct vm_area_struct;
struct super_block;
struct file_system_type;
+struct poll_table_struct;
struct kernfs_open_node;
struct kernfs_iattrs;
@@ -261,6 +262,9 @@ struct kernfs_ops {
ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes,
loff_t off);
+ __poll_t (*poll)(struct kernfs_open_file *of,
+ struct poll_table_struct *pt);
+
int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -350,6 +354,8 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
const char *new_name, const void *new_ns);
int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr);
+__poll_t kernfs_generic_poll(struct kernfs_open_file *of,
+ struct poll_table_struct *pt);
void kernfs_notify(struct kernfs_node *kn);
const void *kernfs_super_ns(struct super_block *sb);
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index c196176..edf8f86 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -4,7 +4,6 @@
/* Simple interface for creating and stopping kernel threads without mess. */
#include <linux/err.h>
#include <linux/sched.h>
-#include <linux/cgroup.h>
__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
@@ -198,6 +197,8 @@ bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);
void kthread_destroy_worker(struct kthread_worker *worker);
+struct cgroup_subsys_state;
+
#ifdef CONFIG_BLK_CGROUP
void kthread_associate_blkcg(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *kthread_blkcg(void);
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 100247c..dc56925 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -323,6 +323,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_QC_OPTI_DISABLE,
POWER_SUPPLY_PROP_CC_SOC,
POWER_SUPPLY_PROP_BATT_AGE_LEVEL,
+ POWER_SUPPLY_PROP_SCALE_MODE_EN,
/* Charge pump properties */
POWER_SUPPLY_PROP_CP_STATUS1,
POWER_SUPPLY_PROP_CP_STATUS2,
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 2cf422d..4d1c1f6 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -11,7 +11,7 @@ enum psi_task_count {
NR_IOWAIT,
NR_MEMSTALL,
NR_RUNNING,
- NR_PSI_TASK_COUNTS,
+ NR_PSI_TASK_COUNTS = 3,
};
/* Task state bitmasks */
@@ -24,7 +24,7 @@ enum psi_res {
PSI_IO,
PSI_MEM,
PSI_CPU,
- NR_PSI_RESOURCES,
+ NR_PSI_RESOURCES = 3,
};
/*
@@ -41,7 +41,7 @@ enum psi_states {
PSI_CPU_SOME,
/* Only per-CPU, to weigh the CPU in the global average: */
PSI_NONIDLE,
- NR_PSI_STATES,
+ NR_PSI_STATES = 6,
};
struct psi_group_cpu {
@@ -53,6 +53,9 @@ struct psi_group_cpu {
/* States of the tasks belonging to this group */
unsigned int tasks[NR_PSI_TASK_COUNTS];
+ /* Aggregate pressure state derived from the tasks */
+ u32 state_mask;
+
/* Period time sampling buckets for each state of interest (ns) */
u32 times[NR_PSI_STATES];
@@ -66,17 +69,17 @@ struct psi_group_cpu {
};
struct psi_group {
- /* Protects data updated during an aggregation */
- struct mutex stat_lock;
+ /* Protects data used by the aggregator */
+ struct mutex avgs_lock;
/* Per-cpu task state & time tracking */
struct psi_group_cpu __percpu *pcpu;
- /* Periodic aggregation state */
- u64 total_prev[NR_PSI_STATES - 1];
- u64 last_update;
- u64 next_update;
- struct delayed_work clock_work;
+ /* Running pressure averages */
+ u64 avg_total[NR_PSI_STATES - 1];
+ u64 avg_last_update;
+ u64 avg_next_update;
+ struct delayed_work avgs_work;
/* Total stall times and sampled pressure averages */
u64 total[NR_PSI_STATES - 1];
diff --git a/include/linux/rq_stats.h b/include/linux/rq_stats.h
index a0bccf1..59440af 100644
--- a/include/linux/rq_stats.h
+++ b/include/linux/rq_stats.h
@@ -1,17 +1,11 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2011,2013-2014,2019, The Linux Foundation. All rights reserved.
- *
*/
struct rq_data {
- unsigned int rq_avg;
- unsigned long rq_poll_jiffies;
unsigned long def_timer_jiffies;
- unsigned long rq_poll_last_jiffy;
- unsigned long rq_poll_total_jiffies;
unsigned long def_timer_last_jiffy;
- unsigned int hotplug_disabled;
int64_t def_start_time;
struct attribute_group *attr_group;
struct kobject *kobj;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1b79884..8bd1a9b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -25,7 +25,6 @@
#include <linux/latencytop.h>
#include <linux/sched/prio.h>
#include <linux/signal_types.h>
-#include <linux/psi_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
#include <linux/rseq.h>
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
index 03f03319..602055a 100644
--- a/include/soc/qcom/secure_buffer.h
+++ b/include/soc/qcom/secure_buffer.h
@@ -48,6 +48,12 @@ int hyp_assign_table(struct sg_table *table,
u32 *source_vm_list, int source_nelems,
int *dest_vmids, int *dest_perms,
int dest_nelems);
+
+int try_hyp_assign_table(struct sg_table *table,
+ u32 *source_vm_list, int source_nelems,
+ int *dest_vmids, int *dest_perms,
+ int dest_nelems);
+
extern int hyp_assign_phys(phys_addr_t addr, u64 size,
u32 *source_vmlist, int source_nelems,
int *dest_vmids, int *dest_perms, int dest_nelems);
@@ -72,6 +78,14 @@ static inline int hyp_assign_table(struct sg_table *table,
return -EINVAL;
}
+static inline int try_hyp_assign_table(struct sg_table *table,
+ u32 *source_vm_list, int source_nelems,
+ int *dest_vmids, int *dest_perms,
+ int dest_nelems)
+{
+ return -EINVAL;
+}
+
static inline int hyp_assign_phys(phys_addr_t addr, u64 size,
u32 *source_vmlist, int source_nelems,
int *dest_vmids, int *dest_perms, int dest_nelems)
diff --git a/include/soc/qcom/subsystem_notif.h b/include/soc/qcom/subsystem_notif.h
index 7d1bbbb..79f8169 100644
--- a/include/soc/qcom/subsystem_notif.h
+++ b/include/soc/qcom/subsystem_notif.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2011, 2013-2014, 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2013-2014, 2018-2019, The Linux Foundation. All rights reserved.
*/
/*
* Subsystem restart notifier API header
@@ -17,6 +17,7 @@ enum subsys_notif_type {
SUBSYS_AFTER_SHUTDOWN,
SUBSYS_BEFORE_POWERUP,
SUBSYS_AFTER_POWERUP,
+ SUBSYS_BEFORE_AUTH_AND_RESET,
SUBSYS_RAMDUMP_NOTIFICATION,
SUBSYS_POWERUP_FAILURE,
SUBSYS_PROXY_VOTE,
diff --git a/include/soc/qcom/subsystem_restart.h b/include/soc/qcom/subsystem_restart.h
index 363928a..81716f3 100644
--- a/include/soc/qcom/subsystem_restart.h
+++ b/include/soc/qcom/subsystem_restart.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __SUBSYS_RESTART_H
@@ -157,6 +157,7 @@ extern void subsys_set_crash_status(struct subsys_device *dev,
extern enum crash_status subsys_get_crash_status(struct subsys_device *dev);
void notify_proxy_vote(struct device *device);
void notify_proxy_unvote(struct device *device);
+void notify_before_auth_and_reset(struct device *device);
void complete_err_ready(struct subsys_device *subsys);
void complete_shutdown_ack(struct subsys_device *subsys);
struct subsys_device *find_subsys_device(const char *str);
@@ -218,6 +219,7 @@ enum crash_status subsys_get_crash_status(struct subsys_device *dev)
}
static inline void notify_proxy_vote(struct device *device) { }
static inline void notify_proxy_unvote(struct device *device) { }
+static inline void notify_before_auth_and_reset(struct device *device) { }
static inline int wait_for_shutdown_ack(struct subsys_desc *desc)
{
return -EOPNOTSUPP;
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index ada1bce..dc1a320 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -13,6 +13,7 @@
endif
header-y += nfc/
+header-y += qbt_handler.h
ifneq ($(VSERVICES_SUPPORT), "")
include include/linux/Kbuild.vservices
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 4a1c285..7d62fcf 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -88,6 +88,16 @@ enum flat_binder_object_flags {
* scheduling policy from the caller (for synchronous transactions).
*/
FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
+
+#ifdef __KERNEL__
+ /**
+ * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
+ *
+ * Only when set, causes senders to include their security
+ * context
+ */
+ FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000,
+#endif /* __KERNEL__ */
};
#ifdef BINDER_IPC_32BIT
@@ -265,6 +275,7 @@ struct binder_node_info_for_ref {
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
+#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
/*
* NOTE: Two special error codes you should check for when calling
@@ -323,6 +334,13 @@ struct binder_transaction_data {
} data;
};
+#ifdef __KERNEL__
+struct binder_transaction_data_secctx {
+ struct binder_transaction_data transaction_data;
+ binder_uintptr_t secctx;
+};
+#endif /* __KERNEL__ */
+
struct binder_transaction_data_sg {
struct binder_transaction_data transaction_data;
binder_size_t buffers_size;
@@ -358,6 +376,13 @@ enum binder_driver_return_protocol {
BR_OK = _IO('r', 1),
/* No parameters! */
+#ifdef __KERNEL__
+ BR_TRANSACTION_SEC_CTX = _IOR('r', 2,
+ struct binder_transaction_data_secctx),
+ /*
+ * binder_transaction_data_secctx: the received command.
+ */
+#endif /* __KERNEL__ */
BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
/*
diff --git a/include/uapi/linux/qbt_handler.h b/include/uapi/linux/qbt_handler.h
new file mode 100644
index 0000000..8ebbf1f
--- /dev/null
+++ b/include/uapi/linux/qbt_handler.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UAPI_QBT_HANDLER_H_
+#define _UAPI_QBT_HANDLER_H_
+
+#define MAX_NAME_SIZE 32
+
+#define QBT_IS_WUHB_CONNECTED 100
+#define QBT_SEND_KEY_EVENT 101
+#define QBT_ENABLE_IPC 102
+#define QBT_DISABLE_IPC 103
+#define QBT_ENABLE_FD 104
+#define QBT_DISABLE_FD 105
+
+/*
+ * enum qbt_fw_event -
+ * enumeration of firmware events
+ * @FW_EVENT_FINGER_DOWN - finger down detected
+ * @FW_EVENT_FINGER_UP - finger up detected
+ * @FW_EVENT_IPC - an IPC from the firmware is pending
+ */
+enum qbt_fw_event {
+ FW_EVENT_FINGER_DOWN = 1,
+ FW_EVENT_FINGER_UP = 2,
+ FW_EVENT_IPC = 3,
+};
+
+/*
+ * struct qbt_wuhb_connected_status -
+ * used to query whether WUHB INT line is connected
+ * @is_wuhb_connected - if non-zero, WUHB INT line is connected
+ */
+struct qbt_wuhb_connected_status {
+ bool is_wuhb_connected;
+};
+
+/*
+ * struct qbt_key_event -
+ * used to send key event
+ * @key - the key event to send
+ * @value - value of the key event
+ */
+struct qbt_key_event {
+ int key;
+ int value;
+};
+
+#endif /* _UAPI_QBT_HANDLER_H_ */
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index e2d30c7..1496008 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -962,6 +962,9 @@ enum v4l2_mpeg_vidc_video_vp9_level {
#define V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE \
(V4L2_CID_MPEG_MSM_VIDC_BASE + 119)
+#define V4L2_CID_MPEG_VIDC_VENC_BITRATE_SAVINGS \
+ (V4L2_CID_MPEG_MSM_VIDC_BASE + 131)
+
#define V4L2_CID_MPEG_VIDC_VIDEO_HEVC_MAX_HIER_CODING_LAYER \
(V4L2_CID_MPEG_MSM_VIDC_BASE + 120)
enum v4l2_mpeg_vidc_video_hevc_max_hier_coding_layer {
diff --git a/include/uapi/media/msm_cvp_private.h b/include/uapi/media/msm_cvp_private.h
index 7eb1166..1200c5c 100644
--- a/include/uapi/media/msm_cvp_private.h
+++ b/include/uapi/media/msm_cvp_private.h
@@ -12,91 +12,103 @@
/* VIDIOC private cvp command */
#define VIDIOC_CVP_CMD \
- _IOWR('V', BASE_VIDIOC_PRIVATE_CVP, struct msm_cvp_arg)
+ _IOWR('V', BASE_VIDIOC_PRIVATE_CVP, struct cvp_kmd_arg)
/* Commands type */
-#define MSM_VIDC_CMD_START 0x10000000
-#define MSM_CVP_CMD_START (MSM_VIDC_CMD_START + 0x1000)
+#define CVP_KMD_CMD_BASE 0x10000000
+#define CVP_KMD_CMD_START (CVP_KMD_CMD_BASE + 0x1000)
/*
* userspace clients pass one of the below arguments type
- * in struct msm_cvp_arg (@type field).
+ * in struct cvp_kmd_arg (@type field).
*/
/*
- * MSM_CVP_GET_SESSION_INFO - this argument type is used to
+ * CVP_KMD_GET_SESSION_INFO - this argument type is used to
* get the session information from driver. it passes
- * struct msm_cvp_session_info {}
+ * struct cvp_kmd_session_info {}
*/
-#define MSM_CVP_GET_SESSION_INFO (MSM_CVP_CMD_START + 1)
+#define CVP_KMD_GET_SESSION_INFO (CVP_KMD_CMD_START + 1)
/*
- * MSM_CVP_REQUEST_POWER - this argument type is used to
+ * CVP_KMD_REQUEST_POWER - this argument type is used to
* set the power required to driver. it passes
- * struct msm_cvp_request_power {}
+ * struct cvp_kmd_request_power {}
*/
-#define MSM_CVP_REQUEST_POWER (MSM_CVP_CMD_START + 2)
+#define CVP_KMD_REQUEST_POWER (CVP_KMD_CMD_START + 2)
/*
- * MSM_CVP_REGISTER_BUFFER - this argument type is used to
+ * CVP_KMD_REGISTER_BUFFER - this argument type is used to
* register the buffer to driver. it passes
- * struct msm_cvp_buffer {}
+ * struct cvp_kmd_buffer {}
*/
-#define MSM_CVP_REGISTER_BUFFER (MSM_CVP_CMD_START + 3)
+#define CVP_KMD_REGISTER_BUFFER (CVP_KMD_CMD_START + 3)
/*
- * MSM_CVP_REGISTER_BUFFER - this argument type is used to
+ * CVP_KMD_REGISTER_BUFFER - this argument type is used to
* unregister the buffer to driver. it passes
- * struct msm_cvp_buffer {}
+ * struct cvp_kmd_buffer {}
*/
-#define MSM_CVP_UNREGISTER_BUFFER (MSM_CVP_CMD_START + 4)
+#define CVP_KMD_UNREGISTER_BUFFER (CVP_KMD_CMD_START + 4)
-#define MSM_CVP_HFI_SEND_CMD (MSM_CVP_CMD_START + 5)
+#define CVP_KMD_HFI_SEND_CMD (CVP_KMD_CMD_START + 5)
-#define MSM_CVP_HFI_DFS_CONFIG_CMD (MSM_CVP_CMD_START + 6)
+#define CVP_KMD_HFI_DFS_CONFIG_CMD (CVP_KMD_CMD_START + 6)
-#define MSM_CVP_HFI_DFS_FRAME_CMD (MSM_CVP_CMD_START + 7)
+#define CVP_KMD_HFI_DFS_FRAME_CMD (CVP_KMD_CMD_START + 7)
-#define MSM_CVP_HFI_DFS_FRAME_CMD_RESPONSE (MSM_CVP_CMD_START + 8)
+#define CVP_KMD_HFI_DFS_FRAME_CMD_RESPONSE (CVP_KMD_CMD_START + 8)
-#define MSM_CVP_HFI_DME_CONFIG_CMD (MSM_CVP_CMD_START + 9)
+#define CVP_KMD_HFI_DME_CONFIG_CMD (CVP_KMD_CMD_START + 9)
-#define MSM_CVP_HFI_DME_FRAME_CMD (MSM_CVP_CMD_START + 10)
+#define CVP_KMD_HFI_DME_FRAME_CMD (CVP_KMD_CMD_START + 10)
-#define MSM_CVP_HFI_DME_FRAME_CMD_RESPONSE (MSM_CVP_CMD_START + 11)
+#define CVP_KMD_HFI_DME_FRAME_CMD_RESPONSE (CVP_KMD_CMD_START + 11)
-#define MSM_CVP_HFI_PERSIST_CMD (MSM_CVP_CMD_START + 12)
+#define CVP_KMD_HFI_PERSIST_CMD (CVP_KMD_CMD_START + 12)
-#define MSM_CVP_HFI_PERSIST_CMD_RESPONSE (MSM_CVP_CMD_START + 13)
+#define CVP_KMD_HFI_PERSIST_CMD_RESPONSE (CVP_KMD_CMD_START + 13)
+
+#define CVP_KMD_HFI_DME_FRAME_FENCE_CMD (CVP_KMD_CMD_START + 14)
+
+#define CVP_KMD_SEND_CMD_PKT (CVP_KMD_CMD_START + 64)
+
+#define CVP_KMD_RECEIVE_MSG_PKT (CVP_KMD_CMD_START + 65)
+
+#define CVP_KMD_SET_SYS_PROPERTY (CVP_KMD_CMD_START + 66)
+
+#define CVP_KMD_GET_SYS_PROPERTY (CVP_KMD_CMD_START + 67)
+
+#define CVP_KMD_SESSION_CONTROL (CVP_KMD_CMD_START + 68)
/* flags */
-#define MSM_CVP_FLAG_UNSECURE 0x00000000
-#define MSM_CVP_FLAG_SECURE 0x00000001
+#define CVP_KMD_FLAG_UNSECURE 0x00000000
+#define CVP_KMD_FLAG_SECURE 0x00000001
/* buffer type */
-#define MSM_CVP_BUFTYPE_INPUT 0x00000001
-#define MSM_CVP_BUFTYPE_OUTPUT 0x00000002
-#define MSM_CVP_BUFTYPE_INTERNAL_1 0x00000003
-#define MSM_CVP_BUFTYPE_INTERNAL_2 0x00000004
+#define CVP_KMD_BUFTYPE_INPUT 0x00000001
+#define CVP_KMD_BUFTYPE_OUTPUT 0x00000002
+#define CVP_KMD_BUFTYPE_INTERNAL_1 0x00000003
+#define CVP_KMD_BUFTYPE_INTERNAL_2 0x00000004
/**
- * struct msm_cvp_session_info - session information
+ * struct cvp_kmd_session_info - session information
* @session_id: current session id
*/
-struct msm_cvp_session_info {
+struct cvp_kmd_session_info {
unsigned int session_id;
unsigned int reserved[10];
};
/**
- * struct msm_cvp_request_power - power / clock data information
+ * struct cvp_kmd_request_power - power / clock data information
* @clock_cycles_a: clock cycles per second required for hardware_a
* @clock_cycles_b: clock cycles per second required for hardware_b
* @ddr_bw: bandwidth required for ddr in bps
* @sys_cache_bw: bandwidth required for system cache in bps
*/
-struct msm_cvp_request_power {
+struct cvp_kmd_request_power {
unsigned int clock_cycles_a;
unsigned int clock_cycles_b;
unsigned int ddr_bw;
@@ -105,7 +117,7 @@ struct msm_cvp_request_power {
};
/**
- * struct msm_cvp_buffer - buffer information to be registered
+ * struct cvp_kmd_buffer - buffer information to be registered
* @index: index of buffer
* @type: buffer type
* @fd: file descriptor of buffer
@@ -114,7 +126,7 @@ struct msm_cvp_request_power {
* @pixelformat: fourcc format
* @flags: buffer flags
*/
-struct msm_cvp_buffer {
+struct cvp_kmd_buffer {
unsigned int index;
unsigned int type;
unsigned int fd;
@@ -126,42 +138,42 @@ struct msm_cvp_buffer {
};
/**
- * struct msm_cvp_send_cmd - sending generic HFI command
+ * struct cvp_kmd_send_cmd - sending generic HFI command
* @cmd_address_fd: file descriptor of cmd_address
* @cmd_size: allocated size of buffer
*/
-struct msm_cvp_send_cmd {
+struct cvp_kmd_send_cmd {
unsigned int cmd_address_fd;
unsigned int cmd_size;
unsigned int reserved[10];
};
/**
- * struct msm_cvp_color_plane_info - color plane info
+ * struct cvp_kmd_color_plane_info - color plane info
* @stride: stride of plane
* @buf_size: size of plane
*/
-struct msm_cvp_color_plane_info {
+struct cvp_kmd_color_plane_info {
int stride[HFI_MAX_PLANES];
unsigned int buf_size[HFI_MAX_PLANES];
};
/**
- * struct msm_cvp_client_data - store generic client
+ * struct cvp_kmd_client_data - store generic client
* data
* @transactionid: transaction id
* @client_data1: client data to be used during callback
* @client_data2: client data to be used during callback
*/
-struct msm_cvp_client_data {
+struct cvp_kmd_client_data {
unsigned int transactionid;
unsigned int client_data1;
unsigned int client_data2;
};
#define CVP_COLOR_PLANE_INFO_SIZE \
- sizeof(struct msm_cvp_color_plane_info)
-#define CVP_CLIENT_DATA_SIZE sizeof(struct msm_cvp_client_data)
+ sizeof(struct cvp_kmd_color_plane_info)
+#define CVP_CLIENT_DATA_SIZE sizeof(struct cvp_kmd_client_data)
#define CVP_DFS_CONFIG_CMD_SIZE 38
#define CVP_DFS_FRAME_CMD_SIZE 16
#define CVP_DFS_FRAME_BUFFERS_OFFSET 8
@@ -175,29 +187,56 @@ struct msm_cvp_client_data {
#define CVP_PERSIST_BUFFERS_OFFSET 7
#define CVP_PSRSIST_BUF_NUM 2
-struct msm_cvp_dfs_config {
+struct cvp_kmd_dfs_config {
unsigned int cvp_dfs_config[CVP_DFS_CONFIG_CMD_SIZE];
};
-struct msm_cvp_dfs_frame {
+struct cvp_kmd_dfs_frame {
unsigned int frame_data[CVP_DFS_FRAME_CMD_SIZE];
};
-struct msm_cvp_dme_config {
+struct cvp_kmd_dme_config {
unsigned int cvp_dme_config[CVP_DME_CONFIG_CMD_SIZE];
};
-struct msm_cvp_dme_frame {
+struct cvp_kmd_dme_frame {
unsigned int frame_data[CVP_DME_FRAME_CMD_SIZE];
};
-struct msm_cvp_persist_buf {
+struct cvp_kmd_persist_buf {
unsigned int persist_data[CVP_PERSIST_CMD_SIZE];
};
+#define MAX_HFI_PKT_SIZE 250
+
+struct cvp_kmd_hfi_packet {
+ unsigned int pkt_data[MAX_HFI_PKT_SIZE];
+};
+
+struct cvp_kmd_sys_property {
+ unsigned int prop_type;
+ unsigned int data;
+};
+
+struct cvp_kmd_sys_properties {
+ unsigned int prop_num;
+ struct cvp_kmd_sys_property prop_data;
+};
+
+#define MAX_HFI_FENCE_SIZE 16
+#define MAX_HFI_FENCE_OFFSET (MAX_HFI_PKT_SIZE-MAX_HFI_FENCE_SIZE)
+struct cvp_kmd_hfi_fence_packet {
+ unsigned int pkt_data[MAX_HFI_FENCE_OFFSET];
+ unsigned int fence_data[MAX_HFI_FENCE_SIZE];
+};
+
+
/**
- * struct msm_cvp_arg - argument passed with VIDIOC_CVP_CMD
+ * struct cvp_kmd_arg - argument passed with VIDIOC_CVP_CMD
+ * To be deprecated
* @type: command type
+ * @buf_offset: offset to buffer list in the command
+ * @buf_num: number of buffers in the command
* @session: session information
* @req_power: power information
* @regbuf: buffer to be registered
@@ -205,22 +244,28 @@ struct msm_cvp_persist_buf {
* @send_cmd: sending generic HFI command
* @dfs_config: sending DFS config command
* @dfs_frame: sending DFS frame command
+ * @hfi_pkt: HFI packet created by user library
+ * @sys_properties: System properties read or set by user library
+ * @hfi_fence_pkt: HFI fence packet created by user library
*/
-struct msm_cvp_arg {
+struct cvp_kmd_arg {
unsigned int type;
- union data_t {
- struct msm_cvp_session_info session;
- struct msm_cvp_request_power req_power;
- struct msm_cvp_buffer regbuf;
- struct msm_cvp_buffer unregbuf;
- struct msm_cvp_send_cmd send_cmd;
- struct msm_cvp_dfs_config dfs_config;
- struct msm_cvp_dfs_frame dfs_frame;
- struct msm_cvp_dme_config dme_config;
- struct msm_cvp_dme_frame dme_frame;
- struct msm_cvp_persist_buf pbuf_cmd;
+ unsigned int buf_offset;
+ unsigned int buf_num;
+ union cvp_data_t {
+ struct cvp_kmd_session_info session;
+ struct cvp_kmd_request_power req_power;
+ struct cvp_kmd_buffer regbuf;
+ struct cvp_kmd_buffer unregbuf;
+ struct cvp_kmd_send_cmd send_cmd;
+ struct cvp_kmd_dfs_config dfs_config;
+ struct cvp_kmd_dfs_frame dfs_frame;
+ struct cvp_kmd_dme_config dme_config;
+ struct cvp_kmd_dme_frame dme_frame;
+ struct cvp_kmd_persist_buf pbuf_cmd;
+ struct cvp_kmd_hfi_packet hfi_pkt;
+ struct cvp_kmd_sys_properties sys_properties;
+ struct cvp_kmd_hfi_fence_packet hfi_fence_pkt;
} data;
- unsigned int reserved[12];
};
-
#endif
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 498c6bc..4d631cb 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -3499,6 +3499,16 @@ static ssize_t cgroup_file_write(struct kernfs_open_file *of, char *buf,
return ret ?: nbytes;
}
+static __poll_t cgroup_file_poll(struct kernfs_open_file *of, poll_table *pt)
+{
+ struct cftype *cft = of->kn->priv;
+
+ if (cft->poll)
+ return cft->poll(of, pt);
+
+ return kernfs_generic_poll(of, pt);
+}
+
static void *cgroup_seqfile_start(struct seq_file *seq, loff_t *ppos)
{
return seq_cft(seq)->seq_start(seq, ppos);
@@ -3537,6 +3547,7 @@ static struct kernfs_ops cgroup_kf_single_ops = {
.open = cgroup_file_open,
.release = cgroup_file_release,
.write = cgroup_file_write,
+ .poll = cgroup_file_poll,
.seq_show = cgroup_seqfile_show,
};
@@ -3545,6 +3556,7 @@ static struct kernfs_ops cgroup_kf_ops = {
.open = cgroup_file_open,
.release = cgroup_file_release,
.write = cgroup_file_write,
+ .poll = cgroup_file_poll,
.seq_start = cgroup_seqfile_start,
.seq_next = cgroup_seqfile_next,
.seq_stop = cgroup_seqfile_stop,
diff --git a/kernel/exit.c b/kernel/exit.c
index 7b5be763..ddd2aa9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -307,7 +307,7 @@ void rcuwait_wake_up(struct rcuwait *w)
* MB (A) MB (B)
* [L] cond [L] tsk
*/
- smp_rmb(); /* (B) */
+ smp_mb(); /* (B) */
/*
* Avoid using task_rcu_dereference() magic as long as we are careful,
diff --git a/kernel/futex.c b/kernel/futex.c
index d7c465f..c5fca74 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1444,11 +1444,7 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
return;
- /*
- * Queue the task for later wakeup for after we've released
- * the hb->lock. wake_q_add() grabs reference to p.
- */
- wake_q_add(wake_q, p);
+ get_task_struct(p);
__unqueue_futex(q);
/*
* The waiting task can free the futex_q as soon as q->lock_ptr = NULL
@@ -1458,6 +1454,13 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
* plist_del in __unqueue_futex().
*/
smp_store_release(&q->lock_ptr, NULL);
+
+ /*
+ * Queue the task for later wakeup for after we've released
+ * the hb->lock. wake_q_add() grabs reference to p.
+ */
+ wake_q_add(wake_q, p);
+ put_task_struct(p);
}
/*
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6dfdb4d..eb584ad 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -396,6 +396,9 @@ int irq_setup_affinity(struct irq_desc *desc)
}
cpumask_and(&mask, cpu_online_mask, set);
+ if (cpumask_empty(&mask))
+ cpumask_copy(&mask, cpu_online_mask);
+
if (node != NUMA_NO_NODE) {
const struct cpumask *nodemask = cpumask_of_node(node);
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 5092494..9233770 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -14,6 +14,7 @@ struct cpumap {
unsigned int available;
unsigned int allocated;
unsigned int managed;
+ unsigned int managed_allocated;
bool initialized;
bool online;
unsigned long alloc_map[IRQ_MATRIX_SIZE];
@@ -124,6 +125,48 @@ static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
return area;
}
+/* Find the best CPU which has the lowest vector allocation count */
+static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
+ const struct cpumask *msk)
+{
+ unsigned int cpu, best_cpu, maxavl = 0;
+ struct cpumap *cm;
+
+ best_cpu = UINT_MAX;
+
+ for_each_cpu(cpu, msk) {
+ cm = per_cpu_ptr(m->maps, cpu);
+
+ if (!cm->online || cm->available <= maxavl)
+ continue;
+
+ best_cpu = cpu;
+ maxavl = cm->available;
+ }
+ return best_cpu;
+}
+
+/* Find the best CPU which has the lowest number of managed IRQs allocated */
+static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m,
+ const struct cpumask *msk)
+{
+ unsigned int cpu, best_cpu, allocated = UINT_MAX;
+ struct cpumap *cm;
+
+ best_cpu = UINT_MAX;
+
+ for_each_cpu(cpu, msk) {
+ cm = per_cpu_ptr(m->maps, cpu);
+
+ if (!cm->online || cm->managed_allocated > allocated)
+ continue;
+
+ best_cpu = cpu;
+ allocated = cm->managed_allocated;
+ }
+ return best_cpu;
+}
+
/**
* irq_matrix_assign_system - Assign system wide entry in the matrix
* @m: Matrix pointer
@@ -239,11 +282,21 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
* @m: Matrix pointer
* @cpu: On which CPU the interrupt should be allocated
*/
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+ unsigned int *mapped_cpu)
{
- struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
- unsigned int bit, end = m->alloc_end;
+ unsigned int bit, cpu, end = m->alloc_end;
+ struct cpumap *cm;
+ if (cpumask_empty(msk))
+ return -EINVAL;
+
+ cpu = matrix_find_best_cpu_managed(m, msk);
+ if (cpu == UINT_MAX)
+ return -ENOSPC;
+
+ cm = per_cpu_ptr(m->maps, cpu);
+ end = m->alloc_end;
/* Get managed bit which are not allocated */
bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
bit = find_first_bit(m->scratch_map, end);
@@ -251,7 +304,9 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
return -ENOSPC;
set_bit(bit, cm->alloc_map);
cm->allocated++;
+ cm->managed_allocated++;
m->total_allocated++;
+ *mapped_cpu = cpu;
trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
return bit;
}
@@ -322,37 +377,27 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
bool reserved, unsigned int *mapped_cpu)
{
- unsigned int cpu, best_cpu, maxavl = 0;
+ unsigned int cpu, bit;
struct cpumap *cm;
- unsigned int bit;
- best_cpu = UINT_MAX;
- for_each_cpu(cpu, msk) {
- cm = per_cpu_ptr(m->maps, cpu);
+ cpu = matrix_find_best_cpu(m, msk);
+ if (cpu == UINT_MAX)
+ return -ENOSPC;
- if (!cm->online || cm->available <= maxavl)
- continue;
+ cm = per_cpu_ptr(m->maps, cpu);
+ bit = matrix_alloc_area(m, cm, 1, false);
+ if (bit >= m->alloc_end)
+ return -ENOSPC;
+ cm->allocated++;
+ cm->available--;
+ m->total_allocated++;
+ m->global_available--;
+ if (reserved)
+ m->global_reserved--;
+ *mapped_cpu = cpu;
+ trace_irq_matrix_alloc(bit, cpu, m, cm);
+ return bit;
- best_cpu = cpu;
- maxavl = cm->available;
- }
-
- if (maxavl) {
- cm = per_cpu_ptr(m->maps, best_cpu);
- bit = matrix_alloc_area(m, cm, 1, false);
- if (bit < m->alloc_end) {
- cm->allocated++;
- cm->available--;
- m->total_allocated++;
- m->global_available--;
- if (reserved)
- m->global_reserved--;
- *mapped_cpu = best_cpu;
- trace_irq_matrix_alloc(bit, best_cpu, m, cm);
- return bit;
- }
- }
- return -ENOSPC;
}
/**
@@ -373,6 +418,8 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
clear_bit(bit, cm->alloc_map);
cm->allocated--;
+ if(managed)
+ cm->managed_allocated--;
if (cm->online)
m->total_allocated--;
@@ -442,13 +489,14 @@ void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
seq_printf(sf, "Total allocated: %6u\n", m->total_allocated);
seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
m->system_map);
- seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
+ seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " ");
cpus_read_lock();
for_each_online_cpu(cpu) {
struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
- seq_printf(sf, "%*s %4d %4u %4u %4u %*pbl\n", ind, " ",
- cpu, cm->available, cm->managed, cm->allocated,
+ seq_printf(sf, "%*s %4d %4u %4u %4u %4u %*pbl\n", ind, " ",
+ cpu, cm->available, cm->managed,
+ cm->managed_allocated, cm->allocated,
m->matrix_bits, cm->alloc_map);
}
cpus_read_unlock();
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 087d18d..e3dfad2 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -11,6 +11,7 @@
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
+#include <linux/cgroup.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index 3064c50..ef90935 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -198,15 +198,22 @@ static void __rwsem_mark_wake(struct rw_semaphore *sem,
woken++;
tsk = waiter->task;
- wake_q_add(wake_q, tsk);
+ get_task_struct(tsk);
list_del(&waiter->list);
/*
- * Ensure that the last operation is setting the reader
+ * Ensure calling get_task_struct() before setting the reader
* waiter to nil such that rwsem_down_read_failed() cannot
* race with do_exit() by always holding a reference count
* to the task to wakeup.
*/
smp_store_release(&waiter->task, NULL);
+ /*
+ * Ensure issuing the wakeup (either by us or someone else)
+ * after setting the reader waiter to nil.
+ */
+ wake_q_add(wake_q, tsk);
+ /* wake_q_add() already takes the task ref */
+ put_task_struct(tsk);
}
adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 594c3fa..fcfe2b3 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -408,10 +408,11 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task)
* its already queued (either by us or someone else) and will get the
* wakeup due to that.
*
- * This cmpxchg() executes a full barrier, which pairs with the full
- * barrier executed by the wakeup in wake_up_q().
+ * In order to ensure that a pending wakeup will observe our pending
+ * state, even in the failed case, an explicit smp_mb() must be used.
*/
- if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
+ smp_mb__before_atomic();
+ if (cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))
return;
head->count++;
@@ -6134,7 +6135,9 @@ int sched_isolate_cpu(int cpu)
smp_call_function_any(&avail_cpus, timer_quiesce_cpu, &cpu, 1);
watchdog_disable(cpu);
+ irq_lock_sparse();
stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0);
+ irq_unlock_sparse();
calc_load_migrate(rq);
update_max_interval();
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 176040f..3a84a1a 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -968,7 +968,7 @@ static struct kobj_type sugov_tunables_ktype = {
/********************** cpufreq governor interface *********************/
-struct cpufreq_governor schedutil_gov;
+static struct cpufreq_governor schedutil_gov;
static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
{
@@ -1272,7 +1272,7 @@ static void sugov_limits(struct cpufreq_policy *policy)
sg_policy->need_freq_update = true;
}
-struct cpufreq_governor schedutil_gov = {
+static struct cpufreq_governor schedutil_gov = {
.name = "schedutil",
.owner = THIS_MODULE,
.dynamic_switching = true,
@@ -1295,36 +1295,3 @@ static int __init sugov_register(void)
return cpufreq_register_governor(&schedutil_gov);
}
fs_initcall(sugov_register);
-
-#ifdef CONFIG_ENERGY_MODEL
-extern bool sched_energy_update;
-extern struct mutex sched_energy_mutex;
-
-static void rebuild_sd_workfn(struct work_struct *work)
-{
- mutex_lock(&sched_energy_mutex);
- sched_energy_update = true;
- rebuild_sched_domains();
- sched_energy_update = false;
- mutex_unlock(&sched_energy_mutex);
-}
-static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
-
-/*
- * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
- * on governor changes to make sure the scheduler knows about it.
- */
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
- struct cpufreq_governor *old_gov)
-{
- if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
- /*
- * When called from the cpufreq_register_driver() path, the
- * cpu_hotplug_lock is already held, so use a work item to
- * avoid nested locking in rebuild_sched_domains().
- */
- schedule_work(&rebuild_sd_work);
- }
-
-}
-#endif
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 0e97ca9..1b99eef 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -140,9 +140,9 @@ static int psi_bug __read_mostly;
DEFINE_STATIC_KEY_FALSE(psi_disabled);
#ifdef CONFIG_PSI_DEFAULT_DISABLED
-bool psi_enable;
+static bool psi_enable;
#else
-bool psi_enable = true;
+static bool psi_enable = true;
#endif
static int __init setup_psi(char *str)
{
@@ -165,7 +165,7 @@ static struct psi_group psi_system = {
.pcpu = &system_group_pcpu,
};
-static void psi_update_work(struct work_struct *work);
+static void psi_avgs_work(struct work_struct *work);
static void group_init(struct psi_group *group)
{
@@ -173,9 +173,9 @@ static void group_init(struct psi_group *group)
for_each_possible_cpu(cpu)
seqcount_init(&per_cpu_ptr(group->pcpu, cpu)->seq);
- group->next_update = sched_clock() + psi_period;
- INIT_DELAYED_WORK(&group->clock_work, psi_update_work);
- mutex_init(&group->stat_lock);
+ group->avg_next_update = sched_clock() + psi_period;
+ INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
+ mutex_init(&group->avgs_lock);
}
void __init psi_init(void)
@@ -210,20 +210,23 @@ static bool test_state(unsigned int *tasks, enum psi_states state)
}
}
-static void get_recent_times(struct psi_group *group, int cpu, u32 *times)
+static void get_recent_times(struct psi_group *group, int cpu, u32 *times,
+ u32 *pchanged_states)
{
struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
- unsigned int tasks[NR_PSI_TASK_COUNTS];
u64 now, state_start;
+ enum psi_states s;
unsigned int seq;
- int s;
+ u32 state_mask;
+
+ *pchanged_states = 0;
/* Snapshot a coherent view of the CPU state */
do {
seq = read_seqcount_begin(&groupc->seq);
now = cpu_clock(cpu);
memcpy(times, groupc->times, sizeof(groupc->times));
- memcpy(tasks, groupc->tasks, sizeof(groupc->tasks));
+ state_mask = groupc->state_mask;
state_start = groupc->state_start;
} while (read_seqcount_retry(&groupc->seq, seq));
@@ -239,13 +242,15 @@ static void get_recent_times(struct psi_group *group, int cpu, u32 *times)
* (u32) and our reported pressure close to what's
* actually happening.
*/
- if (test_state(tasks, s))
+ if (state_mask & (1 << s))
times[s] += now - state_start;
delta = times[s] - groupc->times_prev[s];
groupc->times_prev[s] = times[s];
times[s] = delta;
+ if (delta)
+ *pchanged_states |= (1 << s);
}
}
@@ -269,17 +274,14 @@ static void calc_avgs(unsigned long avg[3], int missed_periods,
avg[2] = calc_load(avg[2], EXP_300s, pct);
}
-static bool update_stats(struct psi_group *group)
+static void collect_percpu_times(struct psi_group *group, u32 *pchanged_states)
{
u64 deltas[NR_PSI_STATES - 1] = { 0, };
- unsigned long missed_periods = 0;
unsigned long nonidle_total = 0;
- u64 now, expires, period;
+ u32 changed_states = 0;
int cpu;
int s;
- mutex_lock(&group->stat_lock);
-
/*
* Collect the per-cpu time buckets and average them into a
* single time sample that is normalized to wallclock time.
@@ -291,8 +293,11 @@ static bool update_stats(struct psi_group *group)
for_each_possible_cpu(cpu) {
u32 times[NR_PSI_STATES];
u32 nonidle;
+ u32 cpu_changed_states;
- get_recent_times(group, cpu, times);
+ get_recent_times(group, cpu, times,
+ &cpu_changed_states);
+ changed_states |= cpu_changed_states;
nonidle = nsecs_to_jiffies(times[PSI_NONIDLE]);
nonidle_total += nonidle;
@@ -317,11 +322,19 @@ static bool update_stats(struct psi_group *group)
for (s = 0; s < NR_PSI_STATES - 1; s++)
group->total[s] += div_u64(deltas[s], max(nonidle_total, 1UL));
+ if (pchanged_states)
+ *pchanged_states = changed_states;
+}
+
+static u64 update_averages(struct psi_group *group, u64 now)
+{
+ unsigned long missed_periods = 0;
+ u64 expires, period;
+ u64 avg_next_update;
+ int s;
+
/* avgX= */
- now = sched_clock();
- expires = group->next_update;
- if (now < expires)
- goto out;
+ expires = group->avg_next_update;
if (now - expires >= psi_period)
missed_periods = div_u64(now - expires, psi_period);
@@ -332,14 +345,14 @@ static bool update_stats(struct psi_group *group)
* But the deltas we sample out of the per-cpu buckets above
* are based on the actual time elapsing between clock ticks.
*/
- group->next_update = expires + ((1 + missed_periods) * psi_period);
- period = now - (group->last_update + (missed_periods * psi_period));
- group->last_update = now;
+ avg_next_update = expires + ((1 + missed_periods) * psi_period);
+ period = now - (group->avg_last_update + (missed_periods * psi_period));
+ group->avg_last_update = now;
for (s = 0; s < NR_PSI_STATES - 1; s++) {
u32 sample;
- sample = group->total[s] - group->total_prev[s];
+ sample = group->total[s] - group->avg_total[s];
/*
* Due to the lockless sampling of the time buckets,
* recorded time deltas can slip into the next period,
@@ -359,23 +372,30 @@ static bool update_stats(struct psi_group *group)
*/
if (sample > period)
sample = period;
- group->total_prev[s] += sample;
+ group->avg_total[s] += sample;
calc_avgs(group->avg[s], missed_periods, sample, period);
}
-out:
- mutex_unlock(&group->stat_lock);
- return nonidle_total;
+
+ return avg_next_update;
}
-static void psi_update_work(struct work_struct *work)
+static void psi_avgs_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct psi_group *group;
+ u32 changed_states;
bool nonidle;
+ u64 now;
dwork = to_delayed_work(work);
- group = container_of(dwork, struct psi_group, clock_work);
+ group = container_of(dwork, struct psi_group, avgs_work);
+ mutex_lock(&group->avgs_lock);
+
+ now = sched_clock();
+
+ collect_percpu_times(group, &changed_states);
+ nonidle = changed_states & (1 << PSI_NONIDLE);
/*
* If there is task activity, periodically fold the per-cpu
* times and feed samples into the running averages. If things
@@ -383,18 +403,15 @@ static void psi_update_work(struct work_struct *work)
* Once restarted, we'll catch up the running averages in one
* go - see calc_avgs() and missed_periods.
*/
-
- nonidle = update_stats(group);
+ if (now >= group->avg_next_update)
+ group->avg_next_update = update_averages(group, now);
if (nonidle) {
- unsigned long delay = 0;
- u64 now;
-
- now = sched_clock();
- if (group->next_update > now)
- delay = nsecs_to_jiffies(group->next_update - now) + 1;
- schedule_delayed_work(dwork, delay);
+ schedule_delayed_work(dwork, nsecs_to_jiffies(
+ group->avg_next_update - now) + 1);
}
+
+ mutex_unlock(&group->avgs_lock);
}
static void record_times(struct psi_group_cpu *groupc, int cpu,
@@ -407,15 +424,15 @@ static void record_times(struct psi_group_cpu *groupc, int cpu,
delta = now - groupc->state_start;
groupc->state_start = now;
- if (test_state(groupc->tasks, PSI_IO_SOME)) {
+ if (groupc->state_mask & (1 << PSI_IO_SOME)) {
groupc->times[PSI_IO_SOME] += delta;
- if (test_state(groupc->tasks, PSI_IO_FULL))
+ if (groupc->state_mask & (1 << PSI_IO_FULL))
groupc->times[PSI_IO_FULL] += delta;
}
- if (test_state(groupc->tasks, PSI_MEM_SOME)) {
+ if (groupc->state_mask & (1 << PSI_MEM_SOME)) {
groupc->times[PSI_MEM_SOME] += delta;
- if (test_state(groupc->tasks, PSI_MEM_FULL))
+ if (groupc->state_mask & (1 << PSI_MEM_FULL))
groupc->times[PSI_MEM_FULL] += delta;
else if (memstall_tick) {
u32 sample;
@@ -436,10 +453,10 @@ static void record_times(struct psi_group_cpu *groupc, int cpu,
}
}
- if (test_state(groupc->tasks, PSI_CPU_SOME))
+ if (groupc->state_mask & (1 << PSI_CPU_SOME))
groupc->times[PSI_CPU_SOME] += delta;
- if (test_state(groupc->tasks, PSI_NONIDLE))
+ if (groupc->state_mask & (1 << PSI_NONIDLE))
groupc->times[PSI_NONIDLE] += delta;
}
@@ -448,6 +465,8 @@ static void psi_group_change(struct psi_group *group, int cpu,
{
struct psi_group_cpu *groupc;
unsigned int t, m;
+ enum psi_states s;
+ u32 state_mask = 0;
groupc = per_cpu_ptr(group->pcpu, cpu);
@@ -480,6 +499,13 @@ static void psi_group_change(struct psi_group *group, int cpu,
if (set & (1 << t))
groupc->tasks[t]++;
+ /* Calculate state mask representing active states */
+ for (s = 0; s < NR_PSI_STATES; s++) {
+ if (test_state(groupc->tasks, s))
+ state_mask |= (1 << s);
+ }
+ groupc->state_mask = state_mask;
+
write_seqcount_end(&groupc->seq);
}
@@ -537,13 +563,13 @@ void psi_task_change(struct task_struct *task, int clear, int set)
*/
if (unlikely((clear & TSK_RUNNING) &&
(task->flags & PF_WQ_WORKER) &&
- wq_worker_last_func(task) == psi_update_work))
+ wq_worker_last_func(task) == psi_avgs_work))
wake_clock = false;
while ((group = iterate_groups(task, &iter))) {
psi_group_change(group, cpu, clear, set);
- if (wake_clock && !delayed_work_pending(&group->clock_work))
- schedule_delayed_work(&group->clock_work, PSI_FREQ);
+ if (wake_clock && !delayed_work_pending(&group->avgs_work))
+ schedule_delayed_work(&group->avgs_work, PSI_FREQ);
}
}
@@ -640,7 +666,7 @@ void psi_cgroup_free(struct cgroup *cgroup)
if (static_branch_likely(&psi_disabled))
return;
- cancel_delayed_work_sync(&cgroup->psi.clock_work);
+ cancel_delayed_work_sync(&cgroup->psi.avgs_work);
free_percpu(cgroup->psi.pcpu);
}
@@ -697,11 +723,18 @@ void cgroup_move_task(struct task_struct *task, struct css_set *to)
int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
int full;
+ u64 now;
if (static_branch_likely(&psi_disabled))
return -EOPNOTSUPP;
- update_stats(group);
+ /* Update averages before reporting them */
+ mutex_lock(&group->avgs_lock);
+ now = sched_clock();
+ collect_percpu_times(group, NULL);
+ if (now >= group->avg_next_update)
+ group->avg_next_update = update_averages(group, now);
+ mutex_unlock(&group->avgs_lock);
for (full = 0; full < 2 - (res == PSI_CPU); full++) {
unsigned long avg[3];
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a0b7281..1516804 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2659,7 +2659,7 @@ unsigned long scale_irq_capacity(unsigned long util, unsigned long irq, unsigned
}
#endif
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+#ifdef CONFIG_ENERGY_MODEL
#define perf_domain_span(pd) (to_cpumask(((pd)->em_pd->cpus)))
#else
#define perf_domain_span(pd) NULL
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 06dbb45..9c392dd 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -202,9 +202,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
}
DEFINE_STATIC_KEY_FALSE(sched_energy_present);
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-DEFINE_MUTEX(sched_energy_mutex);
-bool sched_energy_update;
+#ifdef CONFIG_ENERGY_MODEL
static void free_pd(struct perf_domain *pd)
{
@@ -292,7 +290,6 @@ static void sched_energy_set(bool has_eas)
* 1. an Energy Model (EM) is available;
* 2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy.
* 3. the EM complexity is low enough to keep scheduling overheads low;
- * 4. schedutil is driving the frequency of all CPUs of the rd;
*
* The complexity of the Energy Model is defined as:
*
@@ -312,15 +309,12 @@ static void sched_energy_set(bool has_eas)
*/
#define EM_MAX_COMPLEXITY 2048
-extern struct cpufreq_governor schedutil_gov;
static bool build_perf_domains(const struct cpumask *cpu_map)
{
int i, nr_pd = 0, nr_cs = 0, nr_cpus = cpumask_weight(cpu_map);
struct perf_domain *pd = NULL, *tmp;
int cpu = cpumask_first(cpu_map);
struct root_domain *rd = cpu_rq(cpu)->rd;
- struct cpufreq_policy *policy;
- struct cpufreq_governor *gov;
/* EAS is enabled for asymmetric CPU capacity topologies. */
if (!per_cpu(sd_asym_cpucapacity, cpu)) {
@@ -336,19 +330,6 @@ static bool build_perf_domains(const struct cpumask *cpu_map)
if (find_pd(pd, i))
continue;
- /* Do not attempt EAS if schedutil is not being used. */
- policy = cpufreq_cpu_get(i);
- if (!policy)
- goto free;
- gov = policy->governor;
- cpufreq_cpu_put(policy);
- if (gov != &schedutil_gov) {
- if (rd->pd)
- pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
- cpumask_pr_args(cpu_map));
- goto free;
- }
-
/* Create the new pd and add it to the local list. */
tmp = pd_init(i);
if (!tmp)
@@ -392,7 +373,7 @@ static bool build_perf_domains(const struct cpumask *cpu_map)
}
#else
static void free_pd(struct perf_domain *pd) { }
-#endif /* CONFIG_ENERGY_MODEL && CONFIG_CPU_FREQ_GOV_SCHEDUTIL*/
+#endif /* CONFIG_ENERGY_MODEL */
static void free_rootdomain(struct rcu_head *rcu)
{
@@ -2214,10 +2195,10 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
;
}
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
+#ifdef CONFIG_ENERGY_MODEL
/* Build perf. domains: */
for (i = 0; i < ndoms_new; i++) {
- for (j = 0; j < n && !sched_energy_update; j++) {
+ for (j = 0; j < n; j++) {
if (cpumask_equal(doms_new[i], doms_cur[j]) &&
cpu_rq(cpumask_first(doms_cur[j]))->rd->pd) {
has_eas = true;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cf9780a..778ce93 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1264,31 +1264,6 @@ void tick_irq_enter(void)
* High resolution timer specific code
*/
#ifdef CONFIG_HIGH_RES_TIMERS
-static void update_rq_stats(void)
-{
- unsigned long jiffy_gap = 0;
- unsigned int rq_avg = 0;
- unsigned long flags = 0;
-
- jiffy_gap = jiffies - rq_info.rq_poll_last_jiffy;
- if (jiffy_gap >= rq_info.rq_poll_jiffies) {
- spin_lock_irqsave(&rq_lock, flags);
- if (!rq_info.rq_avg)
- rq_info.rq_poll_total_jiffies = 0;
- rq_avg = nr_running() * 10;
- if (rq_info.rq_poll_total_jiffies) {
- rq_avg = (rq_avg * jiffy_gap) +
- (rq_info.rq_avg *
- rq_info.rq_poll_total_jiffies);
- do_div(rq_avg,
- rq_info.rq_poll_total_jiffies + jiffy_gap);
- }
- rq_info.rq_avg = rq_avg;
- rq_info.rq_poll_total_jiffies += jiffy_gap;
- rq_info.rq_poll_last_jiffy = jiffies;
- spin_unlock_irqrestore(&rq_lock, flags);
- }
-}
static void wakeup_user(void)
{
unsigned long jiffy_gap;
@@ -1322,10 +1297,6 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
if (rq_info.init == 1 &&
tick_do_timer_cpu == smp_processor_id()) {
/*
- * update run queue statistics
- */
- update_rq_stats();
- /*
* wakeup user if needed
*/
wakeup_user();
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9ef9ece..7d8ae47 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -920,6 +920,16 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
* CONTEXT:
* spin_lock_irq(rq->lock)
*
+ * This function is called during schedule() when a kworker is going
+ * to sleep. It's used by psi to identify aggregation workers during
+ * dequeuing, to allow periodic aggregation to shut-off when that
+ * worker is the last task in the system or cgroup to go to sleep.
+ *
+ * As this function doesn't involve any workqueue-related locking, it
+ * only returns stable values when called from inside the scheduler's
+ * queuing and dequeuing paths, when @task, which must be a kworker,
+ * is guaranteed to not be processing any works.
+ *
* Return:
* The last work function %current executed as a worker, NULL if it
* hasn't executed any work yet.
diff --git a/mm/Kconfig b/mm/Kconfig
index 9dce3a8..6975182 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -781,6 +781,28 @@
information includes global and per chunk statistics, which can
be used to help understand percpu memory usage.
+config ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
+ def_bool n
+
+config SPECULATIVE_PAGE_FAULT
+ bool "Speculative page faults"
+ default y
+ depends on ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
+ depends on MMU && SMP
+ help
+ Try to handle user space page faults without holding the mmap_sem.
+
+	  This should allow better concurrency for massively threaded processes
+	  since the page fault handler will not wait for other threads' memory
+	  layout changes to be done, assuming that such changes are done in
+	  another part of the process's memory space. This type of page fault
+	  is named a speculative page fault.
+
+	  If the speculative page fault fails because concurrency is
+	  detected or because the underlying PMD or PTE tables are not
+	  yet allocated, the speculative handling fails and a classic
+	  page fault is then tried.
+
config GUP_BENCHMARK
bool "Enable infrastructure for get_user_pages_fast() benchmarking"
default n
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8a8bb87..72e6d0c 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -689,6 +689,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
bdi->cgwb_congested_tree = RB_ROOT;
mutex_init(&bdi->cgwb_release_mutex);
+ init_rwsem(&bdi->wb_switch_rwsem);
ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
if (!ret) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 10e8367..9e5f66c 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3624,7 +3624,6 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
copy_user_huge_page(new_page, old_page, address, vma,
pages_per_huge_page(h));
__SetPageUptodate(new_page);
- set_page_huge_active(new_page);
mmun_start = haddr;
mmun_end = mmun_start + huge_page_size(h);
@@ -3646,6 +3645,7 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
make_huge_pte(vma, new_page, 1));
page_remove_rmap(old_page, true);
hugepage_add_new_anon_rmap(new_page, vma, haddr);
+ set_page_huge_active(new_page);
/* Make the old page be freed below */
new_page = old_page;
}
@@ -3730,6 +3730,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
pte_t new_pte;
spinlock_t *ptl;
unsigned long haddr = address & huge_page_mask(h);
+ bool new_page = false;
/*
* Currently, we are forced to kill the process in the event the
@@ -3791,7 +3792,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
}
clear_huge_page(page, address, pages_per_huge_page(h));
__SetPageUptodate(page);
- set_page_huge_active(page);
+ new_page = true;
if (vma->vm_flags & VM_MAYSHARE) {
int err = huge_add_to_page_cache(page, mapping, idx);
@@ -3862,6 +3863,15 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
}
spin_unlock(ptl);
+
+ /*
+ * Only make newly allocated pages active. Existing pages found
+ * in the pagecache could be !page_huge_active() if they have been
+ * isolated for migration.
+ */
+ if (new_page)
+ set_page_huge_active(page);
+
unlock_page(page);
out:
return ret;
@@ -4096,7 +4106,6 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
* the set_pte_at() write.
*/
__SetPageUptodate(page);
- set_page_huge_active(page);
mapping = dst_vma->vm_file->f_mapping;
idx = vma_hugecache_offset(h, dst_vma, dst_addr);
@@ -4164,6 +4173,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
update_mmu_cache(dst_vma, dst_addr, dst_pte);
spin_unlock(ptl);
+ set_page_huge_active(page);
if (vm_shared)
unlock_page(page);
ret = 0;
diff --git a/mm/migrate.c b/mm/migrate.c
index f381635..b80f4da 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1305,6 +1305,16 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
lock_page(hpage);
}
+ /*
+ * Check for pages which are in the process of being freed. Without
+ * page_mapping() set, hugetlbfs specific move page routine will not
+ * be called and we could leak usage counts for subpools.
+ */
+ if (page_private(hpage) && !page_mapping(hpage)) {
+ rc = -EBUSY;
+ goto out_unlock;
+ }
+
if (PageAnon(hpage))
anon_vma = page_get_anon_vma(hpage);
@@ -1335,6 +1345,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
put_new_page = NULL;
}
+out_unlock:
unlock_page(hpage);
out:
if (rc != -EAGAIN)
diff --git a/mm/mmap.c b/mm/mmap.c
index 53bbe0d..2ffb564 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2400,12 +2400,11 @@ int expand_downwards(struct vm_area_struct *vma,
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *prev;
- int error;
+ int error = 0;
address &= PAGE_MASK;
- error = security_mmap_addr(address);
- if (error)
- return error;
+ if (address < mmap_min_addr)
+ return -EPERM;
/* Enforce stack_guard_gap */
prev = vma->vm_prev;
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index c2abe9d..40c5102 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1478,6 +1478,10 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
if (params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER))
sta->sta.tdls = true;
+ if (sta->sta.tdls && sdata->vif.type == NL80211_IFTYPE_STATION &&
+ !sdata->u.mgd.associated)
+ return -EINVAL;
+
err = sta_apply_parameters(local, sta, params);
if (err) {
sta_info_free(local, sta);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 828348b..e946ee4 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -221,7 +221,7 @@ static void ieee80211_handle_mu_mimo_mon(struct ieee80211_sub_if_data *sdata,
struct ieee80211_hdr_3addr hdr;
u8 category;
u8 action_code;
- } __packed action;
+ } __packed __aligned(2) action;
if (!sdata)
return;
@@ -2678,7 +2678,9 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
skb_set_queue_mapping(skb, q);
if (!--mesh_hdr->ttl) {
- IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
+ if (!is_multicast_ether_addr(hdr->addr1))
+ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh,
+ dropped_frames_ttl);
goto out;
}
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index c7ccd7b..743cde6 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3614,10 +3614,10 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
/* We need a bit of data queued to build aggregates properly, so
* instruct the TCP stack to allow more than a single ms of data
* to be queued in the stack. The value is a bit-shift of 1
- * second, so 8 is ~4ms of queued data. Only affects local TCP
+ * second, so 7 is ~8ms of queued data. Only affects local TCP
* sockets.
*/
- sk_pacing_shift_update(skb->sk, 8);
+ sk_pacing_shift_update(skb->sk, 7);
fast_tx = rcu_dereference(sta->fast_tx);
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index d5d0f31..1c2d500 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1291,7 +1291,7 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
* definitions (the "2.4 GHz band", the "5 GHz band" and the "60GHz band"),
* however it is safe for now to assume that a frequency rule should not be
* part of a frequency's band if the start freq or end freq are off by more
- * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 10 GHz for the
+ * than 2 GHz for the 2.4 and 5 GHz bands, and by more than 20 GHz for the
* 60 GHz band.
* This resolution can be lowered and should be considered as we add
* regulatory rule support for other "bands".
@@ -1306,7 +1306,7 @@ static bool freq_in_rule_band(const struct ieee80211_freq_range *freq_range,
* with the Channel starting frequency above 45 GHz.
*/
u32 limit = freq_khz > 45 * ONE_GHZ_IN_KHZ ?
- 10 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
+ 20 * ONE_GHZ_IN_KHZ : 2 * ONE_GHZ_IN_KHZ;
if (abs(freq_khz - freq_range->start_freq_khz) <= limit)
return true;
if (abs(freq_khz - freq_range->end_freq_khz) <= limit)
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 5fb078a..009e469 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -532,7 +532,8 @@ static int snd_compress_check_input(struct snd_compr_params *params)
{
/* first let's check the buffer parameter's */
if (params->buffer.fragment_size == 0 ||
- params->buffer.fragments > U32_MAX / params->buffer.fragment_size)
+ params->buffer.fragments > U32_MAX / params->buffer.fragment_size ||
+ params->buffer.fragments == 0)
return -EINVAL;
/* now codec parameters */
diff --git a/sound/soc/codecs/rt274.c b/sound/soc/codecs/rt274.c
index d88e673..18a931c 100644
--- a/sound/soc/codecs/rt274.c
+++ b/sound/soc/codecs/rt274.c
@@ -1126,8 +1126,11 @@ static int rt274_i2c_probe(struct i2c_client *i2c,
return ret;
}
- regmap_read(rt274->regmap,
+ ret = regmap_read(rt274->regmap,
RT274_GET_PARAM(AC_NODE_ROOT, AC_PAR_VENDOR_ID), &val);
+ if (ret)
+ return ret;
+
if (val != RT274_VENDOR_ID) {
dev_err(&i2c->dev,
"Device with ID register %#x is not rt274\n", val);
diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h
index 8068140..cdd659f 100644
--- a/sound/soc/codecs/rt5682.h
+++ b/sound/soc/codecs/rt5682.h
@@ -849,18 +849,18 @@
#define RT5682_SCLK_SRC_PLL2 (0x2 << 13)
#define RT5682_SCLK_SRC_SDW (0x3 << 13)
#define RT5682_SCLK_SRC_RCCLK (0x4 << 13)
-#define RT5682_PLL1_SRC_MASK (0x3 << 10)
-#define RT5682_PLL1_SRC_SFT 10
-#define RT5682_PLL1_SRC_MCLK (0x0 << 10)
-#define RT5682_PLL1_SRC_BCLK1 (0x1 << 10)
-#define RT5682_PLL1_SRC_SDW (0x2 << 10)
-#define RT5682_PLL1_SRC_RC (0x3 << 10)
-#define RT5682_PLL2_SRC_MASK (0x3 << 8)
-#define RT5682_PLL2_SRC_SFT 8
-#define RT5682_PLL2_SRC_MCLK (0x0 << 8)
-#define RT5682_PLL2_SRC_BCLK1 (0x1 << 8)
-#define RT5682_PLL2_SRC_SDW (0x2 << 8)
-#define RT5682_PLL2_SRC_RC (0x3 << 8)
+#define RT5682_PLL2_SRC_MASK (0x3 << 10)
+#define RT5682_PLL2_SRC_SFT 10
+#define RT5682_PLL2_SRC_MCLK (0x0 << 10)
+#define RT5682_PLL2_SRC_BCLK1 (0x1 << 10)
+#define RT5682_PLL2_SRC_SDW (0x2 << 10)
+#define RT5682_PLL2_SRC_RC (0x3 << 10)
+#define RT5682_PLL1_SRC_MASK (0x3 << 8)
+#define RT5682_PLL1_SRC_SFT 8
+#define RT5682_PLL1_SRC_MCLK (0x0 << 8)
+#define RT5682_PLL1_SRC_BCLK1 (0x1 << 8)
+#define RT5682_PLL1_SRC_SDW (0x2 << 8)
+#define RT5682_PLL1_SRC_RC (0x3 << 8)
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index 392d5eef..99e07b0 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -86,49 +86,49 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- ret = snprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
+ ret = scnprintf(buf, PAGE_SIZE, "PDCR: %08x\nPTCR: %08x\n",
pdcr, ptcr);
if (ptcr & IMX_AUDMUX_V2_PTCR_TFSDIR)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxFS output from %s, ",
audmux_port_string((ptcr >> 27) & 0x7));
else
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxFS input, ");
if (ptcr & IMX_AUDMUX_V2_PTCR_TCLKDIR)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxClk output from %s",
audmux_port_string((ptcr >> 22) & 0x7));
else
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"TxClk input");
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
if (ptcr & IMX_AUDMUX_V2_PTCR_SYN) {
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"Port is symmetric");
} else {
if (ptcr & IMX_AUDMUX_V2_PTCR_RFSDIR)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxFS output from %s, ",
audmux_port_string((ptcr >> 17) & 0x7));
else
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxFS input, ");
if (ptcr & IMX_AUDMUX_V2_PTCR_RCLKDIR)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxClk output from %s",
audmux_port_string((ptcr >> 12) & 0x7));
else
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"RxClk input");
}
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
"\nData received from %s\n",
audmux_port_string((pdcr >> 13) & 0x7));
diff --git a/sound/soc/intel/boards/broadwell.c b/sound/soc/intel/boards/broadwell.c
index 7b0ee67..78ec97b 100644
--- a/sound/soc/intel/boards/broadwell.c
+++ b/sound/soc/intel/boards/broadwell.c
@@ -192,7 +192,7 @@ static struct snd_soc_dai_link broadwell_rt286_dais[] = {
.stream_name = "Loopback",
.cpu_dai_name = "Loopback Pin",
.platform_name = "haswell-pcm-audio",
- .dynamic = 0,
+ .dynamic = 1,
.codec_name = "snd-soc-dummy",
.codec_dai_name = "snd-soc-dummy-dai",
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
diff --git a/sound/soc/intel/boards/haswell.c b/sound/soc/intel/boards/haswell.c
index eab1f43..a402298 100644
--- a/sound/soc/intel/boards/haswell.c
+++ b/sound/soc/intel/boards/haswell.c
@@ -146,7 +146,7 @@ static struct snd_soc_dai_link haswell_rt5640_dais[] = {
.stream_name = "Loopback",
.cpu_dai_name = "Loopback Pin",
.platform_name = "haswell-pcm-audio",
- .dynamic = 0,
+ .dynamic = 1,
.codec_name = "snd-soc-dummy",
.codec_dai_name = "snd-soc-dummy-dai",
.trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index f8d35c5..252ff3f 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2039,19 +2039,19 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
out = is_connected_output_ep(w, NULL, NULL);
}
- ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
+ ret = scnprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
w->name, w->power ? "On" : "Off",
w->force ? " (forced)" : "", in, out);
if (w->reg >= 0)
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
" - R%d(0x%x) mask 0x%x",
w->reg, w->reg, w->mask << w->shift);
- ret += snprintf(buf + ret, PAGE_SIZE - ret, "\n");
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
if (w->sname)
- ret += snprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, " stream %s %s\n",
w->sname,
w->active ? "active" : "inactive");
@@ -2064,7 +2064,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
if (!p->connect)
continue;
- ret += snprintf(buf + ret, PAGE_SIZE - ret,
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret,
" %s \"%s\" \"%s\"\n",
(rdir == SND_SOC_DAPM_DIR_IN) ? "in" : "out",
p->name ? p->name : "static",
diff --git a/tools/testing/selftests/gpio/gpio-mockup-chardev.c b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
index f8d468f..aaa1e9f 100644
--- a/tools/testing/selftests/gpio/gpio-mockup-chardev.c
+++ b/tools/testing/selftests/gpio/gpio-mockup-chardev.c
@@ -37,7 +37,7 @@ static int get_debugfs(char **path)
struct libmnt_table *tb;
struct libmnt_iter *itr = NULL;
struct libmnt_fs *fs;
- int found = 0;
+ int found = 0, ret;
cxt = mnt_new_context();
if (!cxt)
@@ -58,8 +58,11 @@ static int get_debugfs(char **path)
break;
}
}
- if (found)
- asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
+ if (found) {
+ ret = asprintf(path, "%s/gpio", mnt_fs_get_target(fs));
+ if (ret < 0)
+ err(EXIT_FAILURE, "failed to format string");
+ }
mnt_free_iter(itr);
mnt_free_context(cxt);
diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
index 6fd8c08..fb5d2d1 100644
--- a/tools/testing/selftests/kvm/lib/kvm_util.c
+++ b/tools/testing/selftests/kvm/lib/kvm_util.c
@@ -590,7 +590,7 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
* already exist.
*/
region = (struct userspace_mem_region *) userspace_mem_region_find(
- vm, guest_paddr, guest_paddr + npages * vm->page_size);
+ vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
if (region != NULL)
TEST_ASSERT(false, "overlapping userspace_mem_region already "
"exists\n"
@@ -606,15 +606,10 @@ void vm_userspace_mem_region_add(struct kvm_vm *vm,
region = region->next) {
if (region->region.slot == slot)
break;
- if ((guest_paddr <= (region->region.guest_phys_addr
- + region->region.memory_size))
- && ((guest_paddr + npages * vm->page_size)
- >= region->region.guest_phys_addr))
- break;
}
if (region != NULL)
TEST_ASSERT(false, "A mem region with the requested slot "
- "or overlapping physical memory range already exists.\n"
+ "already exists.\n"
" requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
" existing slot: %u paddr: 0x%lx size: 0x%lx",
slot, guest_paddr, npages,
diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c
index e20b017..b206553 100644
--- a/tools/testing/selftests/rtc/rtctest.c
+++ b/tools/testing/selftests/rtc/rtctest.c
@@ -145,15 +145,12 @@ TEST_F(rtc, alarm_alm_set) {
rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
ASSERT_NE(-1, rc);
- EXPECT_NE(0, rc);
+ ASSERT_NE(0, rc);
/* Disable alarm interrupts */
rc = ioctl(self->fd, RTC_AIE_OFF, 0);
ASSERT_NE(-1, rc);
- if (rc == 0)
- return;
-
rc = read(self->fd, &data, sizeof(unsigned long));
ASSERT_NE(-1, rc);
TH_LOG("data: %lx", data);
@@ -202,7 +199,109 @@ TEST_F(rtc, alarm_wkalm_set) {
rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
ASSERT_NE(-1, rc);
- EXPECT_NE(0, rc);
+ ASSERT_NE(0, rc);
+
+ rc = read(self->fd, &data, sizeof(unsigned long));
+ ASSERT_NE(-1, rc);
+
+ rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+ ASSERT_NE(-1, rc);
+
+ new = timegm((struct tm *)&tm);
+ ASSERT_EQ(new, secs);
+}
+
+TEST_F(rtc, alarm_alm_set_minute) {
+ struct timeval tv = { .tv_sec = 62 };
+ unsigned long data;
+ struct rtc_time tm;
+ fd_set readfds;
+ time_t secs, new;
+ int rc;
+
+ rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+ ASSERT_NE(-1, rc);
+
+ secs = timegm((struct tm *)&tm) + 60 - tm.tm_sec;
+ gmtime_r(&secs, (struct tm *)&tm);
+
+ rc = ioctl(self->fd, RTC_ALM_SET, &tm);
+ if (rc == -1) {
+ ASSERT_EQ(EINVAL, errno);
+ TH_LOG("skip alarms are not supported.");
+ return;
+ }
+
+ rc = ioctl(self->fd, RTC_ALM_READ, &tm);
+ ASSERT_NE(-1, rc);
+
+ TH_LOG("Alarm time now set to %02d:%02d:%02d.",
+ tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+ /* Enable alarm interrupts */
+ rc = ioctl(self->fd, RTC_AIE_ON, 0);
+ ASSERT_NE(-1, rc);
+
+ FD_ZERO(&readfds);
+ FD_SET(self->fd, &readfds);
+
+ rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
+ ASSERT_NE(-1, rc);
+ ASSERT_NE(0, rc);
+
+ /* Disable alarm interrupts */
+ rc = ioctl(self->fd, RTC_AIE_OFF, 0);
+ ASSERT_NE(-1, rc);
+
+ rc = read(self->fd, &data, sizeof(unsigned long));
+ ASSERT_NE(-1, rc);
+ TH_LOG("data: %lx", data);
+
+ rc = ioctl(self->fd, RTC_RD_TIME, &tm);
+ ASSERT_NE(-1, rc);
+
+ new = timegm((struct tm *)&tm);
+ ASSERT_EQ(new, secs);
+}
+
+TEST_F(rtc, alarm_wkalm_set_minute) {
+ struct timeval tv = { .tv_sec = 62 };
+ struct rtc_wkalrm alarm = { 0 };
+ struct rtc_time tm;
+ unsigned long data;
+ fd_set readfds;
+ time_t secs, new;
+ int rc;
+
+ rc = ioctl(self->fd, RTC_RD_TIME, &alarm.time);
+ ASSERT_NE(-1, rc);
+
+ secs = timegm((struct tm *)&alarm.time) + 60 - alarm.time.tm_sec;
+ gmtime_r(&secs, (struct tm *)&alarm.time);
+
+ alarm.enabled = 1;
+
+ rc = ioctl(self->fd, RTC_WKALM_SET, &alarm);
+ if (rc == -1) {
+ ASSERT_EQ(EINVAL, errno);
+ TH_LOG("skip alarms are not supported.");
+ return;
+ }
+
+ rc = ioctl(self->fd, RTC_WKALM_RD, &alarm);
+ ASSERT_NE(-1, rc);
+
+ TH_LOG("Alarm time now set to %02d/%02d/%02d %02d:%02d:%02d.",
+ alarm.time.tm_mday, alarm.time.tm_mon + 1,
+ alarm.time.tm_year + 1900, alarm.time.tm_hour,
+ alarm.time.tm_min, alarm.time.tm_sec);
+
+ FD_ZERO(&readfds);
+ FD_SET(self->fd, &readfds);
+
+ rc = select(self->fd + 1, &readfds, NULL, NULL, &tv);
+ ASSERT_NE(-1, rc);
+ ASSERT_NE(0, rc);
rc = read(self->fd, &data, sizeof(unsigned long));
ASSERT_NE(-1, rc);
diff --git a/tools/testing/selftests/seccomp/Makefile b/tools/testing/selftests/seccomp/Makefile
index fce7f4c..1760b3e 100644
--- a/tools/testing/selftests/seccomp/Makefile
+++ b/tools/testing/selftests/seccomp/Makefile
@@ -9,7 +9,7 @@
CFLAGS += -Wl,-no-as-needed -Wall
seccomp_bpf: seccomp_bpf.c ../kselftest_harness.h
- $(CC) $(CFLAGS) $(LDFLAGS) -lpthread $< -o $@
+ $(CC) $(CFLAGS) $(LDFLAGS) $< -lpthread -o $@
TEST_PROGS += $(BINARIES)
EXTRA_CLEAN := $(BINARIES)
diff --git a/tools/testing/selftests/vm/gup_benchmark.c b/tools/testing/selftests/vm/gup_benchmark.c
index 36df551..9601bc2 100644
--- a/tools/testing/selftests/vm/gup_benchmark.c
+++ b/tools/testing/selftests/vm/gup_benchmark.c
@@ -22,6 +22,7 @@ struct gup_benchmark {
__u64 size;
__u32 nr_pages_per_call;
__u32 flags;
+ __u64 expansion[10]; /* For future use */
};
int main(int argc, char **argv)