Merge "msm: vidc: Update extradata control API"
diff --git a/Documentation/ABI/testing/sysfs-bus-i3c b/Documentation/ABI/testing/sysfs-bus-i3c
new file mode 100644
index 0000000..d7c7485
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-i3c
@@ -0,0 +1,146 @@
+What: /sys/bus/i3c/devices/i3c-<bus-id>
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ An I3C bus. This directory will contain one sub-directory per
+ I3C device present on the bus.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/current_master
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ Expose the master that owns the bus (<bus-id>-<master-pid>) at
+ the time this file is read. Note that bus ownership can change
+ over time, so there's no guarantee that when the read() call
+ returns, the value returned is still valid.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/mode
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ I3C bus mode. Can be "pure", "mixed-fast" or "mixed-slow". See
+ the I3C specification for a detailed description of what each
+ of these modes implies.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/i3c_scl_frequency
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ The frequency (expressed in Hz) of the SCL signal when
+ operating in I3C SDR mode.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/i2c_scl_frequency
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ The frequency (expressed in Hz) of the SCL signal when
+ operating in I2C mode.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/dynamic_address
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ Dynamic address assigned to the master controller. This
+ address may change if the bus is re-initialized.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/bcr
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ BCR stands for Bus Characteristics Register and expresses the
+ device capabilities in terms of speed, maximum read/write
+ length, etc. See the I3C specification for more details.
+ This entry describes the BCR of the master controller driving
+ the bus.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/dcr
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ DCR stands for Device Characteristics Register and expresses the
+ device capabilities in terms of exposed features. See the I3C
+ specification for more details.
+ This entry describes the DCR of the master controller driving
+ the bus.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/pid
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ PID stands for Provisional ID and is used to uniquely identify
+ a device on a bus. This PID contains information about the
+ vendor, the part and an instance ID so that several devices of
+ the same type can be connected on the same bus.
+ See the I3C specification for more details.
+ This entry describes the PID of the master controller driving
+ the bus.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/hdrcap
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ Expose the HDR (High Data Rate) capabilities of a device.
+ Returns a list of supported HDR modes, each element separated
+ by a space. Modes can be "hdr-ddr", "hdr-tsp" and "hdr-tsl".
+ See the I3C specification for more details about these HDR
+ modes.
+ This entry describes the HDRCAP of the master controller
+ driving the bus.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ An I3C device present on the I3C bus identified by <bus-id>. Note
+ that all devices are represented, including the master driving
+ the bus.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/dynamic_address
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ Dynamic address assigned to device <bus-id>-<device-pid>. This
+ address may change if the bus is re-initialized.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/bcr
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ BCR stands for Bus Characteristics Register and expresses the
+ device capabilities in terms of speed, maximum read/write
+ length, etc. See the I3C specification for more details.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/dcr
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ DCR stands for Device Characteristics Register and expresses the
+ device capabilities in terms of exposed features. See the I3C
+ specification for more details.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/pid
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ PID stands for Provisional ID and is used to uniquely identify
+ a device on a bus. This PID contains information about the
+ vendor, the part and an instance ID so that several devices of
+ the same type can be connected on the same bus.
+ See the I3C specification for more details.
+
+What: /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>/hdrcap
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ Expose the HDR (High Data Rate) capabilities of a device.
+ Returns a list of supported HDR modes, each element separated
+ by a space. Modes can be "hdr-ddr", "hdr-tsp" and "hdr-tsl".
+ See the I3C specification for more details about these HDR
+ modes.
+
+What: /sys/bus/i3c/devices/<bus-id>-<device-pid>
+KernelVersion: 4.20
+Contact: linux-i3c@vger.kernel.org
+Description:
+ These directories are just symbolic links to
+ /sys/bus/i3c/devices/i3c-<bus-id>/<bus-id>-<device-pid>.
diff --git a/Documentation/accounting/psi.txt b/Documentation/accounting/psi.txt
index 3753a82..b8ca28b 100644
--- a/Documentation/accounting/psi.txt
+++ b/Documentation/accounting/psi.txt
@@ -62,3 +62,12 @@
tracked and exported as well, to allow detection of latency spikes
which wouldn't necessarily make a dent in the time averages, or to
average trends over custom time frames.
+
+Cgroup2 interface
+=================
+
+In a system with a CONFIG_CGROUPS=y kernel and the cgroup2 filesystem
+mounted, pressure stall information is also tracked for tasks grouped
+into cgroups. Each subdirectory in the cgroupfs mountpoint contains
+cpu.pressure, memory.pressure, and io.pressure files; the format is
+the same as the /proc/pressure/ files.
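+
+As an illustration only (not part of the interface description above), a
+user-space monitor could read one of these files with the minimal C sketch
+below; the cgroup path is hypothetical and the parsing relies on the
+"some avg10=..." line format documented earlier in this file.
+
+  /* Minimal sketch: print the 10s "some" average of a pressure file. */
+  #include <stdio.h>
+
+  int main(void)
+  {
+          /* /proc/pressure/memory has the same format for the whole system. */
+          const char *path = "/sys/fs/cgroup/workload/memory.pressure";
+          char line[256];
+          double avg10;
+          FILE *f = fopen(path, "r");
+
+          if (!f) {
+                  perror(path);
+                  return 1;
+          }
+          while (fgets(line, sizeof(line), f)) {
+                  /* e.g. "some avg10=0.12 avg60=0.07 avg300=0.01 total=123456" */
+                  if (sscanf(line, "some avg10=%lf", &avg10) == 1)
+                          printf("some avg10: %.2f%%\n", avg10);
+          }
+          fclose(f);
+          return 0;
+  }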
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 184193b..52d093b 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -966,6 +966,12 @@
$PERIOD duration. "max" for $MAX indicates no limit. If only
one number is written, $MAX is updated.
+ cpu.pressure
+ A read-only nested-key file which exists on non-root cgroups.
+
+ Shows pressure stall information for CPU. See
+ Documentation/accounting/psi.txt for details.
+
Memory
------
@@ -1271,6 +1277,12 @@
higher than the limit for an extended period of time. This
reduces the impact on the workload and memory management.
+ memory.pressure
+ A read-only nested-key file which exists on non-root cgroups.
+
+ Shows pressure stall information for memory. See
+ Documentation/accounting/psi.txt for details.
+
Usage Guidelines
~~~~~~~~~~~~~~~~
@@ -1408,6 +1420,12 @@
8:16 rbps=2097152 wbps=max riops=max wiops=max
+ io.pressure
+ A read-only nested-key file which exists on non-root cgroups.
+
+ Shows pressure stall information for IO. See
+ Documentation/accounting/psi.txt for details.
+
Writeback
~~~~~~~~~
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 5449396..0a55b3e 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -3490,6 +3490,10 @@
before loading.
See Documentation/blockdev/ramdisk.txt.
+ psi= [KNL] Enable or disable pressure stall information
+ tracking.
+ Format: <bool>
+
psmouse.proto= [HW,MOUSE] Highest PS2 mouse protocol extension to
probe for; one of (bare|imps|exps|lifebook|any).
psmouse.rate= [HW,MOUSE] Set desired mouse report rate, in reports
diff --git a/Documentation/devicetree/bindings/eeprom/at24.txt b/Documentation/devicetree/bindings/eeprom/at24.txt
index aededdb..f9a7c98 100644
--- a/Documentation/devicetree/bindings/eeprom/at24.txt
+++ b/Documentation/devicetree/bindings/eeprom/at24.txt
@@ -27,6 +27,7 @@
"atmel,24c256",
"atmel,24c512",
"atmel,24c1024",
+ "atmel,24c2048",
If <manufacturer> is not "atmel", then a fallback must be used
with the same <model> and "atmel" as manufacturer.
diff --git a/Documentation/devicetree/bindings/i3c/i3c.txt b/Documentation/devicetree/bindings/i3c/i3c.txt
new file mode 100644
index 0000000..ab729a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/i3c/i3c.txt
@@ -0,0 +1,138 @@
+Generic device tree bindings for I3C busses
+===========================================
+
+This document describes generic bindings that should be used to describe I3C
+busses in a device tree.
+
+Required properties
+-------------------
+
+- #address-cells - should be <3>. Read more about addresses below.
+- #size-cells - should be <0>.
+- compatible - name of the I3C master controller driving the I3C bus
+
+For other required properties, e.g. to describe register sets,
+clocks, etc., check the binding documentation of the specific driver.
+The node describing an I3C bus should be named i3c-master.
+
+Optional properties
+-------------------
+
+These properties may not be supported by all I3C master drivers. Each I3C
+master binding should specify which of them are supported.
+
+- i3c-scl-hz: frequency of the SCL signal used for I3C transfers.
+ When undefined, the core sets it to 12.5MHz.
+
+- i2c-scl-hz: frequency of the SCL signal used for I2C transfers.
+ When undefined, the core looks at LVR (Legacy Virtual Register)
+ values of I2C devices described in the device tree to determine
+ the maximum I2C frequency.
+
+I2C devices
+===========
+
+Each I2C device connected to the bus should be described in a subnode. All
+properties described in Documentation/devicetree/bindings/i2c/i2c.txt are
+valid here, but several new properties have been added.
+
+New constraint on existing properties:
+--------------------------------------
+- reg: contains 3 cells
+ + first cell : still encoding the I2C address
+
+ + second cell: shall be 0
+
+ + third cell: shall encode the I3C LVR (Legacy Virtual Register)
+ bit[31:8]: unused/ignored
+ bit[7:5]: I2C device index. Possible values
+ * 0: I2C device has a 50 ns spike filter
+ * 1: I2C device does not have a 50 ns spike filter but supports high
+ frequency on SCL
+ * 2: I2C device does not have a 50 ns spike filter and is not tolerant
+ to high frequencies
+ * 3-7: reserved
+
+ bit[4]: tells whether the device operates in FM (Fast Mode) or FM+ mode
+ * 0: FM+ mode
+ * 1: FM mode
+
+ bit[3:0]: device type
+ * 0-15: reserved
+
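+The bit packing above can be illustrated with a small helper; this is a
+hypothetical C sketch of the layout, not part of the binding or of any
+kernel API:
+
+  #include <stdint.h>
+
+  /* Build the third reg cell (LVR) from the fields described above. */
+  static uint32_t i2c_lvr_cell(uint8_t index, int fm_mode, uint8_t dev_type)
+  {
+          uint8_t lvr = 0;
+
+          lvr |= (index & 0x7) << 5;      /* bit[7:5]: I2C device index */
+          lvr |= (fm_mode ? 1 : 0) << 4;  /* bit[4]: 1 = FM, 0 = FM+ */
+          lvr |= dev_type & 0xf;          /* bit[3:0]: device type */
+
+          return lvr;                     /* bit[31:8] stay unused/ignored */
+  }
+
+  /* Index 1 (no spike filter, high SCL OK), FM mode, type 0 => LVR 0x30. */
+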
+The I2C node unit-address should always match the first cell of the reg
+property: <device-type>@<i2c-address>.
+
+I3C devices
+===========
+
+All I3C devices are supposed to support DAA (Dynamic Address Assignment), and
+are thus discoverable. So, by default, I3C devices do not have to be described
+in the device tree.
+This being said, one might want to attach extra resources to these devices,
+and those resources may have to be described in the device tree, which in turn
+means we have to describe I3C devices.
+
+Another use case for describing an I3C device in the device tree is when this
+I3C device has a static I2C address and we want to assign it a specific I3C
+dynamic address before the DAA takes place (so that other devices on the bus
+can't take this dynamic address).
+
+The I3C device node should be named <device-type>@<static-i2c-address>,<i3c-pid>,
+where device-type describes the type of device connected to the bus
+(gpio-controller, sensor, ...).
+
+Required properties
+-------------------
+- reg: contains 3 cells
+ + first cell : encodes the static I2C address. Should be 0 if the device does
+ not have one (0 is not a valid I2C address).
+
+ + second and third cells: should encode the Provisional ID. The second cell
+ contains the manufacturer ID left-shifted by 1.
+ The third cell contains the OR of the part ID
+ left-shifted by 16, the instance ID left-shifted
+ by 12 and the extra information. This encoding
+ follows the PID definition provided by the I3C
+ specification.
+
+Optional properties
+-------------------
+- assigned-address: dynamic address to be assigned to this device. This
+ property is only valid if the I3C device has a static
+ address (first cell of the reg property != 0).
+
+
+Example:
+
+ i3c-master@d040000 {
+ compatible = "cdns,i3c-master";
+ clocks = <&coreclock>, <&i3csysclock>;
+ clock-names = "pclk", "sysclk";
+ interrupts = <3 0>;
+ reg = <0x0d040000 0x1000>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ i2c-scl-hz = <100000>;
+
+ /* I2C device. */
+ nunchuk: nunchuk@52 {
+ compatible = "nintendo,nunchuk";
+ reg = <0x52 0x0 0x10>;
+ };
+
+ /* I3C device with a static I2C address. */
+ thermal_sensor: sensor@68,39200144004 {
+ reg = <0x68 0x392 0x144004>;
+ assigned-address = <0xa>;
+ };
+
+ /*
+ * I3C device without a static I2C address but requiring
+ * resources described in the DT.
+ */
+ sensor@0,39200154004 {
+ reg = <0x0 0x392 0x154004>;
+ clocks = <&clock_provider 0>;
+ };
+ };
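+
+For clarity, the PID cells used in the thermal sensor example decompose as
+shown in the hypothetical C sketch below; it only illustrates the encoding
+described under "Required properties" and is not part of the binding:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          /* reg = <0x68 0x392 0x144004> from sensor@68,39200144004 above */
+          unsigned int cell2 = 0x392, cell3 = 0x144004;
+
+          printf("manufacturer ID: 0x%x\n", cell2 >> 1);          /* 0x1c9 */
+          printf("part ID:         0x%x\n", cell3 >> 16);         /* 0x14 */
+          printf("instance ID:     0x%x\n", (cell3 >> 12) & 0xf); /* 0x4 */
+          printf("extra info:      0x%x\n", cell3 & 0xfff);       /* 0x4 */
+          return 0;
+  }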
diff --git a/Documentation/devicetree/bindings/i3c/qcom,geni-i3c.txt b/Documentation/devicetree/bindings/i3c/qcom,geni-i3c.txt
new file mode 100644
index 0000000..59425ba
--- /dev/null
+++ b/Documentation/devicetree/bindings/i3c/qcom,geni-i3c.txt
@@ -0,0 +1,48 @@
+Qualcomm Technologies, Inc. GENI I3C master block
+
+Generic bindings document for GENI I3C master controller driver.
+
+Required properties:
+- compatible: shall be "qcom,geni-i3c".
+- clocks: shall reference the se clock.
+- clock-names: shall contain the clock name corresponding to the serial engine.
+- interrupts: the interrupt line connected to this I3C master.
+- reg: I3C master registers.
+- qcom,wrapper-core: Wrapper QUPv3 core containing this I3C controller.
+
+Optional properties:
+- se-clock-frequency: Source serial clock frequency to use.
+- dfs-index: Dynamic frequency scaling table index to use.
+
+Mandatory properties defined by the generic binding (see
+Documentation/devicetree/bindings/i3c/i3c.txt for more details):
+
+- #address-cells: shall be set to 3.
+- #size-cells: shall be set to 0.
+
+Optional properties defined by the generic binding (see
+Documentation/devicetree/bindings/i3c/i3c.txt for more details):
+
+- i2c-scl-hz: frequency for i2c transfers.
+- i3c-scl-hz: frequency for i3c transfers.
+
+I3C devices connected to the bus follow the generic description (see
+Documentation/devicetree/bindings/i3c/i3c.txt for more details).
+
+Example:
+ i3c0: i3c@980000 {
+ compatible = "qcom,geni-i3c";
+ reg = <0x980000 0x4000>,
+ <0xec30000 0x10000>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se0_i3c_active>;
+ pinctrl-1 = <&qupv3_se0_i3c_sleep>;
+ interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ qcom,wrapper-core = <&qupv3_0>;
+ };
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,kona-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,kona-pinctrl.txt
index 03f1fd9..78d6e0d 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,kona-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,kona-pinctrl.txt
@@ -40,6 +40,11 @@
Definition: must be 2. Specifying the pin number and flags, as defined
in <dt-bindings/gpio/gpio.h>
+- wakeup-parent:
+ Usage: optional
+ Value type: <phandle>
+ Definition: A phandle to the wakeup interrupt controller for the SoC.
+
Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
a general description of GPIO and interrupt bindings.
@@ -183,4 +188,5 @@
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
+ wakeup-parent = <&pdc>;
};
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa_mpm.txt b/Documentation/devicetree/bindings/platform/msm/ipa_mpm.txt
new file mode 100644
index 0000000..a32b320
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/ipa_mpm.txt
@@ -0,0 +1,23 @@
+* Qualcomm Technologies, Inc. IPA MHI Prime Manager driver module
+
+This module enables IPA Modem to IPA APQ communication using
+MHI Prime.
+
+Required properties:
+- compatible: Must be "qcom,ipa-mpm".
+- qcom,mhi-chdb-base: MHI channel doorbell base address in MMIO space.
+- qcom,mhi-erdb-base: MHI event doorbell base address in MMIO space.
+
+Optional:
+- qcom,iova-mapping: Start address and size of the carved IOVA space
+ dedicated for MHI control structures
+ (such as transfer rings, event rings, doorbells).
+ If not present, SMMU S1 is considered to be in bypass mode.
+
+Example:
+ ipa_mpm: qcom,ipa-mpm {
+ compatible = "qcom,ipa-mpm";
+ qcom,mhi-chdb-base = <0x40300300>;
+ qcom,mhi-erdb-base = <0x40300700>;
+ qcom,iova-mapping = <0x10000000 0x1FFFFFFF>;
+ };
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
index d173819..e81b8e7 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-smb5.txt
@@ -280,6 +280,31 @@
Value type: bool
Definition: Boolean flag which when present disables USB-PD operation.
+- qcom,hw-die-temp-mitigation
+ Usage: optional
+ Value type: bool
+ Definition: Boolean flag which when present enables h/w based thermal
+ mitigation.
+
+- qcom,hw-connector-mitigation
+ Usage: optional
+ Value type: bool
+ Definition: Boolean flag which when present enables h/w based
+ connector temperature mitigation.
+
+- qcom,hw-skin-temp-mitigation
+ Usage: optional
+ Value type: bool
+ Definition: Boolean flag which when present enables h/w based skin
+ temperature mitigation.
+
+- qcom,connector-internal-pull-kohm
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies internal pull-up configuration to be applied to
+ connector THERM. The only valid values are (0/30/100/400).
+ If not specified, 100K is used as the default pull-up.
+
=============================================
Second Level Nodes - SMB5 Charger Peripherals
=============================================
diff --git a/Documentation/driver-api/i3c/device-driver-api.rst b/Documentation/driver-api/i3c/device-driver-api.rst
new file mode 100644
index 0000000..85bc3381
--- /dev/null
+++ b/Documentation/driver-api/i3c/device-driver-api.rst
@@ -0,0 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=====================
+I3C device driver API
+=====================
+
+.. kernel-doc:: include/linux/i3c/device.h
+
+.. kernel-doc:: drivers/i3c/device.c
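+
+As a hedged illustration of how a driver might use this API, the sketch below
+issues a private SDR register read, assuming the i3c_device_do_priv_xfers()
+helper and struct i3c_priv_xfer from include/linux/i3c/device.h (pulled in by
+the kernel-doc directive above); the device and register are hypothetical::
+
+  #include <linux/i3c/device.h>
+  #include <linux/kernel.h>
+
+  /* Write a register address, then read 'len' bytes back from the device. */
+  static int demo_read_reg(struct i3c_device *dev, u8 reg, u8 *val, u16 len)
+  {
+          struct i3c_priv_xfer xfers[2] = {
+                  { .rnw = false, .len = 1,   .data.out = &reg },
+                  { .rnw = true,  .len = len, .data.in  = val  },
+          };
+
+          return i3c_device_do_priv_xfers(dev, xfers, ARRAY_SIZE(xfers));
+  }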
diff --git a/Documentation/driver-api/i3c/index.rst b/Documentation/driver-api/i3c/index.rst
new file mode 100644
index 0000000..783d6da
--- /dev/null
+++ b/Documentation/driver-api/i3c/index.rst
@@ -0,0 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============
+I3C subsystem
+=============
+
+.. toctree::
+
+ protocol
+ device-driver-api
+ master-driver-api
diff --git a/Documentation/driver-api/i3c/master-driver-api.rst b/Documentation/driver-api/i3c/master-driver-api.rst
new file mode 100644
index 0000000..332552b
--- /dev/null
+++ b/Documentation/driver-api/i3c/master-driver-api.rst
@@ -0,0 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================================
+I3C master controller driver API
+================================
+
+.. kernel-doc:: drivers/i3c/master.c
+
+.. kernel-doc:: include/linux/i3c/master.h
diff --git a/Documentation/driver-api/i3c/protocol.rst b/Documentation/driver-api/i3c/protocol.rst
new file mode 100644
index 0000000..dae3b6d
--- /dev/null
+++ b/Documentation/driver-api/i3c/protocol.rst
@@ -0,0 +1,203 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+============
+I3C protocol
+============
+
+Disclaimer
+==========
+
+This chapter will focus on aspects that matter to software developers. For
+everything hardware related (like how things are transmitted on the bus, how
+collisions are prevented, ...) please have a look at the I3C specification.
+
+This document is just a brief introduction to the I3C protocol and the concepts
+it brings to the table. If you need more information, please refer to the MIPI
+I3C specification (can be downloaded here
+http://resources.mipi.org/mipi-i3c-v1-download).
+
+Introduction
+============
+
+I3C (pronounced 'eye-three-see') is a MIPI-standardized protocol designed
+to overcome I2C limitations (limited speed, external signals needed for
+interrupts, no automatic detection of the devices connected to the bus, ...)
+while remaining power-efficient.
+
+I3C Bus
+=======
+
+An I3C bus is made of several I3C devices and possibly some I2C devices as
+well, but let's focus on I3C devices for now.
+
+An I3C device on the I3C bus can have one of the following roles:
+
+* Master: the device is driving the bus. It's the one in charge of initiating
+ transactions or deciding who is allowed to talk on the bus (slave generated
+ transactions or deciding who is allowed to talk on the bus (slave-generated
+* Slave: the device acts as a slave, and is not able to send frames to another
+ slave on the bus. The device can still send events to the master on
+ its own initiative if the master allowed it.
+
+I3C is a multi-master protocol, so there might be several masters on a bus,
+though only one device can act as a master at a given time. In order to gain
+bus ownership, a master has to follow a specific procedure.
+
+Each device on the I3C bus has to be assigned a dynamic address to be able to
+communicate. Until this is done, the device should only respond to a limited
+set of commands. If it has a static address (also called legacy I2C address),
+the device can reply to I2C transfers.
+
+In addition to these per-device addresses, the protocol defines a broadcast
+address in order to address all devices on the bus.
+
+Once a dynamic address has been assigned to a device, this address will be used
+for any direct communication with the device. Note that even after being
+assigned a dynamic address, the device should still process broadcast messages.
+
+I3C Device discovery
+====================
+
+The I3C protocol defines a mechanism to automatically discover devices present
+on the bus, their capabilities and the functionalities they provide. In this
+regard I3C is closer to a discoverable bus like USB than it is to I2C or SPI.
+
+The discovery mechanism is called DAA (Dynamic Address Assignment), because it
+not only discovers devices but also assigns them a dynamic address.
+
+During DAA, each I3C device reports 3 important things:
+
+* BCR: Bus Characteristics Register. This 8-bit register describes the device's
+ bus-related capabilities
+* DCR: Device Characteristics Register. This 8-bit register describes the
+ functionalities provided by the device
+* Provisional ID: A 48-bit unique identifier. On a given bus there should be no
+ Provisional ID collision, otherwise the discovery mechanism may fail.
+
+I3C slave events
+================
+
+The I3C protocol allows slaves to generate events on their own, and thus allows
+them to take temporary control of the bus.
+
+This mechanism is called IBI for In Band Interrupts, and as stated in the name,
+it allows devices to generate interrupts without requiring an external signal.
+
+During DAA, each device on the bus is assigned an address, and this address
+serves as a priority identifier to determine who wins if two different
+devices generate an interrupt at the same moment on the bus (the lower the
+dynamic address, the higher the priority).
+
+Masters are allowed to inhibit interrupts if they want to. This inhibition
+request can be broadcast (applies to all devices) or sent to a specific
+device.
+
+I3C Hot-Join
+============
+
+The Hot-Join mechanism is similar to USB hotplug. This mechanism allows
+slaves to join the bus after it has been initialized by the master.
+
+This covers the following use cases:
+
+* the device is not powered when the bus is probed
+* the device is hotplugged on the bus through an extension board
+
+This mechanism relies on slave events to inform the master that a new
+device joined the bus and is waiting for a dynamic address.
+
+The master is then free to address the request as it wishes: ignore it or
+assign a dynamic address to the slave.
+
+I3C transfer types
+==================
+
+If you omit SMBus (which is just a standardization on how to access registers
+exposed by I2C devices), I2C has only one transfer type.
+
+I3C defines three different classes of transfer in addition to I2C transfers,
+which are supported for backward compatibility with I2C devices.
+
+I3C CCC commands
+----------------
+
+CCC (Common Command Code) commands are meant to be used for anything that is
+related to bus management and all features that are common to a set of devices.
+
+CCC commands contain an 8-bit CCC ID describing the command that is executed.
+The MSB of this ID specifies whether this is a broadcast command (bit7 = 0) or a
+unicast one (bit7 = 1).
+
+The command ID can be followed by a payload. Depending on the command, this
+payload is either sent by the master sending the command (write CCC command),
+or sent by the slave receiving the command (read CCC command). Of course, read
+accesses only apply to unicast commands.
+Note that, when sending a CCC command to a specific device, the device address
+is passed in the first byte of the payload.
+
+The payload length is not explicitly passed on the bus, and should be extracted
+from the CCC ID.
+
+Note that vendors can use a dedicated range of CCC IDs for their own commands
+(0x61-0x7f and 0xe0-0xef).
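+
+The broadcast/unicast split and the vendor ranges quoted above boil down to
+simple bit tests; the following C sketch is purely illustrative::
+
+  #include <stdbool.h>
+  #include <stdint.h>
+
+  static bool ccc_is_broadcast(uint8_t ccc_id)
+  {
+          return !(ccc_id & 0x80);        /* bit7 = 0: broadcast */
+  }
+
+  static bool ccc_is_vendor(uint8_t ccc_id)
+  {
+          return (ccc_id >= 0x61 && ccc_id <= 0x7f) ||
+                 (ccc_id >= 0xe0 && ccc_id <= 0xef);
+  }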
+
+I3C Private SDR transfers
+-------------------------
+
+Private SDR (Single Data Rate) transfers should be used for anything that is
+device specific and does not require high transfer speed.
+
+It is the equivalent of I2C transfers but in the I3C world. Each transfer is
+passed the device address (dynamic address assigned during DAA), a payload
+and a direction.
+
+The only difference with I2C is that the transfer is much faster (typical clock
+frequency is 12.5MHz).
+
+I3C HDR commands
+----------------
+
+HDR commands should be used for anything that is device specific and requires
+high transfer speed.
+
+The first thing attached to an HDR command is the HDR mode. There are currently
+3 different modes defined by the I3C specification (refer to the specification
+for more details):
+
+* HDR-DDR: Double Data Rate mode
+* HDR-TSP: Ternary Symbol Pure. Only usable on busses with no I2C devices
+* HDR-TSL: Ternary Symbol Legacy. Usable on busses with I2C devices
+
+When sending an HDR command, the whole bus has to enter HDR mode, which is done
+using a broadcast CCC command.
+Once the bus has entered a specific HDR mode, the master sends the HDR command.
+An HDR command is made of:
+
+* one 16-bit command word in big endian
+* N 16-bit data words in big endian
+
+Those words may be wrapped with specific preambles/post-ambles which depend on
+the chosen HDR mode and are not detailed here (see the specification for more
+details).
+
+The 16-bit command word is made of:
+
+* bit[15]: direction bit, read is 1, write is 0
+* bit[14:8]: command code. Identifies the command being executed, the amount of
+ data words and their meaning
+* bit[7:1]: I3C address of the device this command is addressed to
+* bit[0]: reserved/parity-bit
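+
+Packed into C, the command word layout above looks like the sketch below
+(purely illustrative; parity generation is left to the controller)::
+
+  #include <stdint.h>
+
+  static uint16_t hdr_cmd_word(int read, uint8_t code, uint8_t addr)
+  {
+          uint16_t w = 0;
+
+          w |= (read ? 1 : 0) << 15;      /* bit[15]: 1 = read, 0 = write */
+          w |= (code & 0x7f) << 8;        /* bit[14:8]: command code */
+          w |= (addr & 0x7f) << 1;        /* bit[7:1]: target dynamic address */
+
+          return w;                       /* sent big endian on the wire */
+  }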
+
+Backward compatibility with I2C devices
+=======================================
+
+The I3C protocol has been designed to be backward compatible with I2C devices.
+This backward compatibility allows one to connect a mix of I2C and I3C devices
+on the same bus, though, in order to be really efficient, I2C devices should
+be equipped with 50 ns spike filters.
+
+I2C devices can't be discovered like I3C ones and have to be statically
+declared. In order to let the master know what these devices are capable of
+(both in terms of bus-related limitations and functionalities), the software
+has to provide some information, which is done through the LVR (Legacy I2C
+Virtual Register).
diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst
index 6d9f2f9..cc6a33f 100644
--- a/Documentation/driver-api/index.rst
+++ b/Documentation/driver-api/index.rst
@@ -32,6 +32,7 @@
pci
spi
i2c
+ i3c/index
hsi
edac
scsi
diff --git a/Makefile b/Makefile
index 3904be0..bc15999a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 20
+SUBLEVEL = 26
EXTRAVERSION =
NAME = "People's Front"
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h
index 4d17cac..432402c 100644
--- a/arch/alpha/include/asm/irq.h
+++ b/arch/alpha/include/asm/irq.h
@@ -56,15 +56,15 @@
#elif defined(CONFIG_ALPHA_DP264) || \
defined(CONFIG_ALPHA_LYNX) || \
- defined(CONFIG_ALPHA_SHARK) || \
- defined(CONFIG_ALPHA_EIGER)
+ defined(CONFIG_ALPHA_SHARK)
# define NR_IRQS 64
#elif defined(CONFIG_ALPHA_TITAN)
#define NR_IRQS 80
#elif defined(CONFIG_ALPHA_RAWHIDE) || \
- defined(CONFIG_ALPHA_TAKARA)
+ defined(CONFIG_ALPHA_TAKARA) || \
+ defined(CONFIG_ALPHA_EIGER)
# define NR_IRQS 128
#elif defined(CONFIG_ALPHA_WILDFIRE)
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index d73dc47..188fc92 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm)
/* Macro for exception fixup code to access integer registers. */
#define dpf_reg(r) \
(((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \
- (r) <= 18 ? (r)+8 : (r)-10])
+ (r) <= 18 ? (r)+10 : (r)-10])
asmlinkage void
do_page_fault(unsigned long address, unsigned long mmcsr,
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index ff7d323..db681cf 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -52,6 +52,17 @@
#define cache_line_size() SMP_CACHE_BYTES
#define ARCH_DMA_MINALIGN SMP_CACHE_BYTES
+/*
+ * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses
+ * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantees runtime 64-bit
+ * alignment for any atomic64_t embedded in buffer.
+ * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed
+ * value of 4 (and not 8) in ARC ABI.
+ */
+#if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC)
+#define ARCH_SLAB_MINALIGN 8
+#endif
+
extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void read_decode_cache_bcr(void);
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 8b90d25..1f945d0 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -17,6 +17,7 @@
#include <asm/entry.h>
#include <asm/arcregs.h>
#include <asm/cache.h>
+#include <asm/irqflags.h>
.macro CPU_EARLY_SETUP
@@ -47,6 +48,15 @@
sr r5, [ARC_REG_DC_CTRL]
1:
+
+#ifdef CONFIG_ISA_ARCV2
+ ; Unaligned access is disabled at reset, so re-enable early as
+ ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
+ ; by default
+ lr r5, [status32]
+ bset r5, r5, STATUS_AD_BIT
+ kflag r5
+#endif
.endm
.section .init.text, "ax",@progbits
@@ -93,9 +103,9 @@
#ifdef CONFIG_ARC_UBOOT_SUPPORT
; Uboot - kernel ABI
; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
- ; r1 = magic number (board identity, unused as of now
+ ; r1 = magic number (always zero as of now)
; r2 = pointer to uboot provided cmdline or external DTB in mem
- ; These are handled later in setup_arch()
+ ; These are handled later in handle_uboot_args()
st r0, [@uboot_tag]
st r2, [@uboot_arg]
#endif
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index b2cae79..62a30e5 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -449,43 +449,80 @@ void setup_processor(void)
arc_chk_core_config();
}
-static inline int is_kernel(unsigned long addr)
+static inline bool uboot_arg_invalid(unsigned long addr)
{
- if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
- return 1;
- return 0;
+ /*
+ * Check that it is an untranslated address (although MMU is not enabled
+ * yet, it being a high address ensures this is not by fluke)
+ */
+ if (addr < PAGE_OFFSET)
+ return true;
+
+ /* Check that address doesn't clobber resident kernel image */
+ return addr >= (unsigned long)_stext && addr <= (unsigned long)_end;
+}
+
+#define IGNORE_ARGS "Ignore U-boot args: "
+
+/* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */
+#define UBOOT_TAG_NONE 0
+#define UBOOT_TAG_CMDLINE 1
+#define UBOOT_TAG_DTB 2
+
+void __init handle_uboot_args(void)
+{
+ bool use_embedded_dtb = true;
+ bool append_cmdline = false;
+
+#ifdef CONFIG_ARC_UBOOT_SUPPORT
+ /* check that we know this tag */
+ if (uboot_tag != UBOOT_TAG_NONE &&
+ uboot_tag != UBOOT_TAG_CMDLINE &&
+ uboot_tag != UBOOT_TAG_DTB) {
+ pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag);
+ goto ignore_uboot_args;
+ }
+
+ if (uboot_tag != UBOOT_TAG_NONE &&
+ uboot_arg_invalid((unsigned long)uboot_arg)) {
+ pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg);
+ goto ignore_uboot_args;
+ }
+
+ /* see if U-boot passed an external Device Tree blob */
+ if (uboot_tag == UBOOT_TAG_DTB) {
+ machine_desc = setup_machine_fdt((void *)uboot_arg);
+
+ /* external Device Tree blob is invalid - use embedded one */
+ use_embedded_dtb = !machine_desc;
+ }
+
+ if (uboot_tag == UBOOT_TAG_CMDLINE)
+ append_cmdline = true;
+
+ignore_uboot_args:
+#endif
+
+ if (use_embedded_dtb) {
+ machine_desc = setup_machine_fdt(__dtb_start);
+ if (!machine_desc)
+ panic("Embedded DT invalid\n");
+ }
+
+ /*
+ * NOTE: @boot_command_line is populated by setup_machine_fdt() so this
+ * append processing can only happen after.
+ */
+ if (append_cmdline) {
+ /* Ensure a whitespace between the 2 cmdlines */
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+ strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE);
+ }
}
void __init setup_arch(char **cmdline_p)
{
-#ifdef CONFIG_ARC_UBOOT_SUPPORT
- /* make sure that uboot passed pointer to cmdline/dtb is valid */
- if (uboot_tag && is_kernel((unsigned long)uboot_arg))
- panic("Invalid uboot arg\n");
-
- /* See if u-boot passed an external Device Tree blob */
- machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */
- if (!machine_desc)
-#endif
- {
- /* No, so try the embedded one */
- machine_desc = setup_machine_fdt(__dtb_start);
- if (!machine_desc)
- panic("Embedded DT invalid\n");
-
- /*
- * If we are here, it is established that @uboot_arg didn't
- * point to DT blob. Instead if u-boot says it is cmdline,
- * append to embedded DT cmdline.
- * setup_machine_fdt() would have populated @boot_command_line
- */
- if (uboot_tag == 1) {
- /* Ensure a whitespace between the 2 cmdlines */
- strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
- strlcat(boot_command_line, uboot_arg,
- COMMAND_LINE_SIZE);
- }
- }
+ handle_uboot_args();
/* Save unparsed command line copy for /proc/cmdline */
*cmdline_p = boot_command_line;
diff --git a/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts b/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
index df12276..c2ece0b 100644
--- a/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-arm-centriq2400-rep.dts
@@ -13,7 +13,7 @@
bootargs = "console=ttyS4,115200 earlyprintk";
};
- memory {
+ memory@80000000 {
reg = <0x80000000 0x40000000>;
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
index 7a291de..22dade6 100644
--- a/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-intel-s2600wf.dts
@@ -13,7 +13,7 @@
bootargs = "earlyprintk";
};
- memory {
+ memory@80000000 {
reg = <0x80000000 0x20000000>;
};
diff --git a/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts b/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
index d598b63..024e52a 100644
--- a/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-opp-lanyang.dts
@@ -14,7 +14,7 @@
bootargs = "console=ttyS4,115200 earlyprintk";
};
- memory {
+ memory@80000000 {
reg = <0x80000000 0x40000000>;
};
@@ -322,4 +322,3 @@
&adc {
status = "okay";
};
-
diff --git a/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts b/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
index 43ed139..33d7045 100644
--- a/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-portwell-neptune.dts
@@ -17,7 +17,7 @@
bootargs = "console=ttyS4,115200 earlyprintk";
};
- memory {
+ memory@80000000 {
reg = <0x80000000 0x20000000>;
};
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index f9b7579..016616c 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -94,6 +94,28 @@
regulator-boot-on;
};
+ baseboard_3v3: fixedregulator-3v3 {
+ /* TPS73701DCQ */
+ compatible = "regulator-fixed";
+ regulator-name = "baseboard_3v3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vbat>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ baseboard_1v8: fixedregulator-1v8 {
+ /* TPS73701DCQ */
+ compatible = "regulator-fixed";
+ regulator-name = "baseboard_1v8";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vbat>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
backlight_lcd: backlight-regulator {
compatible = "regulator-fixed";
regulator-name = "lcd_backlight_pwr";
@@ -105,7 +127,7 @@
sound {
compatible = "simple-audio-card";
- simple-audio-card,name = "DA850/OMAP-L138 EVM";
+ simple-audio-card,name = "DA850-OMAPL138 EVM";
simple-audio-card,widgets =
"Line", "Line In",
"Line", "Line Out";
@@ -210,10 +232,9 @@
/* Regulators */
IOVDD-supply = <&vdcdc2_reg>;
- /* Derived from VBAT: Baseboard 3.3V / 1.8V */
- AVDD-supply = <&vbat>;
- DRVDD-supply = <&vbat>;
- DVDD-supply = <&vbat>;
+ AVDD-supply = <&baseboard_3v3>;
+ DRVDD-supply = <&baseboard_3v3>;
+ DVDD-supply = <&baseboard_1v8>;
};
tca6416: gpio@20 {
compatible = "ti,tca6416";
diff --git a/arch/arm/boot/dts/da850-lcdk.dts b/arch/arm/boot/dts/da850-lcdk.dts
index 0177e3e..3a2fa6e 100644
--- a/arch/arm/boot/dts/da850-lcdk.dts
+++ b/arch/arm/boot/dts/da850-lcdk.dts
@@ -39,9 +39,39 @@
};
};
+ vcc_5vd: fixedregulator-vcc_5vd {
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_5vd";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ regulator-boot-on;
+ };
+
+ vcc_3v3d: fixedregulator-vcc_3v3d {
+ /* TPS650250 - VDCDC1 */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_3v3d";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ vin-supply = <&vcc_5vd>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
+ vcc_1v8d: fixedregulator-vcc_1v8d {
+ /* TPS650250 - VDCDC2 */
+ compatible = "regulator-fixed";
+ regulator-name = "vcc_1v8d";
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <1800000>;
+ vin-supply = <&vcc_5vd>;
+ regulator-always-on;
+ regulator-boot-on;
+ };
+
sound {
compatible = "simple-audio-card";
- simple-audio-card,name = "DA850/OMAP-L138 LCDK";
+ simple-audio-card,name = "DA850-OMAPL138 LCDK";
simple-audio-card,widgets =
"Line", "Line In",
"Line", "Line Out";
@@ -221,6 +251,12 @@
compatible = "ti,tlv320aic3106";
reg = <0x18>;
status = "okay";
+
+ /* Regulators */
+ IOVDD-supply = <&vcc_3v3d>;
+ AVDD-supply = <&vcc_3v3d>;
+ DRVDD-supply = <&vcc_3v3d>;
+ DVDD-supply = <&vcc_1v8d>;
};
};
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
index 47aa53b..559659b 100644
--- a/arch/arm/boot/dts/da850.dtsi
+++ b/arch/arm/boot/dts/da850.dtsi
@@ -476,7 +476,7 @@
clocksource: timer@20000 {
compatible = "ti,da830-timer";
reg = <0x20000 0x1000>;
- interrupts = <12>, <13>;
+ interrupts = <21>, <22>;
interrupt-names = "tint12", "tint34";
clocks = <&pll0_auxclk>;
};
diff --git a/arch/arm/boot/dts/gemini-dlink-dir-685.dts b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
index 6f258b5..502a361 100644
--- a/arch/arm/boot/dts/gemini-dlink-dir-685.dts
+++ b/arch/arm/boot/dts/gemini-dlink-dir-685.dts
@@ -274,20 +274,16 @@
read-only;
};
/*
- * Between the boot loader and the rootfs is the kernel
- * in a custom Storlink format flashed from the boot
- * menu. The rootfs is in squashfs format.
+ * This firmware image contains the kernel catenated
+ * with the squashfs root filesystem. For some reason
+ * this is called "upgrade" on the vendor system.
*/
- partition@1800c0 {
- label = "rootfs";
- reg = <0x001800c0 0x01dbff40>;
- read-only;
- };
- partition@1f40000 {
+ partition@40000 {
label = "upgrade";
- reg = <0x01f40000 0x00040000>;
+ reg = <0x00040000 0x01f40000>;
read-only;
};
+ /* RGDB, Residential Gateway Database? */
partition@1f80000 {
label = "rgdb";
reg = <0x01f80000 0x00040000>;
diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts
index 469cce2..6e80254 100644
--- a/arch/arm/boot/dts/imx51-zii-rdu1.dts
+++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts
@@ -477,6 +477,15 @@
};
&gpio1 {
+ gpio-line-names = "", "", "", "",
+ "", "", "", "",
+ "", "hp-amp-shutdown-b", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "",
+ "", "", "", "";
+
unused-sd3-wp-gpio {
/*
* See pinctrl_esdhc1 below for more details on this
@@ -501,9 +510,6 @@
hpa1: amp@60 {
compatible = "ti,tpa6130a2";
reg = <0x60>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_ampgpio>;
- power-gpio = <&gpio1 9 GPIO_ACTIVE_HIGH>;
Vdd-supply = <®_3p3v>;
};
@@ -677,7 +683,10 @@
};
&iomuxc {
- pinctrl_ampgpio: ampgpiogrp {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hog>;
+
+ pinctrl_hog: hoggrp {
fsl,pins = <
MX51_PAD_GPIO1_9__GPIO1_9 0x5e
>;
diff --git a/arch/arm/boot/dts/kirkwood-dnskw.dtsi b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
index cbaf06f..eb91746 100644
--- a/arch/arm/boot/dts/kirkwood-dnskw.dtsi
+++ b/arch/arm/boot/dts/kirkwood-dnskw.dtsi
@@ -36,8 +36,8 @@
compatible = "gpio-fan";
pinctrl-0 = <&pmx_fan_high_speed &pmx_fan_low_speed>;
pinctrl-names = "default";
- gpios = <&gpio1 14 GPIO_ACTIVE_LOW
- &gpio1 13 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio1 14 GPIO_ACTIVE_HIGH
+ &gpio1 13 GPIO_ACTIVE_HIGH>;
gpio-fan,speed-map = <0 0
3000 1
6000 2>;
diff --git a/arch/arm/boot/dts/mmp2.dtsi b/arch/arm/boot/dts/mmp2.dtsi
index 766bbb8..47e5b63 100644
--- a/arch/arm/boot/dts/mmp2.dtsi
+++ b/arch/arm/boot/dts/mmp2.dtsi
@@ -220,12 +220,15 @@
status = "disabled";
};
- twsi2: i2c@d4025000 {
+ twsi2: i2c@d4031000 {
compatible = "mrvl,mmp-twsi";
- reg = <0xd4025000 0x1000>;
- interrupts = <58>;
+ reg = <0xd4031000 0x1000>;
+ interrupt-parent = <&intcmux17>;
+ interrupts = <0>;
clocks = <&soc_clocks MMP2_CLK_TWSI1>;
resets = <&soc_clocks MMP2_CLK_TWSI1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index 490726b..9dc7ec7 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -33,6 +33,7 @@
gpio = <&gpio2 16 GPIO_ACTIVE_HIGH>; /* gpio line 48 */
enable-active-high;
regulator-boot-on;
+ startup-delay-us = <25000>;
};
vbat: fixedregulator-vbat {
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi
index ab6f640..8b8db9d 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
@@ -317,7 +317,8 @@
palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
pinctrl-single,pins = <
- OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */
+ /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
+ OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
>;
};
@@ -385,7 +386,8 @@
palmas: palmas@48 {
compatible = "ti,palmas";
- interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
+ /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
reg = <0x48>;
interrupt-controller;
#interrupt-cells = <2>;
@@ -651,7 +653,8 @@
pinctrl-names = "default";
pinctrl-0 = <&twl6040_pins>;
- interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
+ /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+ interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>;
/* audpwron gpio defined in the board specific dts */
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts
index 5e21fb4..e78d371 100644
--- a/arch/arm/boot/dts/omap5-cm-t54.dts
+++ b/arch/arm/boot/dts/omap5-cm-t54.dts
@@ -181,6 +181,13 @@
OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */
>;
};
+
+ palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins {
+ pinctrl-single,pins = <
+ /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */
+ OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0)
+ >;
+ };
};
&omap5_pmx_core {
@@ -414,8 +421,11 @@
palmas: palmas@48 {
compatible = "ti,palmas";
- interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */
reg = <0x48>;
+ pinctrl-0 = <&palmas_sys_nirq_pins>;
+ pinctrl-names = "default";
+ /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */
+ interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
#interrupt-cells = <2>;
ti,system-power-controller;
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index b17ee03..88286dd 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -467,6 +467,17 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif
.endm
+ .macro uaccess_mask_range_ptr, addr:req, size:req, limit:req, tmp:req
+#ifdef CONFIG_CPU_SPECTRE
+ sub \tmp, \limit, #1
+ subs \tmp, \tmp, \addr @ tmp = limit - 1 - addr
+ addhs \tmp, \tmp, #1 @ if (tmp >= 0) {
+ subhss \tmp, \tmp, \size @ tmp = limit - (addr + size) }
+ movlo \addr, #0 @ if (tmp < 0) addr = NULL
+ csdb
+#endif
+ .endm
+
.macro uaccess_disable, tmp, isb=1
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
/*
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 0d28924..775cac3 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -111,6 +111,7 @@
#include <linux/kernel.h>
extern unsigned int processor_id;
+struct proc_info_list *lookup_processor(u32 midr);
#ifdef CONFIG_CPU_CP15
#define read_cpuid(reg) \
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index e25f439..e1b6f28 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -23,7 +23,7 @@ struct mm_struct;
/*
* Don't change this structure - ASM code relies on it.
*/
-extern struct processor {
+struct processor {
/* MISC
* get data abort address/flags
*/
@@ -79,9 +79,13 @@ extern struct processor {
unsigned int suspend_size;
void (*do_suspend)(void *);
void (*do_resume)(void *);
-} processor;
+};
#ifndef MULTI_CPU
+static inline void init_proc_vtable(const struct processor *p)
+{
+}
+
extern void cpu_proc_init(void);
extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
@@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
extern void cpu_do_suspend(void *);
extern void cpu_do_resume(void *);
#else
-#define cpu_proc_init processor._proc_init
-#define cpu_proc_fin processor._proc_fin
-#define cpu_reset processor.reset
-#define cpu_do_idle processor._do_idle
-#define cpu_dcache_clean_area processor.dcache_clean_area
-#define cpu_set_pte_ext processor.set_pte_ext
-#define cpu_do_switch_mm processor.switch_mm
-/* These three are private to arch/arm/kernel/suspend.c */
-#define cpu_do_suspend processor.do_suspend
-#define cpu_do_resume processor.do_resume
+extern struct processor processor;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+#include <linux/smp.h>
+/*
+ * This can't be a per-cpu variable because we need to access it before
+ * per-cpu has been initialised. We have a couple of functions that are
+ * called in a pre-emptible context, and so can't use smp_processor_id()
+ * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
+ * function pointers for these are identical across all CPUs.
+ */
+extern struct processor *cpu_vtable[];
+#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
+#define PROC_TABLE(f) cpu_vtable[0]->f
+static inline void init_proc_vtable(const struct processor *p)
+{
+ unsigned int cpu = smp_processor_id();
+ *cpu_vtable[cpu] = *p;
+ WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
+ cpu_vtable[0]->dcache_clean_area);
+ WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
+ cpu_vtable[0]->set_pte_ext);
+}
+#else
+#define PROC_VTABLE(f) processor.f
+#define PROC_TABLE(f) processor.f
+static inline void init_proc_vtable(const struct processor *p)
+{
+ processor = *p;
+}
+#endif
+
+#define cpu_proc_init PROC_VTABLE(_proc_init)
+#define cpu_check_bugs PROC_VTABLE(check_bugs)
+#define cpu_proc_fin PROC_VTABLE(_proc_fin)
+#define cpu_reset PROC_VTABLE(reset)
+#define cpu_do_idle PROC_VTABLE(_do_idle)
+#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
+#define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
+#define cpu_do_switch_mm PROC_VTABLE(switch_mm)
+
+/* These two are private to arch/arm/kernel/suspend.c */
+#define cpu_do_suspend PROC_VTABLE(do_suspend)
+#define cpu_do_resume PROC_VTABLE(do_resume)
#endif
extern void cpu_resume(void);
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 9b37b6a..8f55dc5 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -121,8 +121,8 @@ extern void vfp_flush_hwstate(struct thread_info *);
struct user_vfp;
struct user_vfp_exc;
-extern int vfp_preserve_user_clear_hwstate(struct user_vfp __user *,
- struct user_vfp_exc __user *);
+extern int vfp_preserve_user_clear_hwstate(struct user_vfp *,
+ struct user_vfp_exc *);
extern int vfp_restore_user_hwstate(struct user_vfp *,
struct user_vfp_exc *);
#endif
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 5451e1f..c136eef 100644
--- a/arch/arm/include/asm/uaccess.h
+++ b/arch/arm/include/asm/uaccess.h
@@ -69,6 +69,14 @@ extern int __put_user_bad(void);
static inline void set_fs(mm_segment_t fs)
{
current_thread_info()->addr_limit = fs;
+
+ /*
+ * Prevent a mispredicted conditional call to set_fs from forwarding
+ * the wrong address limit to access_ok under speculation.
+ */
+ dsb(nsh);
+ isb();
+
modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
}
@@ -92,6 +100,32 @@ static inline void set_fs(mm_segment_t fs)
__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
/*
+ * Sanitise a uaccess pointer such that it becomes NULL if addr+size
+ * is above the current addr_limit.
+ */
+#define uaccess_mask_range_ptr(ptr, size) \
+ ((__typeof__(ptr))__uaccess_mask_range_ptr(ptr, size))
+static inline void __user *__uaccess_mask_range_ptr(const void __user *ptr,
+ size_t size)
+{
+ void __user *safe_ptr = (void __user *)ptr;
+ unsigned long tmp;
+
+ asm volatile(
+ " sub %1, %3, #1\n"
+ " subs %1, %1, %0\n"
+ " addhs %1, %1, #1\n"
+ " subhss %1, %1, %2\n"
+ " movlo %0, #0\n"
+ : "+r" (safe_ptr), "=&r" (tmp)
+ : "r" (size), "r" (current_thread_info()->addr_limit)
+ : "cc");
+
+ csdb();
+ return safe_ptr;
+}
+
+/*
* Single-value transfer routines. They automatically use the right
* size if we just have the right pointer type. Note that the functions
* which read from user space (*get_*) need to take care not to leak
@@ -362,6 +396,14 @@ do { \
__pu_err; \
})
+#ifdef CONFIG_CPU_SPECTRE
+/*
+ * When mitigating Spectre variant 1.1, all accessors need to include
+ * verification of the address space.
+ */
+#define __put_user(x, ptr) put_user(x, ptr)
+
+#else
#define __put_user(x, ptr) \
({ \
long __pu_err = 0; \
@@ -369,12 +411,6 @@ do { \
__pu_err; \
})
-#define __put_user_error(x, ptr, err) \
-({ \
- __put_user_switch((x), (ptr), (err), __put_user_nocheck); \
- (void) 0; \
-})
-
#define __put_user_nocheck(x, __pu_ptr, __err, __size) \
do { \
unsigned long __pu_addr = (unsigned long)__pu_ptr; \
@@ -454,6 +490,7 @@ do { \
: "r" (x), "i" (-EFAULT) \
: "cc")
+#endif /* !CONFIG_CPU_SPECTRE */
#ifdef CONFIG_MMU
extern unsigned long __must_check
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c
index 7be5113..d41d359 100644
--- a/arch/arm/kernel/bugs.c
+++ b/arch/arm/kernel/bugs.c
@@ -6,8 +6,8 @@
void check_other_bugs(void)
{
#ifdef MULTI_CPU
- if (processor.check_bugs)
- processor.check_bugs();
+ if (cpu_check_bugs)
+ cpu_check_bugs();
#endif
}
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 6e0375e..997b023 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -145,6 +145,9 @@
#endif
.size __mmap_switched_data, . - __mmap_switched_data
+ __FINIT
+ .text
+
/*
* This provides a C-API version of __lookup_processor_type
*/
@@ -156,9 +159,6 @@
ldmfd sp!, {r4 - r6, r9, pc}
ENDPROC(lookup_processor_type)
- __FINIT
- .text
-
/*
* Read processor ID register (CP#15, CR0), and look up in the linker-built
* supported processor list. Note that we can't use the absolute addresses
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 2bc3bae..632f129 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -124,6 +124,11 @@ EXPORT_SYMBOL(cold_boot);
#ifdef MULTI_CPU
struct processor processor __ro_after_init;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+struct processor *cpu_vtable[NR_CPUS] = {
+ [0] = &processor,
+};
+#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
@@ -676,28 +681,33 @@ static void __init smp_build_mpidr_hash(void)
}
#endif
+/*
+ * locate processor in the list of supported processor types. The linker
+ * builds this table for us from the entries in arch/arm/mm/proc-*.S
+ */
+struct proc_info_list *lookup_processor(u32 midr)
+{
+ struct proc_info_list *list = lookup_processor_type(midr);
+
+ if (!list) {
+ pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
+ smp_processor_id(), midr);
+ while (1)
+ /* can't use cpu_relax() here as it may require MMU setup */;
+ }
+
+ return list;
+}
+
static void __init setup_processor(void)
{
- struct proc_info_list *list;
-
- /*
- * locate processor in the list of supported processor
- * types. The linker builds this table for us from the
- * entries in arch/arm/mm/proc-*.S
- */
- list = lookup_processor_type(read_cpuid_id());
- if (!list) {
- pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
- read_cpuid_id());
- while (1);
- }
+ unsigned int midr = read_cpuid_id();
+ struct proc_info_list *list = lookup_processor(midr);
cpu_name = list->cpu_name;
__cpu_architecture = __get_cpu_architecture();
-#ifdef MULTI_CPU
- processor = *list->proc;
-#endif
+ init_proc_vtable(list->proc);
#ifdef MULTI_TLB
cpu_tlb = *list->tlb;
#endif
@@ -709,7 +719,7 @@ static void __init setup_processor(void)
#endif
pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
- cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+ list->cpu_name, midr, midr & 15,
proc_arch[cpu_architecture()], get_cr());
snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index b8f766c..b908382 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -77,8 +77,6 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
kframe->magic = IWMMXT_MAGIC;
kframe->size = IWMMXT_STORAGE_SIZE;
iwmmxt_task_copy(current_thread_info(), &kframe->storage);
-
- err = __copy_to_user(frame, kframe, sizeof(*frame));
} else {
/*
* For bug-compatibility with older kernels, some space
@@ -86,10 +84,14 @@ static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
* Set the magic and size appropriately so that properly
* written userspace can skip it reliably:
*/
- __put_user_error(DUMMY_MAGIC, &frame->magic, err);
- __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
+ *kframe = (struct iwmmxt_sigframe) {
+ .magic = DUMMY_MAGIC,
+ .size = IWMMXT_STORAGE_SIZE,
+ };
}
+ err = __copy_to_user(frame, kframe, sizeof(*kframe));
+
return err;
}
@@ -135,17 +137,18 @@ static int restore_iwmmxt_context(char __user **auxp)
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
- const unsigned long magic = VFP_MAGIC;
- const unsigned long size = VFP_STORAGE_SIZE;
+ struct vfp_sigframe kframe;
int err = 0;
- __put_user_error(magic, &frame->magic, err);
- __put_user_error(size, &frame->size, err);
+ memset(&kframe, 0, sizeof(kframe));
+ kframe.magic = VFP_MAGIC;
+ kframe.size = VFP_STORAGE_SIZE;
+ err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
if (err)
- return -EFAULT;
+ return err;
- return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
+ return __copy_to_user(frame, &kframe, sizeof(kframe));
}
static int restore_vfp_context(char __user **auxp)
@@ -288,30 +291,35 @@ static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
struct aux_sigframe __user *aux;
+ struct sigcontext context;
int err = 0;
- __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
- __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
- __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
- __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
- __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
- __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
- __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
- __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
- __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
- __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
- __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
- __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
- __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
- __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
- __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
- __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
- __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);
+ context = (struct sigcontext) {
+ .arm_r0 = regs->ARM_r0,
+ .arm_r1 = regs->ARM_r1,
+ .arm_r2 = regs->ARM_r2,
+ .arm_r3 = regs->ARM_r3,
+ .arm_r4 = regs->ARM_r4,
+ .arm_r5 = regs->ARM_r5,
+ .arm_r6 = regs->ARM_r6,
+ .arm_r7 = regs->ARM_r7,
+ .arm_r8 = regs->ARM_r8,
+ .arm_r9 = regs->ARM_r9,
+ .arm_r10 = regs->ARM_r10,
+ .arm_fp = regs->ARM_fp,
+ .arm_ip = regs->ARM_ip,
+ .arm_sp = regs->ARM_sp,
+ .arm_lr = regs->ARM_lr,
+ .arm_pc = regs->ARM_pc,
+ .arm_cpsr = regs->ARM_cpsr,
- __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
- __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
- __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
- __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
+ .trap_no = current->thread.trap_no,
+ .error_code = current->thread.error_code,
+ .fault_address = current->thread.address,
+ .oldmask = set->sig[0],
+ };
+
+ err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
@@ -328,7 +336,7 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
if (err == 0)
err |= preserve_vfp_context(&aux->vfp);
#endif
- __put_user_error(0, &aux->end_magic, err);
+ err |= __put_user(0, &aux->end_magic);
return err;
}
@@ -491,7 +499,7 @@ setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
/*
* Set uc.uc_flags to a value which sc.trap_no would never have.
*/
- __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);
+ err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
err |= setup_sigframe(frame, regs, set);
if (err == 0)
@@ -511,8 +519,8 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
- __put_user_error(0, &frame->sig.uc.uc_flags, err);
- __put_user_error(NULL, &frame->sig.uc.uc_link, err);
+ err |= __put_user(0, &frame->sig.uc.uc_flags);
+ err |= __put_user(NULL, &frame->sig.uc.uc_link);
err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
err |= setup_sigframe(&frame->sig, regs, set);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 2a26e31..5bff527 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -42,6 +42,7 @@
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
+#include <asm/procinfo.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
@@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
#endif
}
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+ if (!cpu_vtable[cpu])
+ cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
+
+ return cpu_vtable[cpu] ? 0 : -ENOMEM;
+}
+
+static void secondary_biglittle_init(void)
+{
+ init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
+}
+#else
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+ return 0;
+}
+
+static void secondary_biglittle_init(void)
+{
+}
+#endif
+
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
int ret;
@@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
if (!smp_ops.smp_boot_secondary)
return -ENOSYS;
+ ret = secondary_biglittle_prepare(cpu);
+ if (ret)
+ return ret;
+
/*
* We need to tell the secondary core where to find
* its stack and the page tables.
@@ -359,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
struct mm_struct *mm = &init_mm;
unsigned int cpu;
+ secondary_biglittle_init();
+
/*
* The identity mapping is uncached (strongly ordered), so
* switch away from it before attempting any exclusive accesses.
@@ -708,6 +739,21 @@ void smp_send_stop(void)
pr_warn("SMP: failed to stop secondary CPUs\n");
}
+/*
+ * If panic() is called at the same time on two CPUs (say CPU1 and CPU2)
+ * and CPU1 reaches panic_smp_self_stop() before crash_smp_send_stop(),
+ * CPU1 can no longer receive the IPI from CPU2 and stays marked online,
+ * so kdump fails. Override panic_smp_self_stop() and add
+ * set_cpu_online(smp_processor_id(), false) to handle this case.
+ */
+void panic_smp_self_stop(void)
+{
+ pr_debug("CPU %u will stop doing anything useful since another CPU has paniced\n",
+ smp_processor_id());
+ set_cpu_online(smp_processor_id(), false);
+ while (1)
+ cpu_relax();
+}
+
/*
* not supported here
*/
diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c
index f0dd4b6..40da087 100644
--- a/arch/arm/kernel/sys_oabi-compat.c
+++ b/arch/arm/kernel/sys_oabi-compat.c
@@ -277,6 +277,7 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
int maxevents, int timeout)
{
struct epoll_event *kbuf;
+ struct oabi_epoll_event e;
mm_segment_t fs;
long ret, err, i;
@@ -295,8 +296,11 @@ asmlinkage long sys_oabi_epoll_wait(int epfd,
set_fs(fs);
err = 0;
for (i = 0; i < ret; i++) {
- __put_user_error(kbuf[i].events, &events->events, err);
- __put_user_error(kbuf[i].data, &events->data, err);
+ e.events = kbuf[i].events;
+ e.data = kbuf[i].data;
+ err = __copy_to_user(events, &e, sizeof(e));
+ if (err)
+ break;
events++;
}
kfree(kbuf);
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index a826df3..6709a8d 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -93,11 +93,7 @@
#ifdef CONFIG_CPU_SPECTRE
get_thread_info r3
ldr r3, [r3, #TI_ADDR_LIMIT]
- adds ip, r1, r2 @ ip=addr+size
- sub r3, r3, #1 @ addr_limit - 1
- cmpcc ip, r3 @ if (addr+size > addr_limit - 1)
- movcs r1, #0 @ addr = NULL
- csdb
+ uaccess_mask_range_ptr r1, r2, r3, ip
#endif
#include "copy_template.S"
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index caf5019..970abe5 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -94,6 +94,11 @@
ENTRY(__copy_to_user_std)
WEAK(arm_copy_to_user)
+#ifdef CONFIG_CPU_SPECTRE
+ get_thread_info r3
+ ldr r3, [r3, #TI_ADDR_LIMIT]
+ uaccess_mask_range_ptr r0, r2, r3, ip
+#endif
#include "copy_template.S"
@@ -108,4 +113,3 @@
rsb r0, r0, r2
copy_abort_end
.popsection
-
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 9b4ed17..73dc736 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -152,7 +152,8 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
n = __copy_to_user_std(to, from, n);
uaccess_restore(ua_flags);
} else {
- n = __copy_to_user_memcpy(to, from, n);
+ n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
+ from, n);
}
return n;
}
diff --git a/arch/arm/mach-integrator/impd1.c b/arch/arm/mach-integrator/impd1.c
index a109f648..0f916c2 100644
--- a/arch/arm/mach-integrator/impd1.c
+++ b/arch/arm/mach-integrator/impd1.c
@@ -393,7 +393,11 @@ static int __ref impd1_probe(struct lm_device *dev)
sizeof(*lookup) + 3 * sizeof(struct gpiod_lookup),
GFP_KERNEL);
chipname = devm_kstrdup(&dev->dev, devname, GFP_KERNEL);
- mmciname = kasprintf(GFP_KERNEL, "lm%x:00700", dev->id);
+ mmciname = devm_kasprintf(&dev->dev, GFP_KERNEL,
+ "lm%x:00700", dev->id);
+ if (!lookup || !chipname || !mmciname)
+ return -ENOMEM;
+
lookup->dev_id = mmciname;
/*
* Offsets on GPIO block 1:
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c
index 3b73813..23e8c93 100644
--- a/arch/arm/mach-iop32x/n2100.c
+++ b/arch/arm/mach-iop32x/n2100.c
@@ -75,8 +75,7 @@ void __init n2100_map_io(void)
/*
* N2100 PCI.
*/
-static int __init
-n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
int irq;
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index fc5fb77..17558be 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -50,6 +50,9 @@
#define OMAP4_NR_BANKS 4
#define OMAP4_NR_IRQS 128
+#define SYS_NIRQ1_EXT_SYS_IRQ_1 7
+#define SYS_NIRQ2_EXT_SYS_IRQ_2 119
+
static void __iomem *wakeupgen_base;
static void __iomem *sar_base;
static DEFINE_RAW_SPINLOCK(wakeupgen_lock);
@@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d)
irq_chip_unmask_parent(d);
}
+/*
+ * The sys_nirq pins bypass peripheral modules and are wired directly
+ * to MPUSS wakeupgen. They get automatically inverted for GIC.
+ */
+static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ bool inverted = false;
+
+ switch (type) {
+ case IRQ_TYPE_LEVEL_LOW:
+ type &= ~IRQ_TYPE_LEVEL_MASK;
+ type |= IRQ_TYPE_LEVEL_HIGH;
+ inverted = true;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type &= ~IRQ_TYPE_EDGE_BOTH;
+ type |= IRQ_TYPE_EDGE_RISING;
+ inverted = true;
+ break;
+ default:
+ break;
+ }
+
+ if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 &&
+ d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2)
+ pr_warn("wakeupgen: irq%li polarity inverted in dts\n",
+ d->hwirq);
+
+ return irq_chip_set_type_parent(d, type);
+}
+
#ifdef CONFIG_HOTPLUG_CPU
static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks);
@@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = {
.irq_mask = wakeupgen_mask,
.irq_unmask = wakeupgen_unmask,
.irq_retrigger = irq_chip_retrigger_hierarchy,
- .irq_set_type = irq_chip_set_type_parent,
+ .irq_set_type = wakeupgen_irq_set_type,
.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
#ifdef CONFIG_SMP
.irq_set_affinity = irq_chip_set_affinity_parent,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index cd65ea4..ec3789b 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2397,7 +2397,7 @@ static int __init _init(struct omap_hwmod *oh, void *data)
* a stub; implementing this properly requires iclk autoidle usecounting in
* the clock code. No return value.
*/
-static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
+static void _setup_iclk_autoidle(struct omap_hwmod *oh)
{
struct omap_hwmod_ocp_if *os;
@@ -2428,7 +2428,7 @@ static void __init _setup_iclk_autoidle(struct omap_hwmod *oh)
* reset. Returns 0 upon success or a negative error code upon
* failure.
*/
-static int __init _setup_reset(struct omap_hwmod *oh)
+static int _setup_reset(struct omap_hwmod *oh)
{
int r;
@@ -2489,7 +2489,7 @@ static int __init _setup_reset(struct omap_hwmod *oh)
*
* No return value.
*/
-static void __init _setup_postsetup(struct omap_hwmod *oh)
+static void _setup_postsetup(struct omap_hwmod *oh)
{
u8 postsetup_state;
diff --git a/arch/arm/mach-pxa/cm-x300.c b/arch/arm/mach-pxa/cm-x300.c
index c5c0ab8a..024c1fb 100644
--- a/arch/arm/mach-pxa/cm-x300.c
+++ b/arch/arm/mach-pxa/cm-x300.c
@@ -558,7 +558,7 @@ static struct pxa3xx_u2d_platform_data cm_x300_u2d_platform_data = {
.exit = cm_x300_u2d_exit,
};
-static void cm_x300_init_u2d(void)
+static void __init cm_x300_init_u2d(void)
{
pxa3xx_set_u2d_info(&cm_x300_u2d_platform_data);
}
diff --git a/arch/arm/mach-pxa/littleton.c b/arch/arm/mach-pxa/littleton.c
index 9e132b3..9960ea1 100644
--- a/arch/arm/mach-pxa/littleton.c
+++ b/arch/arm/mach-pxa/littleton.c
@@ -184,7 +184,7 @@ static struct pxafb_mach_info littleton_lcd_info = {
.lcd_conn = LCD_COLOR_TFT_16BPP,
};
-static void littleton_init_lcd(void)
+static void __init littleton_init_lcd(void)
{
pxa_set_fb_info(NULL, &littleton_lcd_info);
}
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index e385179..68a536d 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -559,7 +559,7 @@ static struct pxaohci_platform_data zeus_ohci_platform_data = {
.flags = ENABLE_PORT_ALL | POWER_SENSE_LOW,
};
-static void zeus_register_ohci(void)
+static void __init zeus_register_ohci(void)
{
/* Port 2 is shared between host and client interface. */
UP2OCR = UP2OCR_HXOE | UP2OCR_HXS | UP2OCR_DMPDE | UP2OCR_DPPDE;
diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c
index 028e50c..a32c3b6 100644
--- a/arch/arm/mach-tango/pm.c
+++ b/arch/arm/mach-tango/pm.c
@@ -3,6 +3,7 @@
#include <linux/suspend.h>
#include <asm/suspend.h>
#include "smc.h"
+#include "pm.h"
static int tango_pm_powerdown(unsigned long arg)
{
@@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = {
.valid = suspend_valid_only_mem,
};
-static int __init tango_pm_init(void)
+void __init tango_pm_init(void)
{
suspend_set_ops(&tango_pm_ops);
- return 0;
}
-
-late_initcall(tango_pm_init);
diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h
new file mode 100644
index 0000000..35ea705
--- /dev/null
+++ b/arch/arm/mach-tango/pm.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifdef CONFIG_SUSPEND
+void __init tango_pm_init(void);
+#else
+#define tango_pm_init NULL
+#endif
diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c
index 677dd7b..824f907 100644
--- a/arch/arm/mach-tango/setup.c
+++ b/arch/arm/mach-tango/setup.c
@@ -2,6 +2,7 @@
#include <asm/mach/arch.h>
#include <asm/hardware/cache-l2x0.h>
#include "smc.h"
+#include "pm.h"
static void tango_l2c_write(unsigned long val, unsigned int reg)
{
@@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT")
.dt_compat = tango_dt_compat,
.l2c_aux_mask = ~0,
.l2c_write_sec = tango_l2c_write,
+ .init_late = tango_pm_init,
MACHINE_END
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index 81d0efb..5461d58 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -274,6 +274,13 @@
.endm
.macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0
+/*
+ * If we are building for big.LITTLE with branch predictor hardening,
+ * we need the processor function tables to remain available after boot.
+ */
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+ .section ".rodata"
+#endif
.type \name\()_processor_functions, #object
.align 2
ENTRY(\name\()_processor_functions)
@@ -309,6 +316,9 @@
.endif
.size \name\()_processor_functions, . - \name\()_processor_functions
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+ .previous
+#endif
.endm
.macro define_cache_functions name:req
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c
index 5544b82..9a07916 100644
--- a/arch/arm/mm/proc-v7-bugs.c
+++ b/arch/arm/mm/proc-v7-bugs.c
@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
case ARM_CPU_PART_CORTEX_A17:
case ARM_CPU_PART_CORTEX_A73:
case ARM_CPU_PART_CORTEX_A75:
- if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
- goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
harden_branch_predictor_bpiall;
spectre_v2_method = "BPIALL";
@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
case ARM_CPU_PART_CORTEX_A15:
case ARM_CPU_PART_BRAHMA_B15:
- if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
- goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
harden_branch_predictor_iciallu;
spectre_v2_method = "ICIALLU";
@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
if ((int)res.a0 != 0)
break;
- if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
- goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
call_hvc_arch_workaround_1;
- processor.switch_mm = cpu_v7_hvc_switch_mm;
+ cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
spectre_v2_method = "hypervisor";
break;
@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
ARM_SMCCC_ARCH_WORKAROUND_1, &res);
if ((int)res.a0 != 0)
break;
- if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
- goto bl_error;
per_cpu(harden_branch_predictor_fn, cpu) =
call_smc_arch_workaround_1;
- processor.switch_mm = cpu_v7_smc_switch_mm;
+ cpu_do_switch_mm = cpu_v7_smc_switch_mm;
spectre_v2_method = "firmware";
break;
@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
if (spectre_v2_method)
pr_info("CPU%u: Spectre v2: using %s workaround\n",
smp_processor_id(), spectre_v2_method);
- return;
-
-bl_error:
- pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
- cpu);
}
#else
static void cpu_v7_spectre_init(void)
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index 2c118a6..0dc23fc 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or
}
/* Copy arch-dep-instance from template. */
- memcpy(code, (unsigned char *)optprobe_template_entry,
+ memcpy(code, (unsigned long *)&optprobe_template_entry,
TMPL_END_IDX * sizeof(kprobe_opcode_t));
/* Adjust buffer according to instruction. */
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index dc7e6b5..66c5e69 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -553,12 +553,11 @@ void vfp_flush_hwstate(struct thread_info *thread)
* Save the current VFP state into the provided structures and prepare
* for entry into a new function (signal handler).
*/
-int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
- struct user_vfp_exc __user *ufp_exc)
+int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
+ struct user_vfp_exc *ufp_exc)
{
struct thread_info *thread = current_thread_info();
struct vfp_hard_struct *hwstate = &thread->vfpstate.hard;
- int err = 0;
/* Ensure that the saved hwstate is up-to-date. */
vfp_sync_hwstate(thread);
@@ -567,22 +566,19 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp __user *ufp,
* Copy the floating point registers. There can be unused
* registers see asm/hwcap.h for details.
*/
- err |= __copy_to_user(&ufp->fpregs, &hwstate->fpregs,
- sizeof(hwstate->fpregs));
+ memcpy(&ufp->fpregs, &hwstate->fpregs, sizeof(hwstate->fpregs));
+
/*
* Copy the status and control register.
*/
- __put_user_error(hwstate->fpscr, &ufp->fpscr, err);
+ ufp->fpscr = hwstate->fpscr;
/*
* Copy the exception registers.
*/
- __put_user_error(hwstate->fpexc, &ufp_exc->fpexc, err);
- __put_user_error(hwstate->fpinst, &ufp_exc->fpinst, err);
- __put_user_error(hwstate->fpinst2, &ufp_exc->fpinst2, err);
-
- if (err)
- return -EFAULT;
+ ufp_exc->fpexc = hwstate->fpexc;
+ ufp_exc->fpinst = hwstate->fpinst;
+ ufp_exc->fpinst2 = hwstate->fpinst2;
/* Ensure that VFP is disabled. */
vfp_flush_hwstate(thread);
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 8ffb521..a38031f 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -14,15 +14,19 @@
kona-rumi-overlay.dtbo \
kona-qrd-overlay.dtbo
-kona-cdp-overlay.dtbo-base := kona.dtb
-kona-mtp-overlay.dtbo-base := kona.dtb
-kona-rumi-overlay.dtbo-base := kona.dtb
-kona-qrd-overlay.dtbo-base := kona.dtb
+kona-cdp-overlay.dtbo-base := kona.dtb kona-v2.dtb
+kona-mtp-overlay.dtbo-base := kona.dtb kona-v2.dtb
+kona-rumi-overlay.dtbo-base := kona.dtb kona-v2.dtb
+kona-qrd-overlay.dtbo-base := kona.dtb kona-v2.dtb
else
dtb-$(CONFIG_ARCH_KONA) += kona-rumi.dtb \
kona-mtp.dtb \
kona-cdp.dtb \
- kona-qrd.dtb
+ kona-qrd.dtb \
+ kona-v2-rumi.dtb \
+ kona-v2-mtp.dtb \
+ kona-v2-cdp.dtb \
+ kona-v2-qrd.dtb
endif
ifeq ($(CONFIG_BUILD_ARM64_DT_OVERLAY),y)
diff --git a/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
index 9a3f55c..8e81f00 100644
--- a/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
@@ -230,6 +230,9 @@
"RX_TX DEC3_INP", "TX DEC3 MUX",
"SpkrLeft IN", "WSA_SPK1 OUT",
"SpkrRight IN", "WSA_SPK2 OUT",
+ "VA_AIF1 CAP", "VA_SWR_CLK",
+ "VA_AIF2 CAP", "VA_SWR_CLK",
+ "VA_AIF3 CAP", "VA_SWR_CLK",
"VA DMIC0", "MIC BIAS3",
"VA DMIC1", "MIC BIAS3",
"VA DMIC2", "MIC BIAS1",
diff --git a/arch/arm64/boot/dts/qcom/kona-audio.dtsi b/arch/arm64/boot/dts/qcom/kona-audio.dtsi
index 604d389..929168ba 100644
--- a/arch/arm64/boot/dts/qcom/kona-audio.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-audio.dtsi
@@ -3,6 +3,8 @@
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
+#include <dt-bindings/clock/qcom,audio-ext-clk.h>
+
#include "kona-lpi.dtsi"
#include "msm-audio-lpass.dtsi"
@@ -25,8 +27,17 @@
&audio_apr {
q6core: qcom,q6core-audio {
compatible = "qcom,q6core-audio";
+
+ lpass_core_hw_vote: vote_lpass_core_hw {
+ compatible = "qcom,audio-ref-clk";
+ qcom,codec-ext-clk-src = <AUDIO_LPASS_CORE_HW_VOTE>;
+ #clock-cells = <1>;
+ };
+
bolero: bolero-cdc {
compatible = "qcom,bolero-codec";
+ clock-names = "lpass_core_hw_vote";
+ clocks = <&lpass_core_hw_vote 0>;
tx_macro: tx-macro@3220000 {
swr2: tx_swr_master {
};
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
index 09baf55..c2b1fdd 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
@@ -38,7 +38,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <2856000>;
rgltr-max-voltage = <3104000>;
- rgltr-load-current = <0>;
+ rgltr-load-current = <100000>;
};
eeprom_rear: qcom,eeprom0 {
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
index 514349c..65cdd1b 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
@@ -38,7 +38,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <2856000>;
rgltr-max-voltage = <3104000>;
- rgltr-load-current = <0>;
+ rgltr-load-current = <100000>;
};
eeprom_rear: qcom,eeprom0 {
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
index 44060f0..9348b4c 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
@@ -38,7 +38,7 @@
rgltr-cntrl-support;
rgltr-min-voltage = <2856000>;
rgltr-max-voltage = <3104000>;
- rgltr-load-current = <0>;
+ rgltr-load-current = <100000>;
};
eeprom_rear: qcom,eeprom0 {
diff --git a/arch/arm64/boot/dts/qcom/kona-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
index 09e23c8..3199342 100644
--- a/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-cdp.dtsi
@@ -4,6 +4,8 @@
*/
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
#include "kona-pmic-overlay.dtsi"
#include "kona-sde-display.dtsi"
#include "kona-camera-sensor-cdp.dtsi"
@@ -152,6 +154,18 @@
qca,bt-vdd-rfa1-voltage-level = <1900000 1900000>;
qca,bt-vdd-rfa2-voltage-level = <1350000 1350000>;
};
+
+ extcon_usb1: extcon_usb1 {
+ compatible = "linux,extcon-usb-gpio";
+ vbus-gpio = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>;
+ id-gpio = <&tlmm 91 GPIO_ACTIVE_HIGH>;
+ vbus-out-gpio = <&pm8150_gpios 9 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_vbus_det_default
+ &usb2_id_det_default
+ &usb2_vbus_boost_default>;
+ };
};
&dsi_sw43404_amoled_cmd {
@@ -519,6 +533,9 @@
"RX_TX DEC3_INP", "TX DEC3 MUX",
"SpkrLeft IN", "WSA_SPK1 OUT",
"SpkrRight IN", "WSA_SPK2 OUT",
+ "VA_AIF1 CAP", "VA_SWR_CLK",
+ "VA_AIF2 CAP", "VA_SWR_CLK",
+ "VA_AIF3 CAP", "VA_SWR_CLK",
"VA DMIC0", "MIC BIAS3",
"VA DMIC1", "MIC BIAS3",
"VA DMIC2", "MIC BIAS1",
@@ -643,3 +660,6 @@
status = "ok";
};
+&usb1 {
+ extcon = <&extcon_usb1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-coresight.dtsi b/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
index 38a3d7e..df7ce4a 100644
--- a/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-coresight.dtsi
@@ -492,6 +492,107 @@
};
};
+ funnel_gpu: funnel@6902000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb908>;
+
+ reg = <0x6902000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-gpu";
+
+ clocks = <&clock_aop QDSS_CLK>,
+ <&clock_gpucc GPU_CC_CXO_CLK>,
+ <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
+ <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&clock_gpucc GPU_CC_CX_GMU_CLK>,
+ <&clock_gpucc GPU_CC_AHB_CLK>,
+ <&clock_cpucc L3_GPU_VOTE_CLK>;
+
+ clock-names = "apb_pclk",
+ "rbbmtimer_clk",
+ "mem_clk",
+ "mem_iface_clk",
+ "gmu_clk",
+ "gpu_cc_ahb",
+ "l3_vote";
+
+ qcom,proxy-clks = "rbbmtimer_clk",
+ "mem_clk",
+ "mem_iface_clk",
+ "gmu_clk",
+ "gpu_cc_ahb",
+ "l3_vote";
+
+ vddcx-supply = <&gpu_cx_gdsc>;
+ vdd-supply = <&gpu_gx_gdsc>;
+ regulator-names = "vddcx", "vdd";
+ qcom,proxy-regs = "vddcx", "vdd";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ funnel_gpu_out_tpda: endpoint {
+ remote-endpoint =
+ <&tpda_in_funnel_gpu>;
+ };
+ };
+
+ port@1 {
+ reg = <0>;
+ funnel_gpu_in_tpdm_gpu: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&tpdm_gpu_out_funnel_gpu>;
+ };
+ };
+ };
+ };
+
+ tpdm_gpu: tpdm@6900000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b968>;
+ reg = <0x6900000 0x1000>;
+ reg-names = "tpdm-base";
+
+ coresight-name = "coresight-tpdm-gpu";
+
+ clocks = <&clock_aop QDSS_CLK>,
+ <&clock_gpucc GPU_CC_CXO_CLK>,
+ <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
+ <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&clock_gpucc GPU_CC_CX_GMU_CLK>,
+ <&clock_gpucc GPU_CC_AHB_CLK>,
+ <&clock_cpucc L3_GPU_VOTE_CLK>;
+ clock-names = "apb_pclk",
+ "rbbmtimer_clk",
+ "mem_clk",
+ "mem_iface_clk",
+ "gmu_clk",
+ "gpu_cc_ahb",
+ "l3_vote";
+
+ qcom,proxy-clks = "rbbmtimer_clk",
+ "mem_clk",
+ "mem_iface_clk",
+ "gmu_clk",
+ "gpu_cc_ahb",
+ "l3_vote";
+
+ vddcx-supply = <&gpu_cx_gdsc>;
+ vdd-supply = <&gpu_gx_gdsc>;
+ regulator-names = "vddcx", "vdd";
+ qcom,proxy-regs = "vddcx", "vdd";
+
+ port {
+ tpdm_gpu_out_funnel_gpu: endpoint {
+ remote-endpoint = <&funnel_gpu_in_tpdm_gpu>;
+ };
+ };
+ };
+
tpda: tpda@6004000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x000bb969>;
@@ -544,6 +645,15 @@
};
port@1 {
+ reg = <1>;
+ tpda_in_funnel_gpu: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_gpu_out_tpda>;
+ };
+ };
+
+ port@2 {
reg = <6>;
tpda_6_in_tpdm_venus: endpoint {
slave-mode;
@@ -552,7 +662,7 @@
};
};
- port@2 {
+ port@3 {
reg = <7>;
tpda_7_in_tpdm_mdss: endpoint {
slave-mode;
@@ -561,7 +671,7 @@
};
};
- port@3 {
+ port@4 {
reg = <9>;
tpda_9_in_tpdm_mm: endpoint {
slave-mode;
@@ -570,7 +680,7 @@
};
};
- port@4 {
+ port@5 {
reg = <10>;
tpda_10_in_funnel_dl_center: endpoint {
slave-mode;
@@ -579,7 +689,7 @@
};
};
- port@5 {
+ port@6 {
reg = <11>;
tpda_11_in_tpdm_ddr_ch02: endpoint {
slave-mode;
@@ -588,7 +698,7 @@
};
};
- port@6 {
+ port@7 {
reg = <12>;
tpda_12_in_tpdm_ddr_ch13: endpoint {
slave-mode;
@@ -597,7 +707,7 @@
};
};
- port@7 {
+ port@8 {
reg = <13>;
tpda_13_in_tpdm_ddr: endpoint {
slave-mode;
@@ -606,7 +716,7 @@
};
};
- port@8 {
+ port@9 {
reg = <14>;
tpda_14_in_tpdm_turing: endpoint {
slave-mode;
@@ -615,7 +725,7 @@
};
};
- port@9 {
+ port@10 {
reg = <15>;
tpda_15_in_tpdm_llm_turing: endpoint {
slave-mode;
@@ -624,7 +734,7 @@
};
};
- port@10 {
+ port@11 {
reg = <16>;
tpda_16_in_tpdm_npu: endpoint {
slave-mode;
@@ -633,7 +743,7 @@
};
};
- port@11 {
+ port@12 {
reg = <17>;
tpda_17_in_tpdm_npu_llm: endpoint {
slave-mode;
@@ -642,7 +752,7 @@
};
};
- port@12 {
+ port@13 {
reg = <18>;
tpda_18_in_tpdm_npu_dpm: endpoint {
slave-mode;
@@ -651,7 +761,7 @@
};
};
- port@13 {
+ port@14 {
reg = <19>;
tpda_19_in_tpdm_dlct: endpoint {
slave-mode;
@@ -660,7 +770,7 @@
};
};
- port@14 {
+ port@15 {
reg = <20>;
tpda_20_in_tpdm_ipcc: endpoint {
slave-mode;
@@ -669,7 +779,7 @@
};
};
- port@15 {
+ port@16 {
reg = <21>;
tpda_in_tpdm_vsense: endpoint {
slave-mode;
@@ -678,7 +788,7 @@
};
};
- port@16 {
+ port@17 {
reg = <22>;
tpda_in_tpdm_dcc: endpoint {
slave-mode;
@@ -687,7 +797,7 @@
};
};
- port@17 {
+ port@18 {
reg = <23>;
tpda_in_tpdm_prng: endpoint {
slave-mode;
@@ -696,7 +806,7 @@
};
};
- port@18 {
+ port@19 {
reg = <24>;
tpda_in_tpdm_qm: endpoint {
slave-mode;
@@ -705,7 +815,7 @@
};
};
- port@19 {
+ port@20 {
reg = <25>;
tpda_in_tpdm_pimem: endpoint {
slave-mode;
@@ -2683,6 +2793,193 @@
clock-names = "apb_pclk";
};
+ cti_gpu_m3: cti@6962000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6962000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-gpu_cortex_m3";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_gpu_isdb: cti@6961000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6961000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-gpu_isdb_cti";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_iris: cti@6831000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6831000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-iris_dl_cti";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_lpass: cti@6845000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6845000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-lpass_dl_cti";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_lpass_lpi: cti@6b21000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6b21000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-lpass_lpi_cti";
+ status = "disabled";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_lpass_q6: cti@6b2b000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6b2b000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-lpass_q6_cti";
+ status = "disabled";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_mdss: cti@6c61000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6c61000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-mdss_dl_cti";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_npu_dl0: cti@6c42000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6c42000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-npu_dl_cti_0";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_npu_dl1: cti@6c43000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6c43000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-npu_dl_cti_1";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_npu: cti@6c4b000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6c4b000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-npu_q6_cti";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_titan: cti@6c13000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6c13000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-sierra_a6_cti";
+ status = "disabled";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_sdc: cti@6b40000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6b40000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-ssc_cortex_m3";
+ status = "disabled";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_ssc0: cti@6b4b000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6b4b000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-ssc_cti0_q6";
+ status = "disabled";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_ssc1: cti@6b41000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6b41000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-ssc_cti1";
+ status = "disabled";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_ssc4: cti@6b4e000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6b4e000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-ssc_cti_noc";
+ status = "disabled";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
cti0_swao:cti@6b00000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x000bb966>;
@@ -2695,6 +2992,78 @@
clock-names = "apb_pclk";
};
+ cti1_swao:cti@6b01000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6b01000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-swao_cti1";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti2_swao:cti@6b02000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6b02000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-swao_cti2";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti3_swao:cti@6b03000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6b03000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-swao_cti3";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_turing:cti@6982000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6982000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-turing_dl_cti";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_turing_q6:cti@698b000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x698b000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-turing_q6_cti";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
+ cti_compute:cti@6c38000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x000bb966>;
+ reg = <0x6c38000 0x1000>;
+ reg-names = "cti-base";
+
+ coresight-name = "coresight-cti-compute_dl_cti";
+
+ clocks = <&clock_aop QDSS_CLK>;
+ clock-names = "apb_pclk";
+ };
+
ipcb_tgu: tgu@6b0b000 {
compatible = "arm,primecell";
arm,primecell-periphid = <0x000bb999>;
diff --git a/arch/arm64/boot/dts/qcom/kona-gpu.dtsi b/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
index 82a7cfb..155e294 100644
--- a/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-gpu.dtsi
@@ -224,11 +224,12 @@
/* CB5(ATOS) & CB5/6/7 are protected by HYP */
qcom,protect = <0xa0000 0xc000>;
- clocks =<&clock_gcc GCC_GPU_CFG_AHB_CLK>,
- <&clock_gcc GCC_DDRSS_GPU_AXI_CLK>,
- <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>;
-
- clock-names = "iface_clk", "mem_clk", "mem_iface_clk";
+ clocks = <&clock_gcc GCC_GPU_MEMNOC_GFX_CLK>,
+ <&clock_gcc GCC_GPU_SNOC_DVM_GFX_CLK>,
+ <&clock_gpucc GPU_CC_AHB_CLK>;
+ clock-names = "gcc_gpu_memnoc_gfx",
+ "gcc_gpu_snoc_dvm_gfx",
+ "gpu_cc_ahb";
qcom,secure_align_mask = <0xfff>;
qcom,global_pt;
diff --git a/arch/arm64/boot/dts/qcom/kona-lpi.dtsi b/arch/arm64/boot/dts/qcom/kona-lpi.dtsi
index 9b03761..04b591c 100644
--- a/arch/arm64/boot/dts/qcom/kona-lpi.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-lpi.dtsi
@@ -26,6 +26,9 @@
<0x00000010>, <0x00000012>,
<0x00000000>, <0x00000000>;
+ clock-names = "lpass_core_hw_vote";
+ clocks = <&lpass_core_hw_vote 0>;
+
quat_mi2s_sck {
quat_mi2s_sck_sleep: quat_mi2s_sck_sleep {
mux {
diff --git a/arch/arm64/boot/dts/qcom/kona-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
index e99155f..932f194 100644
--- a/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-mtp.dtsi
@@ -4,6 +4,8 @@
*/
#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
#include "kona-pmic-overlay.dtsi"
#include "kona-sde-display.dtsi"
#include "kona-camera-sensor-mtp.dtsi"
@@ -150,6 +152,18 @@
#include "fg-gen4-batterydata-alium-3600mah.dtsi"
#include "fg-gen4-batterydata-ascent-3450mah.dtsi"
};
+
+ extcon_usb1: extcon_usb1 {
+ compatible = "linux,extcon-usb-gpio";
+ vbus-gpio = <&pm8150_gpios 10 GPIO_ACTIVE_HIGH>;
+ id-gpio = <&tlmm 91 GPIO_ACTIVE_HIGH>;
+ vbus-out-gpio = <&pm8150_gpios 9 GPIO_ACTIVE_HIGH>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&usb2_vbus_det_default
+ &usb2_id_det_default
+ &usb2_vbus_boost_default>;
+ };
};
&vreg_hap_boost {
@@ -247,6 +261,24 @@
qcom,fg-esr-cal-temp-thresh = <10 40>;
};
+&qupv3_se15_i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+#include "smb1390.dtsi"
+};
+
+&smb1390 {
+ pinctrl-names = "default";
+ pinctrl-0 = <&smb_stat_default>;
+ status = "ok";
+};
+
+&smb1390_charger {
+ io-channels = <&pm8150b_vadc ADC_AMUX_THM2>;
+ io-channel-names = "cp_die_temp";
+ status = "ok";
+};
+
&pm8150_vadc {
#address-cells = <1>;
#size-cells = <0>;
@@ -395,6 +427,42 @@
qcom,platform-reset-gpio = <&tlmm 75 0>;
};
+&dsi_sim_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sim_vid {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_sim_dsc_375_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_vid {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
+&dsi_dual_sim_dsc_375_cmd {
+ qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
+ qcom,platform-reset-gpio = <&tlmm 75 0>;
+};
+
&sde_dsi {
qcom,dsi-default-panel = <&dsi_sw43404_amoled_cmd>;
};
@@ -503,3 +571,6 @@
status = "ok";
};
+&usb1 {
+ extcon = <&extcon_usb1>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
index bb5cbca..fbde9a3 100644
--- a/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
@@ -2538,7 +2538,35 @@
};
};
- /* QUPv3_0 North SE mappings */
+ /* QUPv3_0 North SE0 mappings */
+ qupv3_se0_i3c_pins: qupv3_se0_i3c_pins {
+ qupv3_se0_i3c_active: qupv3_se0_i3c_active {
+ mux {
+ pins = "gpio28", "gpio29";
+ function = "ibi_i3c";
+ };
+
+ config {
+ pins = "gpio28", "gpio29";
+ drive-strength = <16>;
+ bias-pull-up;
+ };
+ };
+
+ qupv3_se0_i3c_sleep: qupv3_se0_i3c_sleep {
+ mux {
+ pins = "gpio28", "gpio29";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio28", "gpio29";
+ drive-strength = <2>;
+ bias-disable;
+ };
+ };
+ };
+
/* SE 0 pin mappings */
qupv3_se0_i2c_pins: qupv3_se0_i2c_pins {
qupv3_se0_i2c_active: qupv3_se0_i2c_active {
@@ -3852,5 +3880,14 @@
};
};
};
+
+ usb2_id_det_default: usb2_id_det_default {
+ config {
+ pins = "gpio91";
+ function = "gpio";
+ input-enable;
+ bias-pull-up;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/kona-pmic-overlay.dtsi b/arch/arm64/boot/dts/qcom/kona-pmic-overlay.dtsi
index fbf792a..634c688 100644
--- a/arch/arm64/boot/dts/qcom/kona-pmic-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-pmic-overlay.dtsi
@@ -56,6 +56,25 @@
power-source = <1>;
};
};
+
+ usb2_vbus_boost {
+ usb2_vbus_boost_default: usb2_vbus_boost_default {
+ pins = "gpio9";
+ function = "normal";
+ output-low;
+ power-source = <1>; /* 1.8V input supply */
+ };
+ };
+
+ usb2_vbus_det {
+ usb2_vbus_det_default: usb2_vbus_det_default {
+ pins = "gpio10";
+ function = "normal";
+ input-enable;
+ bias-pull-down;
+ power-source = <1>; /* 1.8V input supply */
+ };
+ };
};
&pm8150b_gpios {
diff --git a/arch/arm64/boot/dts/qcom/kona-qrd.dtsi b/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
index d095d5c..29b02de 100644
--- a/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-qrd.dtsi
@@ -76,16 +76,30 @@
"RX_TX DEC2_INP", "TX DEC2 MUX",
"RX_TX DEC3_INP", "TX DEC3 MUX",
"SpkrRight IN", "WSA_SPK2 OUT",
+ "VA_AIF1 CAP", "VA_SWR_CLK",
+ "VA_AIF2 CAP", "VA_SWR_CLK",
+ "VA_AIF3 CAP", "VA_SWR_CLK",
+ "VA DMIC0", "MIC BIAS3",
"VA DMIC1", "MIC BIAS3",
"VA DMIC2", "MIC BIAS1",
"VA DMIC3", "MIC BIAS1",
"VA DMIC5", "MIC BIAS4",
+ "VA SWR_MIC0", "DMIC1_OUTPUT",
+ "VA SWR_MIC1", "DMIC2_OUTPUT",
+ "VA SWR_MIC2", "DMIC3_OUTPUT",
+ "VA SWR_MIC3", "DMIC4_OUTPUT",
+ "VA SWR_MIC4", "DMIC5_OUTPUT",
+ "VA SWR_MIC5", "DMIC6_OUTPUT",
+ "VA SWR_MIC6", "DMIC7_OUTPUT",
+ "VA SWR_MIC7", "DMIC8_OUTPUT",
"VA SWR_ADC1", "ADC2_OUTPUT";
qcom,wsa-max-devs = <1>;
qcom,wsa-devs = <&wsa881x_0212>, <&wsa881x_0214>;
qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
qcom,msm-mbhc-usbc-audio-supported = <1>;
+ qcom,msm-mbhc-hphl-swh = <0>;
+ qcom,msm-mbhc-gnd-swh = <0>;
};
&qupv3_se1_i2c {
diff --git a/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi b/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi
index 85e332c..a7df830 100644
--- a/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-qupv3.dtsi
@@ -17,6 +17,24 @@
qcom,iommu-dma = "bypass";
};
+ /* QUPV3_0_SE0 */
+ i3c0: i3c@980000 {
+ compatible = "qcom,geni-i3c";
+ reg = <0x980000 0x4000>;
+ clock-names = "se-clk", "m-ahb", "s-ahb";
+ clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_M_AHB_CLK>,
+ <&clock_gcc GCC_QUPV3_WRAP_0_S_AHB_CLK>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&qupv3_se0_i3c_active>;
+ pinctrl-1 = <&qupv3_se0_i3c_sleep>;
+ interrupts = <GIC_SPI 601 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <3>;
+ #size-cells = <0>;
+ qcom,wrapper-core = <&qupv3_0>;
+ status = "disabled";
+ };
+
/* Debug UART Instance for RUMI platform */
qupv3_se2_2uart: qcom,qup_uart@988000 {
compatible = "qcom,msm-geni-console";
diff --git a/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi
index d257c0a..373150d 100644
--- a/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-sde-pll.dtsi
@@ -17,6 +17,8 @@
clock-names = "iface_clk";
clock-rate = <0>;
gdsc-supply = <&mdss_core_gdsc>;
+ qcom,dsi-pll-ssc-en;
+ qcom,dsi-pll-ssc-mode = "down-spread";
qcom,platform-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
@@ -44,6 +46,8 @@
clock-names = "iface_clk";
clock-rate = <0>;
gdsc-supply = <&mdss_core_gdsc>;
+ qcom,dsi-pll-ssc-en;
+ qcom,dsi-pll-ssc-mode = "down-spread";
qcom,platform-supply-entries {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/qcom/kona-sde.dtsi b/arch/arm64/boot/dts/qcom/kona-sde.dtsi
index 3c30d5b..6158a8e 100644
--- a/arch/arm64/boot/dts/qcom/kona-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-sde.dtsi
@@ -181,17 +181,17 @@
qcom,sde-vbif-qos-lutdma-remap = <3 3 3 3 4 4 4 4>;
/* macrotile & macrotile-qseed has the same configs */
- qcom,sde-danger-lut = <0x0000000f 0x0000ffff
+ qcom,sde-danger-lut = <0x000000ff 0x0000ffff
0x00000000 0x00000000 0x0000ffff>;
- qcom,sde-safe-lut-linear = <0 0xfffc>;
+ qcom,sde-safe-lut-linear = <0 0xfff0>;
qcom,sde-safe-lut-macrotile = <0 0xff00>;
/* same as safe-lut-macrotile */
qcom,sde-safe-lut-macrotile-qseed = <0 0xff00>;
qcom,sde-safe-lut-nrt = <0 0xffff>;
qcom,sde-safe-lut-cwb = <0 0x3ff>;
- qcom,sde-qos-lut-linear = <0 0x00112222 0x22223357>;
+ qcom,sde-qos-lut-linear = <0 0x00112222 0x22335777>;
qcom,sde-qos-lut-macrotile = <0 0x00112233 0x44556677>;
qcom,sde-qos-lut-macrotile-qseed = <0 0x00112233 0x66777777>;
qcom,sde-qos-lut-nrt = <0 0x00000000 0x00000000>;
@@ -374,6 +374,10 @@
qcom,mst-enable;
qcom,widebus-enable;
+ qcom,dsc-feature-enable;
+ qcom,fec-feature-enable;
+ qcom,max-dp-dsc-blks = <2>;
+ qcom,max-dp-dsc-input-width-pixs = <2048>;
qcom,ctrl-supply-entries {
#address-cells = <1>;
diff --git a/arch/arm64/boot/dts/qcom/kona-usb.dtsi b/arch/arm64/boot/dts/qcom/kona-usb.dtsi
index a29c7ee..9b39f65 100644
--- a/arch/arm64/boot/dts/qcom/kona-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-usb.dtsi
@@ -340,6 +340,7 @@
usb_audio_qmi_dev {
compatible = "qcom,usb-audio-qmi-dev";
iommus = <&apps_smmu 0x180f 0x0>;
+ qcom,iommu-dma = "disabled";
qcom,usb-audio-stream-id = <0xf>;
qcom,usb-audio-intr-num = <2>;
};
@@ -347,4 +348,242 @@
usb_nop_phy: usb_nop_phy {
compatible = "usb-nop-xceiv";
};
+
+ /* Secondary USB port related controller */
+ usb1: ssusb@a800000 {
+ compatible = "qcom,dwc-usb3-msm";
+ reg = <0xa800000 0x100000>;
+ reg-names = "core_base";
+
+ iommus = <&apps_smmu 0x20 0x0>;
+ qcom,iommu-dma = "atomic";
+ qcom,iommu-dma-addr-pool = <0x90000000 0x60000000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges;
+
+ interrupts-extended = <&pdc 12 IRQ_TYPE_LEVEL_HIGH>,
+ <&intc GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 16 IRQ_TYPE_LEVEL_HIGH>,
+ <&pdc 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "dp_hs_phy_irq", "pwr_event_irq",
+ "ss_phy_irq", "dm_hs_phy_irq";
+ qcom,use-pdc-interrupts;
+
+ USB3_GDSC-supply = <&usb30_sec_gdsc>;
+ clocks = <&clock_gcc GCC_USB30_SEC_MASTER_CLK>,
+ <&clock_gcc GCC_CFG_NOC_USB3_SEC_AXI_CLK>,
+ <&clock_gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
+ <&clock_gcc GCC_USB30_SEC_MOCK_UTMI_CLK>,
+ <&clock_gcc GCC_USB30_SEC_SLEEP_CLK>,
+ <&clock_gcc GCC_USB3_SEC_CLKREF_EN>;
+
+ clock-names = "core_clk", "iface_clk", "bus_aggr_clk",
+ "utmi_clk", "sleep_clk", "xo";
+
+ resets = <&clock_gcc GCC_USB30_SEC_BCR>;
+ reset-names = "core_reset";
+
+ qcom,core-clk-rate = <200000000>;
+ qcom,core-clk-rate-hs = <66666667>;
+ qcom,num-gsi-evt-buffs = <0x3>;
+ qcom,gsi-reg-offset =
+ <0x0fc /* GSI_GENERAL_CFG */
+ 0x110 /* GSI_DBL_ADDR_L */
+ 0x120 /* GSI_DBL_ADDR_H */
+ 0x130 /* GSI_RING_BASE_ADDR_L */
+ 0x144 /* GSI_RING_BASE_ADDR_H */
+ 0x1a4>; /* GSI_IF_STS */
+ qcom,dwc-usb3-msm-tx-fifo-size = <27696>;
+ qcom,charging-disabled;
+
+ qcom,msm-bus,name = "usb1";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <3>;
+ qcom,msm-bus,vectors-KBps =
+ /* suspend vote */
+ <MSM_BUS_MASTER_USB3_1 MSM_BUS_SLAVE_EBI_CH0 0 0>,
+ <MSM_BUS_MASTER_USB3_1 MSM_BUS_SLAVE_IPA_CFG 0 0>,
+ <MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3_1 0 0>,
+
+ /* nominal vote */
+ <MSM_BUS_MASTER_USB3_1
+ MSM_BUS_SLAVE_EBI_CH0 1000000 2500000>,
+ <MSM_BUS_MASTER_USB3_1 MSM_BUS_SLAVE_IPA_CFG 0 2400>,
+ <MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3_1 0 40000>,
+
+ /* svs vote */
+ <MSM_BUS_MASTER_USB3_1
+ MSM_BUS_SLAVE_EBI_CH0 240000 700000>,
+ <MSM_BUS_MASTER_USB3_1 MSM_BUS_SLAVE_IPA_CFG 0 2400>,
+ <MSM_BUS_MASTER_AMPSS_M0 MSM_BUS_SLAVE_USB3_1 0 40000>;
+
+ dwc3@a800000 {
+ compatible = "snps,dwc3";
+ reg = <0xa800000 0xcd00>;
+ interrupts = <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>;
+ usb-phy = <&usb2_phy1>, <&usb_qmp_phy>;
+ linux,sysdev_is_parent;
+ snps,disable-clk-gating;
+ snps,has-lpm-erratum;
+ snps,hird-threshold = /bits/ 8 <0x10>;
+ snps,usb3_lpm_capable;
+ usb-core-id = <1>;
+ tx-fifo-resize;
+ maximum-speed = "super-speed";
+ dr_mode = "drd";
+ };
+ };
+
+ /* Primary USB port related High Speed PHY */
+ usb2_phy1: hsphy@88e4000 {
+ compatible = "qcom,usb-hsphy-snps-femto";
+ reg = <0x88e4000 0x110>;
+ reg-names = "hsusb_phy_base";
+
+ vdd-supply = <&pm8150_l5>;
+ vdda18-supply = <&pm8150_l12>;
+ vdda33-supply = <&pm8150_l2>;
+ qcom,vdd-voltage-level = <0 880000 880000>;
+
+ clocks = <&clock_rpmh RPMH_CXO_CLK>;
+ clock-names = "ref_clk_src";
+
+ resets = <&clock_gcc GCC_QUSB2PHY_SEC_BCR>;
+ reset-names = "phy_reset";
+ };
+
+ /* Secondary USB port related QMP PHY */
+ usb_qmp_phy: ssphy@88eb000 {
+ compatible = "qcom,usb-ssphy-qmp-v2";
+ reg = <0x88eb000 0x1000>,
+ <0x088eb88c 0x4>;
+ reg-names = "qmp_phy_base",
+ "pcs_clamp_enable_reg";
+
+ vdd-supply = <&pm8150_l18>;
+ qcom,vdd-voltage-level = <0 880000 880000>;
+ qcom,vdd-max-load-uA = <47000>;
+ core-supply = <&pm8150_l9>;
+ qcom,vbus-valid-override;
+ qcom,qmp-phy-init-seq =
+ /* <reg_offset, value, delay> */
+ <USB3_UNI_QSERDES_COM_SYSCLK_EN_SEL 0x1a 0
+ USB3_UNI_QSERDES_COM_BIN_VCOCAL_HSCLK_SEL 0x11 0
+ USB3_UNI_QSERDES_COM_HSCLK_SEL 0x01 0
+ USB3_UNI_QSERDES_COM_DEC_START_MODE0 0x82 0
+ USB3_UNI_QSERDES_COM_DIV_FRAC_START1_MODE0 0xab 0
+ USB3_UNI_QSERDES_COM_DIV_FRAC_START2_MODE0 0xea 0
+ USB3_UNI_QSERDES_COM_DIV_FRAC_START3_MODE0 0x02 0
+ USB3_UNI_QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0xca 0
+ USB3_UNI_QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x1e 0
+ USB3_UNI_QSERDES_COM_CP_CTRL_MODE0 0x06 0
+ USB3_UNI_QSERDES_COM_PLL_RCTRL_MODE0 0x16 0
+ USB3_UNI_QSERDES_COM_PLL_CCTRL_MODE0 0x36 0
+ USB3_UNI_QSERDES_COM_VCO_TUNE1_MODE0 0x24 0
+ USB3_UNI_QSERDES_COM_LOCK_CMP2_MODE0 0x34 0
+ USB3_UNI_QSERDES_COM_LOCK_CMP1_MODE0 0x14 0
+ USB3_UNI_QSERDES_COM_LOCK_CMP_EN 0x04 0
+ USB3_UNI_QSERDES_COM_SYSCLK_BUF_ENABLE 0x0a 0
+ USB3_UNI_QSERDES_COM_VCO_TUNE2_MODE1 0x02 0
+ USB3_UNI_QSERDES_COM_VCO_TUNE1_MODE1 0x24 0
+ USB3_UNI_QSERDES_COM_CORECLK_DIV_MODE1 0x08 0
+ USB3_UNI_QSERDES_COM_DEC_START_MODE1 0x82 0
+ USB3_UNI_QSERDES_COM_DIV_FRAC_START1_MODE1 0xab 0
+ USB3_UNI_QSERDES_COM_DIV_FRAC_START2_MODE1 0xea 0
+ USB3_UNI_QSERDES_COM_DIV_FRAC_START3_MODE1 0x02 0
+ USB3_UNI_QSERDES_COM_LOCK_CMP2_MODE1 0x82 0
+ USB3_UNI_QSERDES_COM_LOCK_CMP1_MODE1 0x34 0
+ USB3_UNI_QSERDES_COM_CP_CTRL_MODE1 0x06 0
+ USB3_UNI_QSERDES_COM_PLL_RCTRL_MODE1 0x16 0
+ USB3_UNI_QSERDES_COM_PLL_CCTRL_MODE1 0x36 0
+ USB3_UNI_QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0xca 0
+ USB3_UNI_QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x1e 0
+ USB3_UNI_QSERDES_COM_CMN_IPTRIM 0x20 0
+ USB3_UNI_QSERDES_COM_SSC_EN_CENTER 0x01 0
+ USB3_UNI_QSERDES_COM_SSC_PER1 0x31 0
+ USB3_UNI_QSERDES_COM_SSC_PER2 0x01 0
+ USB3_UNI_QSERDES_COM_SSC_STEP_SIZE1_MODE1 0xde 0
+ USB3_UNI_QSERDES_COM_SSC_STEP_SIZE2_MODE1 0x07 0
+ USB3_UNI_QSERDES_COM_SSC_STEP_SIZE1_MODE0 0xde 0
+ USB3_UNI_QSERDES_COM_SSC_STEP_SIZE2_MODE0 0x07 0
+ USB3_UNI_QSERDES_COM_VCO_TUNE_MAP 0x02 0
+ USB3_UNI_QSERDES_RX_RX_MODE_00_HIGH4 0xb8 0
+ USB3_UNI_QSERDES_RX_RX_MODE_00_HIGH3 0xff 0
+ USB3_UNI_QSERDES_RX_RX_MODE_00_HIGH2 0xb7 0
+ USB3_UNI_QSERDES_RX_RX_MODE_00_HIGH 0x7f 0
+ USB3_UNI_QSERDES_RX_RX_MODE_00_LOW 0x7f 0
+ USB3_UNI_QSERDES_RX_RX_MODE_01_HIGH4 0xb4 0
+ USB3_UNI_QSERDES_RX_RX_MODE_01_HIGH3 0x7b 0
+ USB3_UNI_QSERDES_RX_RX_MODE_01_HIGH2 0x5c 0
+ USB3_UNI_QSERDES_RX_RX_MODE_01_HIGH 0xdc 0
+ USB3_UNI_QSERDES_RX_RX_MODE_01_LOW 0xdc 0
+ USB3_UNI_QSERDES_RX_UCDR_PI_CONTROLS 0x99 0
+ USB3_UNI_QSERDES_RX_UCDR_SB2_THRESH1 0x04 0
+ USB3_UNI_QSERDES_RX_UCDR_SB2_THRESH2 0x08 0
+ USB3_UNI_QSERDES_RX_UCDR_SB2_GAIN1 0x05 0
+ USB3_UNI_QSERDES_RX_UCDR_SB2_GAIN2 0x05 0
+ USB3_UNI_QSERDES_RX_UCDR_FASTLOCK_FO_GAIN 0x2f 0
+ USB3_UNI_QSERDES_RX_UCDR_FASTLOCK_COUNT_LOW 0xff 0
+ USB3_UNI_QSERDES_RX_UCDR_FASTLOCK_COUNT_HIGH 0x0f 0
+ USB3_UNI_QSERDES_RX_UCDR_SO_SATURATION_AND_ENABLE 0x7f 0
+ USB3_UNI_QSERDES_RX_UCDR_FO_GAIN 0x0a 0
+ USB3_UNI_QSERDES_RX_VGA_CAL_CNTRL1 0x54 0
+ USB3_UNI_QSERDES_RX_VGA_CAL_CNTRL2 0x0c 0
+ USB3_UNI_QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2 0x0f 0
+ USB3_UNI_QSERDES_RX_RX_EQU_ADAPTOR_CNTRL3 0x4a 0
+ USB3_UNI_QSERDES_RX_RX_EQU_ADAPTOR_CNTRL4 0x0a 0
+ USB3_UNI_QSERDES_RX_DFE_EN_TIMER 0x04 0
+ USB3_UNI_QSERDES_RX_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x47 0
+ USB3_UNI_QSERDES_RX_RX_OFFSET_ADAPTOR_CNTRL2 0x80 0
+ USB3_UNI_QSERDES_RX_SIGDET_CNTRL 0x04 0
+ USB3_UNI_QSERDES_RX_SIGDET_DEGLITCH_CNTRL 0x0e 0
+ USB3_UNI_QSERDES_RX_RX_IDAC_TSETTLE_HIGH 0x00 0
+ USB3_UNI_QSERDES_RX_RX_IDAC_TSETTLE_LOW 0xc0 0
+ USB3_UNI_QSERDES_RX_DFE_CTLE_POST_CAL_OFFSET 0x38 0
+ USB3_UNI_QSERDES_RX_UCDR_SO_GAIN 0x06 0
+ USB3_UNI_QSERDES_RX_DCC_CTRL1 0x0c 0
+ USB3_UNI_QSERDES_RX_GM_CAL 0x1f 0
+ USB3_UNI_QSERDES_TX_RCV_DETECT_LVL_2 0x12 0
+ USB3_UNI_QSERDES_TX_LANE_MODE_1 0xd5 0
+ USB3_UNI_QSERDES_TX_PI_QEC_CTRL 0x54 0
+ USB3_UNI_QSERDES_TX_RES_CODE_LANE_OFFSET_TX 0x08 0
+ USB3_UNI_PCS_LOCK_DETECT_CONFIG1 0xd0 0
+ USB3_UNI_PCS_LOCK_DETECT_CONFIG2 0x07 0
+ USB3_UNI_PCS_LOCK_DETECT_CONFIG3 0x20 0
+ USB3_UNI_PCS_LOCK_DETECT_CONFIG6 0x13 0
+ USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_L 0xe7 0
+ USB3_UNI_PCS_RCVR_DTCT_DLY_P1U2_H 0x03 0
+ USB3_UNI_PCS_RX_SIGDET_LVL 0xaa 0
+ USB3_UNI_PCS_PCS_TX_RX_CONFIG 0x0c 0
+ USB3_UNI_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x07 0
+ USB3_UNI_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0xf8 0
+ USB3_UNI_PCS_CDR_RESET_TIME 0x0a 0
+ USB3_UNI_PCS_ALIGN_DETECT_CONFIG1 0x88 0
+ USB3_UNI_PCS_ALIGN_DETECT_CONFIG2 0x13 0
+ USB3_UNI_PCS_EQ_CONFIG1 0x4b 0
+ USB3_UNI_PCS_EQ_CONFIG5 0x10 0
+ USB3_UNI_PCS_REFGEN_REQ_CONFIG1 0x21 0
+ 0xffffffff 0xffffffff 0x00>;
+
+ qcom,qmp-phy-reg-offset =
+ <USB3_UNI_PCS_PCS_STATUS1
+ USB3_UNI_PCS_USB3_AUTONOMOUS_MODE_CTRL
+ USB3_UNI_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR
+ USB3_UNI_PCS_POWER_DOWN_CONTROL
+ USB3_UNI_PCS_SW_RESET
+ USB3_UNI_PCS_START_CONTROL>;
+
+ clocks = <&clock_gcc GCC_USB3_SEC_PHY_AUX_CLK>,
+ <&clock_gcc GCC_USB3_SEC_PHY_PIPE_CLK>,
+ <&clock_rpmh RPMH_CXO_CLK>,
+ <&clock_gcc GCC_USB3_SEC_CLKREF_EN>,
+ <&clock_gcc GCC_USB3_SEC_PHY_COM_AUX_CLK>;
+ clock-names = "aux_clk", "pipe_clk", "ref_clk_src",
+ "ref_clk", "com_aux_clk";
+
+ resets = <&clock_gcc GCC_USB3_PHY_SEC_BCR>,
+ <&clock_gcc GCC_USB3PHY_PHY_SEC_BCR>;
+ reset-names = "phy_reset", "phy_phy_reset";
+ };
};
diff --git a/arch/arm64/boot/dts/qcom/kona-v2-cdp.dts b/arch/arm64/boot/dts/qcom/kona-v2-cdp.dts
new file mode 100644
index 0000000..5716b11
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-v2-cdp.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "kona-v2.dtsi"
+#include "kona-cdp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. kona CDP";
+ compatible = "qcom,kona-cdp", "qcom,kona", "qcom,cdp";
+ qcom,board-id = <1 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-v2-mtp.dts b/arch/arm64/boot/dts/qcom/kona-v2-mtp.dts
new file mode 100644
index 0000000..eaf41c5
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-v2-mtp.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "kona-v2.dtsi"
+#include "kona-mtp.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. kona MTP";
+ compatible = "qcom,kona-mtp", "qcom,kona", "qcom,mtp";
+ qcom,board-id = <8 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-v2-qrd.dts b/arch/arm64/boot/dts/qcom/kona-v2-qrd.dts
new file mode 100644
index 0000000..cbd380f
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-v2-qrd.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "kona-v2.dtsi"
+#include "kona-qrd.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. kona QRD";
+ compatible = "qcom,kona-qrd", "qcom,kona", "qcom,qrd";
+ qcom,board-id = <11 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-v2-rumi.dts b/arch/arm64/boot/dts/qcom/kona-v2-rumi.dts
new file mode 100644
index 0000000..32efd28
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-v2-rumi.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "kona-v2.dtsi"
+#include "kona-rumi.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. kona RUMI";
+ compatible = "qcom,kona-rumi", "qcom,kona", "qcom,rumi";
+ qcom,board-id = <15 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-v2.dts b/arch/arm64/boot/dts/qcom/kona-v2.dts
new file mode 100644
index 0000000..fa0a0323
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-v2.dts
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+/dts-v1/;
+
+#include "kona-v2.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. kona v2 SoC";
+ compatible = "qcom,kona";
+ qcom,board-id = <0 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-v2.dtsi b/arch/arm64/boot/dts/qcom/kona-v2.dtsi
new file mode 100644
index 0000000..e39bc9c
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/kona-v2.dtsi
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "kona.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. kona v2";
+ compatible = "qcom,kona";
+ qcom,msm-id = <356 0x20000>;
+};
+
diff --git a/arch/arm64/boot/dts/qcom/kona-vidc.dtsi b/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
index 758f9d4..67f8dbe 100644
--- a/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-vidc.dtsi
@@ -62,7 +62,7 @@
qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
qcom,bus-governor = "msm-vidc-ddr";
qcom,bus-range-kbps = <762 6533000>;
- operating-points-v2 = <&ddr_bw_opp_table>;
+ operating-points-v2 = <&suspendable_ddr_bw_opp_table>;
};
venus_bus_llcc {
@@ -72,7 +72,7 @@
qcom,bus-slave = <MSM_BUS_SLAVE_LLCC>;
qcom,bus-governor = "msm-vidc-llcc";
qcom,bus-range-kbps = <2288 6533000>;
- operating-points-v2 = <&llcc_bw_opp_table>;
+ operating-points-v2 = <&suspendable_llcc_bw_opp_table>;
};
/* MMUs */
diff --git a/arch/arm64/boot/dts/qcom/kona.dtsi b/arch/arm64/boot/dts/qcom/kona.dtsi
index 5108cce..4106d92 100644
--- a/arch/arm64/boot/dts/qcom/kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona.dtsi
@@ -978,6 +978,18 @@
BW_OPP_ENTRY( 1000, 16); /* 15258 MB/s */
};
+ suspendable_llcc_bw_opp_table: suspendable-llcc-bw-opp-table {
+ compatible = "operating-points-v2";
+ BW_OPP_ENTRY( 0, 16); /* 0 MB/s */
+ BW_OPP_ENTRY( 150, 16); /* 2288 MB/s */
+ BW_OPP_ENTRY( 300, 16); /* 4577 MB/s */
+ BW_OPP_ENTRY( 466, 16); /* 7110 MB/s */
+ BW_OPP_ENTRY( 600, 16); /* 9155 MB/s */
+ BW_OPP_ENTRY( 806, 16); /* 12298 MB/s */
+ BW_OPP_ENTRY( 933, 16); /* 14236 MB/s */
+ BW_OPP_ENTRY( 1000, 16); /* 15258 MB/s */
+ };
+
ddr_bw_opp_table: ddr-bw-opp-table {
compatible = "operating-points-v2";
BW_OPP_ENTRY( 200, 4); /* 762 MB/s */
@@ -1193,7 +1205,7 @@
compatible = "qcom,arm-memlat-mon";
qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
qcom,target-dev = <&cpu0_llcc_ddr_lat>;
- qcom,cachemiss-ev = <0x1000>;
+ qcom,cachemiss-ev = <0x2A>;
qcom,core-dev-table =
< 300000 MHZ_TO_MBPS( 200, 4) >,
< 729600 MHZ_TO_MBPS( 451, 4) >,
@@ -1215,7 +1227,7 @@
compatible = "qcom,arm-memlat-mon";
qcom,cpulist = <&CPU4 &CPU5 &CPU6 &CPU7>;
qcom,target-dev = <&cpu4_llcc_ddr_lat>;
- qcom,cachemiss-ev = <0x1000>;
+ qcom,cachemiss-ev = <0x2A>;
qcom,core-dev-table =
< 300000 MHZ_TO_MBPS( 200, 4) >,
< 691200 MHZ_TO_MBPS( 451, 4) >,
@@ -3247,9 +3259,8 @@
qcom,cnss-qca6390@a0000000 {
compatible = "qcom,cnss-qca6390";
- reg = <0xa0000000 0x10000000>,
- <0xb0000000 0x10000>;
- reg-names = "smmu_iova_base", "smmu_iova_ipa";
+ reg = <0xb0000000 0x10000>;
+ reg-names = "smmu_iova_ipa";
wlan-en-gpio = <&tlmm 20 0>;
pinctrl-names = "wlan_en_active", "wlan_en_sleep";
pinctrl-0 = <&cnss_wlan_en_active>;
@@ -3375,7 +3386,16 @@
cnss_pci: cnss_pci {
reg = <0 0 0 0 0>;
- qcom,iommu-dma = "disabled";
+ qcom,iommu-group = <&cnss_pci_iommu_group>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cnss_pci_iommu_group: cnss_pci_iommu_group {
+ qcom,iommu-dma-addr-pool = <0xa0000000 0x10000000>;
+ qcom,iommu-dma = "fastmap";
+ qcom,iommu-pagetable = "coherent";
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/lito.dtsi b/arch/arm64/boot/dts/qcom/lito.dtsi
index 27c3bea3..e1e8b2c 100644
--- a/arch/arm64/boot/dts/qcom/lito.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito.dtsi
@@ -1401,7 +1401,7 @@
qcom,complete-ramdump;
/* Inputs from lpass */
- interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_EDGE_RISING>,
+ interrupts-extended = <&intc GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>,
<&adsp_smp2p_in 0 0>,
<&adsp_smp2p_in 1 0>,
<&adsp_smp2p_in 2 0>,
@@ -1419,6 +1419,47 @@
mbox-names = "adsp-pil";
};
+
+ qcom,turing@8300000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0x8300000 0x100000>;
+
+ vdd_cx-supply = <&VDD_CX_LEVEL>;
+ qcom,proxy-reg-names = "vdd_cx";
+ qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 100000>;
+
+ clocks = <&rpmhcc RPMH_CXO_CLK>;
+ clock-names = "xo";
+ qcom,proxy-clock-names = "xo";
+
+ qcom,pas-id = <18>;
+ qcom,proxy-timeout-ms = <10000>;
+ qcom,smem-id = <601>;
+ qcom,sysmon-id = <7>;
+ qcom,ssctl-instance-id = <0x17>;
+ qcom,firmware-name = "cdsp";
+ memory-region = <&pil_cdsp_mem>;
+ qcom,complete-ramdump;
+
+ /* Inputs from turing */
+ interrupts-extended = <&intc GIC_SPI 578 IRQ_TYPE_LEVEL_HIGH>,
+ <&cdsp_smp2p_in 0 0>,
+ <&cdsp_smp2p_in 2 0>,
+ <&cdsp_smp2p_in 1 0>,
+ <&cdsp_smp2p_in 3 0>;
+
+ interrupt-names = "qcom,wdog",
+ "qcom,err-fatal",
+ "qcom,proxy-unvote",
+ "qcom,err-ready",
+ "qcom,stop-ack";
+
+ /* Outputs to turing */
+ qcom,smem-states = <&cdsp_smp2p_out 0>;
+ qcom,smem-state-names = "qcom,force-stop";
+
+ mbox-names = "cdsp-pil";
+ };
};
#include "lito-pinctrl.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
index 930087a..2862a0f 100644
--- a/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-kona.dtsi
@@ -41,6 +41,22 @@
<GIC_SPI 684 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 685 IRQ_TYPE_LEVEL_HIGH>;
+ qcom,msm-bus,vectors-KBps =
+ <MSM_BUS_MASTER_GPU_TCU>,
+ <MSM_BUS_SLAVE_EBI_CH0>,
+ <0 0>,
+ <MSM_BUS_MASTER_GPU_TCU>,
+ <MSM_BUS_SLAVE_EBI_CH0>,
+ <0 1000>;
+
+ qcom,actlr =
+ /* All CBs of GFX: +15 deep PF */
+ <0x2 0x400 0x303>,
+ <0x4 0x400 0x303>,
+ <0x5 0x400 0x303>,
+ <0x7 0x400 0x303>,
+ <0x0 0x401 0x303>;
+
gfx_0_tbu: gfx_0_tbu@3dc5000 {
compatible = "qcom,qsmmuv500-tbu";
reg = <0x3DC5000 0x1000>,
@@ -180,6 +196,23 @@
<MSM_BUS_SLAVE_IMEM_CFG>,
<0 1000>;
+ qcom,actlr =
+ /* For HF-0 TBU +3 deep PF */
+ <0x800 0x3ff 0x103>,
+ /* For HF-1 TBU +3 deep PF */
+ <0xC00 0x3ff 0x103>,
+ /* For SF-0 TBU +3 deep PF */
+ <0x2000 0x3ff 0x103>,
+ /* For SF-1 TBU +3 deep PF */
+ <0x2400 0x3ff 0x103>,
+ /* For NPU +3 deep PF */
+ <0x1081 0x400 0x103>,
+ <0x1082 0x400 0x103>,
+ <0x1085 0x400 0x103>,
+ <0x10a1 0x400 0x103>,
+ <0x10a2 0x400 0x103>,
+ <0x10a5 0x400 0x103>;
+
anoc_1_tbu: anoc_1_tbu@15185000 {
compatible = "qcom,qsmmuv500-tbu";
reg = <0x15185000 0x1000>,
diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig
index b7ba3f6..d1ea96f 100644
--- a/arch/arm64/configs/cuttlefish_defconfig
+++ b/arch/arm64/configs/cuttlefish_defconfig
@@ -84,6 +84,7 @@
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
@@ -143,6 +144,7 @@
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MAC=y
CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
@@ -177,6 +179,7 @@
CONFIG_L2TP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_NETEM=y
CONFIG_NET_CLS_U32=y
CONFIG_NET_CLS_BPF=y
CONFIG_NET_EMATCH=y
diff --git a/arch/arm64/configs/vendor/kona-perf_defconfig b/arch/arm64/configs/vendor/kona-perf_defconfig
index 678c21c..e49949e 100644
--- a/arch/arm64/configs/vendor/kona-perf_defconfig
+++ b/arch/arm64/configs/vendor/kona-perf_defconfig
@@ -9,6 +9,7 @@
CONFIG_TASKSTATS=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
CONFIG_RCU_EXPERT=y
CONFIG_RCU_FAST_NO_HZ=y
CONFIG_RCU_NOCB_CPU=y
@@ -421,7 +422,7 @@
CONFIG_MSM_HSUSB_PHY=y
CONFIG_USB_QCOM_EMU_PHY=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_VBUS_DRAW=900
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_UEVENT=y
CONFIG_USB_CONFIGFS_NCM=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 4724711..9997d0d 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -8,6 +8,7 @@
CONFIG_TASKSTATS=y
CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
CONFIG_RCU_EXPERT=y
CONFIG_RCU_FAST_NO_HZ=y
CONFIG_RCU_NOCB_CPU=y
@@ -107,6 +108,7 @@
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_CLEANCACHE=y
CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
CONFIG_CMA_AREAS=16
CONFIG_ZSMALLOC=y
CONFIG_NET=y
@@ -431,7 +433,7 @@
CONFIG_MSM_HSUSB_PHY=y
CONFIG_USB_QCOM_EMU_PHY=y
CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_VBUS_DRAW=900
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_UEVENT=y
CONFIG_USB_CONFIGFS_NCM=y
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 48d6061..3b21bcd 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -138,7 +138,23 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
#define __raw_readq(a) __raw_read_logged((a), q, u64)
/* IO barriers */
-#define __iormb() rmb()
+#define __iormb(v) \
+({ \
+ unsigned long tmp; \
+ \
+ rmb(); \
+ \
+ /* \
+ * Create a dummy control dependency from the IO read to any \
+ * later instructions. This ensures that a subsequent call to \
+ * udelay() will be ordered due to the ISB in get_cycles(). \
+ */ \
+ asm volatile("eor %0, %1, %1\n" \
+ "cbnz %0, ." \
+ : "=r" (tmp) : "r" ((unsigned long)(v)) \
+ : "memory"); \
+})
+
#define __iowmb() wmb()
#define mmiowb() do { } while (0)
@@ -179,10 +195,10 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
* following Normal memory access. Writes are ordered relative to any prior
* Normal memory access.
*/
-#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
-#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
-#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
-#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; })
+#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(__v); __v; })
+#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(__v); __v; })
+#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
+#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })
#define writeb(v,c) ({ __iowmb(); writeb_relaxed((v),(c)); })
#define writew(v,c) ({ __iowmb(); writew_relaxed((v),(c)); })
@@ -190,13 +206,13 @@ static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
#define writeq(v,c) ({ __iowmb(); writeq_relaxed((v),(c)); })
#define readb_no_log(c) \
- ({ u8 __v = readb_relaxed_no_log(c); __iormb(); __v; })
+ ({ u8 __v = readb_relaxed_no_log(c); __iormb(__v); __v; })
#define readw_no_log(c) \
- ({ u16 __v = readw_relaxed_no_log(c); __iormb(); __v; })
+ ({ u16 __v = readw_relaxed_no_log(c); __iormb(__v); __v; })
#define readl_no_log(c) \
- ({ u32 __v = readl_relaxed_no_log(c); __iormb(); __v; })
+ ({ u32 __v = readl_relaxed_no_log(c); __iormb(__v); __v; })
#define readq_no_log(c) \
- ({ u64 __v = readq_relaxed_no_log(c); __iormb(); __v; })
+ ({ u64 __v = readq_relaxed_no_log(c); __iormb(__v); __v; })
#define writeb_no_log(v, c) \
({ __iowmb(); writeb_relaxed_no_log((v), (c)); })
@@ -251,9 +267,9 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
/*
* io{read,write}{16,32,64}be() macros
*/
-#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw_no_log(p)); __iormb(); __v; })
-#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl_no_log(p)); __iormb(); __v; })
-#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq_no_log(p)); __iormb(); __v; })
+#define ioread16be(p) ({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw_no_log(p)); __iormb(__v); __v; })
+#define ioread32be(p) ({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl_no_log(p)); __iormb(__v); __v; })
+#define ioread64be(p) ({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq_no_log(p)); __iormb(__v); __v; })
#define iowrite16be(v,p) ({ __iowmb(); __raw_writew_no_log((__force __u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p) ({ __iowmb(); __raw_writel_no_log((__force __u32)cpu_to_be32(v), p); })
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index 98c4ce5..ad64d2c 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -130,7 +130,7 @@ struct user_sve_header {
/* Offset from the start of struct user_sve_header to the register data */
#define SVE_PT_REGS_OFFSET \
- ((sizeof(struct sve_context) + (SVE_VQ_BYTES - 1)) \
+ ((sizeof(struct user_sve_header) + (SVE_VQ_BYTES - 1)) \
/ SVE_VQ_BYTES * SVE_VQ_BYTES)
/*
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S
index 1175f58..295951f 100644
--- a/arch/arm64/kernel/entry-ftrace.S
+++ b/arch/arm64/kernel/entry-ftrace.S
@@ -79,7 +79,6 @@
.macro mcount_get_lr reg
ldr \reg, [x29]
ldr \reg, [\reg, #8]
- mcount_adjust_addr \reg, \reg
.endm
.macro mcount_get_lr_addr reg
diff --git a/arch/mips/boot/dts/img/boston.dts b/arch/mips/boot/dts/img/boston.dts
index 65af3f6..84328af 100644
--- a/arch/mips/boot/dts/img/boston.dts
+++ b/arch/mips/boot/dts/img/boston.dts
@@ -141,6 +141,12 @@
#size-cells = <2>;
#interrupt-cells = <1>;
+ eg20t_phub@2,0,0 {
+ compatible = "pci8086,8801";
+ reg = <0x00020000 0 0 0 0>;
+ intel,eg20t-prefetch = <0>;
+ };
+
eg20t_mac@2,0,1 {
compatible = "pci8086,8802";
reg = <0x00020100 0 0 0 0>;
diff --git a/arch/mips/configs/ath79_defconfig b/arch/mips/configs/ath79_defconfig
index 951c423..4c47b3f 100644
--- a/arch/mips/configs/ath79_defconfig
+++ b/arch/mips/configs/ath79_defconfig
@@ -71,6 +71,7 @@
# CONFIG_SERIAL_8250_PCI is not set
CONFIG_SERIAL_8250_NR_UARTS=1
CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AR933X=y
CONFIG_SERIAL_AR933X_CONSOLE=y
# CONFIG_HW_RANDOM is not set
diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h b/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h
index e9cc62c..ff50aeb 100644
--- a/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h
+++ b/arch/mips/include/asm/mach-jz4740/jz4740_mmc.h
@@ -4,8 +4,6 @@
struct jz4740_mmc_platform_data {
int gpio_power;
- int gpio_card_detect;
- int gpio_read_only;
unsigned card_detect_active_low:1;
unsigned read_only_active_low:1;
unsigned power_active_low:1;
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index c05dcf5..273ef58 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -369,8 +369,8 @@ enum mm_32a_minor_op {
mm_ext_op = 0x02c,
mm_pool32axf_op = 0x03c,
mm_srl32_op = 0x040,
+ mm_srlv32_op = 0x050,
mm_sra_op = 0x080,
- mm_srlv32_op = 0x090,
mm_rotr_op = 0x0c0,
mm_lwxs_op = 0x118,
mm_addu32_op = 0x150,
diff --git a/arch/mips/jazz/jazzdma.c b/arch/mips/jazz/jazzdma.c
index d31bc2f..fb2b6d0 100644
--- a/arch/mips/jazz/jazzdma.c
+++ b/arch/mips/jazz/jazzdma.c
@@ -74,14 +74,15 @@ static int __init vdma_init(void)
get_order(VDMA_PGTBL_SIZE));
BUG_ON(!pgtbl);
dma_cache_wback_inv((unsigned long)pgtbl, VDMA_PGTBL_SIZE);
- pgtbl = (VDMA_PGTBL_ENTRY *)KSEG1ADDR(pgtbl);
+ pgtbl = (VDMA_PGTBL_ENTRY *)CKSEG1ADDR((unsigned long)pgtbl);
/*
* Clear the R4030 translation table
*/
vdma_pgtbl_init();
- r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE, CPHYSADDR(pgtbl));
+ r4030_write_reg32(JAZZ_R4030_TRSTBL_BASE,
+ CPHYSADDR((unsigned long)pgtbl));
r4030_write_reg32(JAZZ_R4030_TRSTBL_LIM, VDMA_PGTBL_SIZE);
r4030_write_reg32(JAZZ_R4030_TRSTBL_INV, 0);
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index af0c8ac..705593d 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -43,7 +43,6 @@
#include "clock.h"
/* GPIOs */
-#define QI_LB60_GPIO_SD_CD JZ_GPIO_PORTD(0)
#define QI_LB60_GPIO_SD_VCC_EN_N JZ_GPIO_PORTD(2)
#define QI_LB60_GPIO_KEYOUT(x) (JZ_GPIO_PORTC(10) + (x))
@@ -386,12 +385,18 @@ static struct platform_device qi_lb60_gpio_keys = {
};
static struct jz4740_mmc_platform_data qi_lb60_mmc_pdata = {
- .gpio_card_detect = QI_LB60_GPIO_SD_CD,
- .gpio_read_only = -1,
.gpio_power = QI_LB60_GPIO_SD_VCC_EN_N,
.power_active_low = 1,
};
+static struct gpiod_lookup_table qi_lb60_mmc_gpio_table = {
+ .dev_id = "jz4740-mmc.0",
+ .table = {
+ GPIO_LOOKUP("GPIOD", 0, "cd", GPIO_ACTIVE_HIGH),
+ { },
+ },
+};
+
/* beeper */
static struct pwm_lookup qi_lb60_pwm_lookup[] = {
PWM_LOOKUP("jz4740-pwm", 4, "pwm-beeper", NULL, 0,
@@ -500,6 +505,7 @@ static int __init qi_lb60_init_platform_devices(void)
gpiod_add_lookup_table(&qi_lb60_audio_gpio_table);
gpiod_add_lookup_table(&qi_lb60_nand_gpio_table);
gpiod_add_lookup_table(&qi_lb60_spigpio_gpio_table);
+ gpiod_add_lookup_table(&qi_lb60_mmc_gpio_table);
spi_register_board_info(qi_lb60_spi_board_info,
ARRAY_SIZE(qi_lb60_spi_board_info));
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 8f5bd04..7f3f136 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -457,5 +457,5 @@ void mips_cm_error_report(void)
}
/* reprime cause register */
- write_gcr_error_cause(0);
+ write_gcr_error_cause(cm_error);
}
diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson64/common/reset.c
index a60715e..b26892c 100644
--- a/arch/mips/loongson64/common/reset.c
+++ b/arch/mips/loongson64/common/reset.c
@@ -59,7 +59,12 @@ static void loongson_poweroff(void)
{
#ifndef CONFIG_LEFI_FIRMWARE_INTERFACE
mach_prepare_shutdown();
- unreachable();
+
+ /*
+ * It needs a wait loop here, but mips/kernel/reset.c already calls
+ * a generic delay loop, machine_hang(), so simply return.
+ */
+ return;
#else
void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr;
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
index aeb7b1b..252c009 100644
--- a/arch/mips/net/ebpf_jit.c
+++ b/arch/mips/net/ebpf_jit.c
@@ -343,12 +343,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
const struct bpf_prog *prog = ctx->skf;
int stack_adjust = ctx->stack_size;
int store_offset = stack_adjust - 8;
+ enum reg_val_type td;
int r0 = MIPS_R_V0;
- if (dest_reg == MIPS_R_RA &&
- get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX)
+ if (dest_reg == MIPS_R_RA) {
/* Don't let zero extended value escape. */
- emit_instr(ctx, sll, r0, r0, 0);
+ td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
+ emit_instr(ctx, sll, r0, r0, 0);
+ }
if (ctx->flags & EBPF_SAVE_RA) {
emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index 5017d58..fc29b85 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void)
if (octeon_has_feature(OCTEON_FEATURE_PCIE))
return 0;
+ if (!octeon_is_pci_host()) {
+ pr_notice("Not in host mode, PCI Controller not initialized\n");
+ return 0;
+ }
+
/* Point pcibios_map_irq() to the PCI version of it */
octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq;
@@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void)
else
octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG;
- if (!octeon_is_pci_host()) {
- pr_notice("Not in host mode, PCI Controller not initialized\n");
- return 0;
- }
-
/* PCI I/O and PCI MEM values */
set_io_port_base(OCTEON_PCI_IOSPACE_BASE);
ioport_resource.start = 0;
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index 1f9cb0e..613d617 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -38,6 +38,7 @@
config SOC_MT7620
bool "MT7620/8"
+ select CPU_MIPSR2_IRQ_VI
select HW_HAS_PCI
config SOC_MT7621
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index 34605ca..6f10312 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -8,6 +8,7 @@
$(filter -E%,$(KBUILD_CFLAGS)) \
$(filter -mmicromips,$(KBUILD_CFLAGS)) \
$(filter -march=%,$(KBUILD_CFLAGS)) \
+ $(filter -m%-float,$(KBUILD_CFLAGS)) \
-D__VDSO__
ifeq ($(cc-name),clang)
@@ -128,7 +129,7 @@
$(call cmd,force_checksrc)
$(call if_changed_rule,cc_o_c)
-$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
+$(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32
$(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
@@ -168,7 +169,7 @@
$(call cmd,force_checksrc)
$(call if_changed_rule,cc_o_c)
-$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
+$(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32
$(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
diff --git a/arch/nds32/mm/Makefile b/arch/nds32/mm/Makefile
index 6b68558..7c5c15a 100644
--- a/arch/nds32/mm/Makefile
+++ b/arch/nds32/mm/Makefile
@@ -4,4 +4,8 @@
obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
obj-$(CONFIG_HIGHMEM) += highmem.o
-CFLAGS_proc-n13.o += -fomit-frame-pointer
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_proc.o = $(CC_FLAGS_FTRACE)
+endif
+CFLAGS_proc.o += -fomit-frame-pointer
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c
index 2582df1..0964c23 100644
--- a/arch/parisc/kernel/ptrace.c
+++ b/arch/parisc/kernel/ptrace.c
@@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
long do_syscall_trace_enter(struct pt_regs *regs)
{
- if (test_thread_flag(TIF_SYSCALL_TRACE) &&
- tracehook_report_syscall_entry(regs)) {
+ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+ int rc = tracehook_report_syscall_entry(regs);
+
/*
- * Tracing decided this syscall should not happen or the
- * debugger stored an invalid system call number. Skip
- * the system call and the system call restart handling.
+ * As tracesys_next does not set %r28 to -ENOSYS
+ * when %r20 is set to -1, initialize it here.
*/
- regs->gr[20] = -1UL;
- goto out;
+ regs->gr[28] = -ENOSYS;
+
+ if (rc) {
+ /*
+ * A nonzero return code from
+ * tracehook_report_syscall_entry() tells us
+ * to prevent the syscall execution. Skip
+ * the syscall call and the syscall restart handling.
+ *
+ * Note that the tracer may also just change
+ * regs->gr[20] to an invalid syscall number,
+ * that is handled by tracesys_next.
+ */
+ regs->gr[20] = -1UL;
+ return -1;
+ }
}
/* Do the secure computing check after ptrace. */
@@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs)
regs->gr[24] & 0xffffffff,
regs->gr[23] & 0xffffffff);
-out:
/*
* Sign extend the syscall number to 64bit since it may have been
* modified by a compat ptrace call
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 2a24865..855dbae 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1234,21 +1234,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
-static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
- struct spinlock *old_pmd_ptl,
- struct vm_area_struct *vma)
-{
- if (radix_enabled())
- return false;
- /*
- * Archs like ppc64 use pgtable to store per pmd
- * specific information. So when we switch the pmd,
- * we should also withdraw and deposit the pgtable
- */
- return true;
-}
-
-
+extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+ struct spinlock *old_pmd_ptl,
+ struct vm_area_struct *vma);
+/*
+ * Hash translation mode use the deposited table to store hash pte
+ * slot information.
+ */
#define arch_needs_pgtable_deposit arch_needs_pgtable_deposit
static inline bool arch_needs_pgtable_deposit(void)
{
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index 1e7a335..15bc07a 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -200,7 +200,7 @@ struct fad_crash_memory_ranges {
unsigned long long size;
};
-extern int is_fadump_boot_memory_area(u64 addr, ulong size);
+extern int is_fadump_memory_area(u64 addr, ulong size);
extern int early_init_dt_scan_fw_dump(unsigned long node,
const char *uname, int depth, void *data);
extern int fadump_reserve_mem(void);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index bac225b..23bea99 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -63,7 +63,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size,
#endif
#define access_ok(type, addr, size) \
- (__chk_user_ptr(addr), \
+ (__chk_user_ptr(addr), (void)(type), \
__access_ok((__force unsigned long)(addr), (size), get_fs()))
/*
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index a711d22..c02c952 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -118,13 +118,19 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
/*
* If fadump is registered, check if the memory provided
- * falls within boot memory area.
+ * falls within boot memory area and reserved memory area.
*/
-int is_fadump_boot_memory_area(u64 addr, ulong size)
+int is_fadump_memory_area(u64 addr, ulong size)
{
+ u64 d_start = fw_dump.reserve_dump_area_start;
+ u64 d_end = d_start + fw_dump.reserve_dump_area_size;
+
if (!fw_dump.dump_registered)
return 0;
+ if (((addr + size) > d_start) && (addr <= d_end))
+ return 1;
+
return (addr + size) > RMA_START && addr <= fw_dump.boot_memory_size;
}
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 81d4574..9fd2ff2 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -919,11 +919,12 @@
/* set up the PTE pointers for the Abatron bdiGDB.
*/
- tovirt(r6,r6)
lis r5, abatron_pteptrs@h
ori r5, r5, abatron_pteptrs@l
stw r5, 0xf0(0) /* Must match your Abatron config file */
tophys(r5,r5)
+ lis r6, swapper_pg_dir@h
+ ori r6, r6, swapper_pg_dir@l
stw r6, 0(r5)
/* Now turn on the MMU for real! */
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 07ae018..53016c7 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -296,6 +296,10 @@
#ifdef CONFIG_PPC32
.data : AT(ADDR(.data) - LOAD_OFFSET) {
DATA_DATA
+#ifdef CONFIG_UBSAN
+ *(.data..Lubsan_data*)
+ *(.data..Lubsan_type*)
+#endif
*(.data.rel*)
*(SDATA_MAIN)
*(.sdata2)
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index eba5756..79b7940 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -543,8 +543,11 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
case KVM_CAP_SPAPR_TCE_64:
- /* fallthrough */
+ r = 1;
+ break;
case KVM_CAP_SPAPR_TCE_VFIO:
+ r = !!cpu_has_feature(CPU_FTR_HVMODE);
+ break;
case KVM_CAP_PPC_RTAS:
case KVM_CAP_PPC_FIXUP_HCALL:
case KVM_CAP_PPC_ENABLE_HCALL:
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index d51cf5f..365526e 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -221,7 +221,9 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr,
static bool bad_kernel_fault(bool is_exec, unsigned long error_code,
unsigned long address)
{
- if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT))) {
+ /* NX faults set DSISR_PROTFAULT on the 8xx, DSISR_NOEXEC_OR_G on others */
+ if (is_exec && (error_code & (DSISR_NOEXEC_OR_G | DSISR_KEYFAULT |
+ DSISR_PROTFAULT))) {
printk_ratelimited(KERN_CRIT "kernel tried to execute"
" exec-protected page (%lx) -"
"exploit attempt? (uid: %d)\n",
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 01d7c0f..297db66 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -477,3 +477,25 @@ void arch_report_meminfo(struct seq_file *m)
atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20);
}
#endif /* CONFIG_PROC_FS */
+
+/*
+ * For hash translation mode, we use the deposited table to store hash slot
+ * information and they are stored at PTRS_PER_PMD offset from related pmd
+ * location. Hence a pmd move requires deposit and withdraw.
+ *
+ * For radix translation with split pmd ptl, we store the deposited table in the
+ * pmd page. Hence if we have different pmd page we need to withdraw during pmd
+ * move.
+ *
+ * With hash we use deposited table always irrespective of anon or not.
+ * With radix we use deposited table only for anonymous mapping.
+ */
+int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
+ struct spinlock *old_pmd_ptl,
+ struct vm_area_struct *vma)
+{
+ if (radix_enabled())
+ return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma);
+
+ return true;
+}
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 177de81..6a2f65d 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -226,8 +226,13 @@ void isa207_get_mem_weight(u64 *weight)
u64 mmcra = mfspr(SPRN_MMCRA);
u64 exp = MMCRA_THR_CTR_EXP(mmcra);
u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
+ u64 sier = mfspr(SPRN_SIER);
+ u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
- *weight = mantissa << (2 * exp);
+ if (val == 0 || val == 7)
+ *weight = 0;
+ else
+ *weight = mantissa << (2 * exp);
}
int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
index fe96910..7639b21 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
@@ -299,7 +299,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
if (alloc_userspace_copy) {
offset = 0;
uas = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
- levels, tce_table_size, &offset,
+ tmplevels, tce_table_size, &offset,
&total_allocated_uas);
if (!uas)
goto free_tces_exit;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index a0b20c0..e3010b1 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -272,6 +272,8 @@ int dlpar_detach_node(struct device_node *dn)
if (rc)
return rc;
+ of_node_put(dn);
+
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index c1578f5..e4c658c 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -389,8 +389,11 @@ static bool lmb_is_removable(struct drmem_lmb *lmb)
phys_addr = lmb->base_addr;
#ifdef CONFIG_FA_DUMP
- /* Don't hot-remove memory that falls in fadump boot memory area */
- if (is_fadump_boot_memory_area(phys_addr, block_sz))
+ /*
+ * Don't hot-remove memory that falls in fadump boot memory area
+ * and memory that is reserved for capturing old kernel memory.
+ */
+ if (is_fadump_memory_area(phys_addr, block_sz))
return false;
#endif
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index 2fa2942..470755c 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -35,6 +35,12 @@
#define _PAGE_SPECIAL _PAGE_SOFT
#define _PAGE_TABLE _PAGE_PRESENT
+/*
+ * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
+ * distinguish them from swapped out pages
+ */
+#define _PAGE_PROT_NONE _PAGE_READ
+
#define _PAGE_PFN_SHIFT 10
/* Set of bits to preserve across pte_modify() */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 1630196..a8179a8 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -44,7 +44,7 @@
/* Page protection bits */
#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
-#define PAGE_NONE __pgprot(0)
+#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
@@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
static inline int pmd_present(pmd_t pmd)
{
- return (pmd_val(pmd) & _PAGE_PRESENT);
+ return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
static inline int pmd_none(pmd_t pmd)
@@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr)
static inline int pte_present(pte_t pte)
{
- return (pte_val(pte) & _PAGE_PRESENT);
+ return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}
static inline int pte_none(pte_t pte)
@@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
*
* Format of swap PTE:
* bit 0: _PAGE_PRESENT (zero)
- * bit 1: reserved for future use (zero)
+ * bit 1: _PAGE_PROT_NONE (zero)
* bits 2 to 6: swap type
* bits 7 to XLEN-1: swap offset
*/
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 9f82a7e..9db7d00 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -120,6 +120,6 @@ void do_syscall_trace_exit(struct pt_regs *regs)
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
- trace_sys_exit(regs, regs->regs[0]);
+ trace_sys_exit(regs, regs_return_value(regs));
#endif
}
diff --git a/arch/s390/include/uapi/asm/zcrypt.h b/arch/s390/include/uapi/asm/zcrypt.h
index 2bb1f3b..48c784f 100644
--- a/arch/s390/include/uapi/asm/zcrypt.h
+++ b/arch/s390/include/uapi/asm/zcrypt.h
@@ -147,8 +147,8 @@ struct ica_xcRB {
* @cprb_len: CPRB header length [0x0020]
* @cprb_ver_id: CPRB version id. [0x04]
* @pad_000: Alignment pad bytes
- * @flags: Admin cmd [0x80] or functional cmd [0x00]
- * @func_id: Function id / subtype [0x5434]
+ * @flags: Admin bit [0x80], Special bit [0x20]
+ * @func_id: Function id / subtype [0x5434] "T4"
* @source_id: Source id [originator id]
* @target_id: Target id [usage/ctrl domain id]
* @ret_code: Return code
diff --git a/arch/sh/boards/mach-kfr2r09/setup.c b/arch/sh/boards/mach-kfr2r09/setup.c
index e59c577..c70bc78 100644
--- a/arch/sh/boards/mach-kfr2r09/setup.c
+++ b/arch/sh/boards/mach-kfr2r09/setup.c
@@ -25,7 +25,6 @@
#include <linux/memblock.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
-#include <linux/mtd/onenand.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_data/lv5207lp.h>
#include <linux/platform_device.h>
diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h
index 7485398..9c04562 100644
--- a/arch/um/include/asm/pgtable.h
+++ b/arch/um/include/asm/pgtable.h
@@ -197,12 +197,17 @@ static inline pte_t pte_mkold(pte_t pte)
static inline pte_t pte_wrprotect(pte_t pte)
{
- pte_clear_bits(pte, _PAGE_RW);
+ if (likely(pte_get_bits(pte, _PAGE_RW)))
+ pte_clear_bits(pte, _PAGE_RW);
+ else
+ return pte;
return(pte_mknewprot(pte));
}
static inline pte_t pte_mkread(pte_t pte)
{
+ if (unlikely(pte_get_bits(pte, _PAGE_USER)))
+ return pte;
pte_set_bits(pte, _PAGE_USER);
return(pte_mknewprot(pte));
}
@@ -221,6 +226,8 @@ static inline pte_t pte_mkyoung(pte_t pte)
static inline pte_t pte_mkwrite(pte_t pte)
{
+ if (unlikely(pte_get_bits(pte, _PAGE_RW)))
+ return pte;
pte_set_bits(pte, _PAGE_RW);
return(pte_mknewprot(pte));
}
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index e8bc98bb..8229cf8 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -81,6 +81,7 @@
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
@@ -147,6 +148,7 @@
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MAC=y
CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
CONFIG_NETFILTER_XT_MATCH_POLICY=y
CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
CONFIG_NETFILTER_XT_MATCH_QUOTA=y
@@ -182,6 +184,7 @@
CONFIG_IP6_NF_RAW=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_NETEM=y
CONFIG_NET_CLS_U32=y
CONFIG_NET_CLS_BPF=y
CONFIG_NET_EMATCH=y
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index c8d08da..c04a8813 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2253,6 +2253,19 @@ void perf_check_microcode(void)
x86_pmu.check_microcode();
}
+static int x86_pmu_check_period(struct perf_event *event, u64 value)
+{
+ if (x86_pmu.check_period && x86_pmu.check_period(event, value))
+ return -EINVAL;
+
+ if (value && x86_pmu.limit_period) {
+ if (x86_pmu.limit_period(event, value) > value)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static struct pmu pmu = {
.pmu_enable = x86_pmu_enable,
.pmu_disable = x86_pmu_disable,
@@ -2277,6 +2290,7 @@ static struct pmu pmu = {
.event_idx = x86_pmu_event_idx,
.sched_task = x86_pmu_sched_task,
.task_ctx_size = sizeof(struct x86_perf_task_context),
+ .check_period = x86_pmu_check_period,
};
void arch_perf_update_userpage(struct perf_event *event,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 155fa4b..fbd7551 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3440,6 +3440,11 @@ static void free_excl_cntrs(int cpu)
static void intel_pmu_cpu_dying(int cpu)
{
+ fini_debug_store_on_cpu(cpu);
+}
+
+static void intel_pmu_cpu_dead(int cpu)
+{
struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
struct intel_shared_regs *pc;
@@ -3451,8 +3456,6 @@ static void intel_pmu_cpu_dying(int cpu)
}
free_excl_cntrs(cpu);
-
- fini_debug_store_on_cpu(cpu);
}
static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -3462,6 +3465,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
intel_pmu_lbr_sched_task(ctx, sched_in);
}
+static int intel_pmu_check_period(struct perf_event *event, u64 value)
+{
+ return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
+}
+
PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -3541,6 +3549,9 @@ static __initconst const struct x86_pmu core_pmu = {
.cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
+ .cpu_dead = intel_pmu_cpu_dead,
+
+ .check_period = intel_pmu_check_period,
};
static struct attribute *intel_pmu_attrs[];
@@ -3581,8 +3592,12 @@ static __initconst const struct x86_pmu intel_pmu = {
.cpu_prepare = intel_pmu_cpu_prepare,
.cpu_starting = intel_pmu_cpu_starting,
.cpu_dying = intel_pmu_cpu_dying,
+ .cpu_dead = intel_pmu_cpu_dead,
+
.guest_get_msrs = intel_guest_get_msrs,
.sched_task = intel_pmu_sched_task,
+
+ .check_period = intel_pmu_check_period,
};
static __init void intel_clovertown_quirk(void)
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index c07bee3..b10e043 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -1222,6 +1222,8 @@ static struct pci_driver snbep_uncore_pci_driver = {
.id_table = snbep_uncore_pci_ids,
};
+#define NODE_ID_MASK 0x7
+
/*
* build pci bus to socket mapping
*/
@@ -1243,7 +1245,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool
err = pci_read_config_dword(ubox_dev, nodeid_loc, &config);
if (err)
break;
- nodeid = config;
+ nodeid = config & NODE_ID_MASK;
/* get the Node ID mapping */
err = pci_read_config_dword(ubox_dev, idmap_loc, &config);
if (err)
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index c5ad9cc..0ee3a44 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -644,6 +644,11 @@ struct x86_pmu {
* Intel host/guest support (KVM)
*/
struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr);
+
+ /*
+ * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+ */
+ int (*check_period) (struct perf_event *event, u64 period);
};
struct x86_perf_task_context {
@@ -855,7 +860,7 @@ static inline int amd_pmu_init(void)
#ifdef CONFIG_CPU_SUP_INTEL
-static inline bool intel_pmu_has_bts(struct perf_event *event)
+static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period)
{
struct hw_perf_event *hwc = &event->hw;
unsigned int hw_event, bts_event;
@@ -866,7 +871,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event)
hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
- return hw_event == bts_event && hwc->sample_period == 1;
+ return hw_event == bts_event && period == 1;
+}
+
+static inline bool intel_pmu_has_bts(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ return intel_pmu_has_bts_period(event, hwc->sample_period);
}
int intel_pmu_save_and_restart(struct perf_event *event);
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 8e02b30..3ebd777 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -51,7 +51,7 @@ static unsigned long get_dr(int n)
/*
* fill in the user structure for a core dump..
*/
-static void dump_thread32(struct pt_regs *regs, struct user32 *dump)
+static void fill_dump(struct pt_regs *regs, struct user32 *dump)
{
u32 fs, gs;
memset(dump, 0, sizeof(*dump));
@@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm)
fs = get_fs();
set_fs(KERNEL_DS);
has_dumped = 1;
+
+ fill_dump(cprm->regs, &dump);
+
strncpy(dump.u_comm, current->comm, sizeof(current->comm));
dump.u_ar0 = offsetof(struct user32, regs);
dump.signal = cprm->siginfo->si_signo;
- dump_thread32(cprm->regs, &dump);
/*
* If the size of the dump file exceeds the rlimit, then see
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 69dcdf1..fa2c93c 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -106,6 +106,9 @@ extern void fpstate_sanitize_xstate(struct fpu *fpu);
#define user_insn(insn, output, input...) \
({ \
int err; \
+ \
+ might_fault(); \
+ \
asm volatile(ASM_STAC "\n" \
"1:" #insn "\n\t" \
"2: " ASM_CLAC "\n" \
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h
index e652a7c..3f697a9 100644
--- a/arch/x86/include/asm/uv/bios.h
+++ b/arch/x86/include/asm/uv/bios.h
@@ -48,7 +48,8 @@ enum {
BIOS_STATUS_SUCCESS = 0,
BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
BIOS_STATUS_EINVAL = -EINVAL,
- BIOS_STATUS_UNAVAIL = -EBUSY
+ BIOS_STATUS_UNAVAIL = -EBUSY,
+ BIOS_STATUS_ABORT = -EINTR,
};
/* Address map parameters */
@@ -167,4 +168,9 @@ extern long system_serial_number;
extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
+/*
+ * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details
+ */
+extern struct semaphore __efi_uv_runtime_lock;
+
#endif /* _ASM_X86_UV_BIOS_H */
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 807d06a..1e0c4c7 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -69,7 +69,7 @@ void __init check_bugs(void)
* identify_boot_cpu() initialized SMT support information, let the
* core code know.
*/
- cpu_smt_check_topology_early();
+ cpu_smt_check_topology();
if (!IS_ENABLED(CONFIG_SMP)) {
pr_info("CPU: ");
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index cdbedeb..f9e7096 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -783,6 +783,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
quirk_no_way_out(i, m, regs);
if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+ m->bank = i;
mce_read_aux(m, i);
*msg = tmp;
return 1;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 7bcfa61..98d13c6 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -337,6 +337,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;
+ unsigned f_la57 = 0;
/* cpuid 1.edx */
const u32 kvm_cpuid_1_edx_x86_features =
@@ -491,7 +492,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
// TSC_ADJUST is emulated
entry->ebx |= F(TSC_ADJUST);
entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
+ f_la57 = entry->ecx & F(LA57);
cpuid_mask(&entry->ecx, CPUID_7_ECX);
+ /* Set LA57 based on hardware capability. */
+ entry->ecx |= f_la57;
entry->ecx |= f_umip;
/* PKU is not yet implemented for shadow paging. */
if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index f1d3fe5..ee8f8d7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5837,6 +5837,13 @@ static bool svm_cpu_has_accelerated_tpr(void)
static bool svm_has_emulated_msr(int index)
{
+ switch (index) {
+ case MSR_IA32_MCG_EXT_CTL:
+ return false;
+ default:
+ break;
+ }
+
return true;
}
@@ -6249,6 +6256,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
int asid, ret;
ret = -EBUSY;
+ if (unlikely(sev->active))
+ return ret;
+
asid = sev_asid_new();
if (asid < 0)
return ret;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 39a0e34..f6da5c3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -27,6 +27,7 @@
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
+#include <linux/sched/smt.h>
#include <linux/moduleparam.h>
#include <linux/mod_devicetable.h>
#include <linux/trace_events.h>
@@ -2756,7 +2757,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
if (!entry_only)
j = find_msr(&m->host, msr);
- if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) {
+ if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) ||
+ (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) {
printk_once(KERN_WARNING "Not enough msr switch entries. "
"Can't add msr %x\n", msr);
return;
@@ -3600,9 +3602,11 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
* secondary cpu-based controls. Do not include those that
* depend on CPUID bits, they are added later by vmx_cpuid_update.
*/
- rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
- msrs->secondary_ctls_low,
- msrs->secondary_ctls_high);
+ if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
+ rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
+ msrs->secondary_ctls_low,
+ msrs->secondary_ctls_high);
+
msrs->secondary_ctls_low = 0;
msrs->secondary_ctls_high &=
SECONDARY_EXEC_DESC |
@@ -8469,6 +8473,7 @@ static void free_nested(struct vcpu_vmx *vmx)
if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
return;
+ hrtimer_cancel(&vmx->nested.preemption_timer);
vmx->nested.vmxon = false;
vmx->nested.smm.vmxon = false;
free_vpid(vmx->nested.vpid02);
@@ -11128,7 +11133,7 @@ static int vmx_vm_init(struct kvm *kvm)
* Warn upon starting the first VM in a potentially
* insecure environment.
*/
- if (cpu_smt_control == CPU_SMT_ENABLED)
+ if (sched_smt_active())
pr_warn_once(L1TF_MSG_SMT);
if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER)
pr_warn_once(L1TF_MSG_L1D);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5a9a3eb..3a7cf7c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4904,6 +4904,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu,
{
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+ /*
+ * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+ * is returned, but our callers are not ready for that and they blindly
+ * call kvm_inject_page_fault. Ensure that they at least do not leak
+ * uninitialized kernel stack memory into cr2 and error code.
+ */
+ memset(exception, 0, sizeof(*exception));
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
exception);
}
diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
index 526536c..ca1e8e6 100644
--- a/arch/x86/pci/broadcom_bus.c
+++ b/arch/x86/pci/broadcom_bus.c
@@ -50,8 +50,8 @@ static void __init cnb20le_res(u8 bus, u8 slot, u8 func)
word1 = read_pci_config_16(bus, slot, func, 0xc0);
word2 = read_pci_config_16(bus, slot, func, 0xc2);
if (word1 != word2) {
- res.start = (word1 << 16) | 0x0000;
- res.end = (word2 << 16) | 0xffff;
+ res.start = ((resource_size_t) word1 << 16) | 0x0000;
+ res.end = ((resource_size_t) word2 << 16) | 0xffff;
res.flags = IORESOURCE_MEM;
update_res(info, res.start, res.end, res.flags, 0);
}
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
index 4a6a5a2..eb33432 100644
--- a/arch/x86/platform/uv/bios_uv.c
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -29,7 +29,8 @@
struct uv_systab *uv_systab;
-s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+ u64 a4, u64 a5)
{
struct uv_systab *tab = uv_systab;
s64 ret;
@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
return ret;
}
+
+s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+{
+ s64 ret;
+
+ if (down_interruptible(&__efi_uv_runtime_lock))
+ return BIOS_STATUS_ABORT;
+
+ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
+ up(&__efi_uv_runtime_lock);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(uv_bios_call);
s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
unsigned long bios_flags;
s64 ret;
+ if (down_interruptible(&__efi_uv_runtime_lock))
+ return BIOS_STATUS_ABORT;
+
local_irq_save(bios_flags);
- ret = uv_bios_call(which, a1, a2, a3, a4, a5);
+ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
local_irq_restore(bios_flags);
+ up(&__efi_uv_runtime_lock);
+
return ret;
}
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 52a7c3f..782f98b 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -899,10 +899,7 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
val = native_read_msr_safe(msr, err);
switch (msr) {
case MSR_IA32_APICBASE:
-#ifdef CONFIG_X86_X2APIC
- if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
-#endif
- val &= ~X2APIC_ENABLE;
+ val &= ~X2APIC_ENABLE;
break;
}
return val;
diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi
index 1090528..e46ae07 100644
--- a/arch/xtensa/boot/dts/xtfpga.dtsi
+++ b/arch/xtensa/boot/dts/xtfpga.dtsi
@@ -103,7 +103,7 @@
};
};
- spi0: spi-master@0d0a0000 {
+ spi0: spi@0d0a0000 {
compatible = "cdns,xtfpga-spi";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/block/bio.c b/block/bio.c
index 55a5386..04f5c14 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -580,6 +580,14 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
}
EXPORT_SYMBOL(bio_phys_segments);
+static inline void bio_clone_crypt_key(struct bio *dst, const struct bio *src)
+{
+#ifdef CONFIG_PFK
+ dst->bi_crypt_key = src->bi_crypt_key;
+ dst->bi_iter.bi_dun = src->bi_iter.bi_dun;
+#endif
+}
+
/**
* __bio_clone_fast - clone a bio that shares the original bio's biovec
* @bio: destination bio
@@ -609,7 +617,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
-
+ bio_clone_crypt_key(bio, bio_src);
bio_clone_blkcg_association(bio, bio_src);
}
EXPORT_SYMBOL(__bio_clone_fast);
diff --git a/block/blk-core.c b/block/blk-core.c
index eb8b522..643b6e4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1832,6 +1832,9 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
bio->bi_next = req->bio;
req->bio = bio;
+#ifdef CONFIG_PFK
+ WARN_ON(req->__dun || bio->bi_iter.bi_dun);
+#endif
req->__sector = bio->bi_iter.bi_sector;
req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
@@ -1981,6 +1984,9 @@ void blk_init_request_from_bio(struct request *req, struct bio *bio)
else
req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
req->write_hint = bio->bi_write_hint;
+#ifdef CONFIG_PFK
+ req->__dun = bio->bi_iter.bi_dun;
+#endif
blk_rq_bio_prep(req->q, req, bio);
}
EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
@@ -3123,8 +3129,13 @@ bool blk_update_request(struct request *req, blk_status_t error,
req->__data_len -= total_bytes;
/* update sector only for requests with clear definition of sector */
- if (!blk_rq_is_passthrough(req))
+ if (!blk_rq_is_passthrough(req)) {
req->__sector += total_bytes >> 9;
+#ifdef CONFIG_PFK
+ if (req->__dun)
+ req->__dun += total_bytes >> 12;
+#endif
+ }
/* mixed attributes always follow the first bio */
if (req->rq_flags & RQF_MIXED_MERGE) {
@@ -3488,6 +3499,9 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
dst->cpu = src->cpu;
dst->__sector = blk_rq_pos(src);
+#ifdef CONFIG_PFK
+ dst->__dun = blk_rq_dun(src);
+#endif
dst->__data_len = blk_rq_bytes(src);
if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ce41f66..7648794 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -424,7 +424,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
- blk_mq_run_hw_queue(hctx, true);
+ blk_mq_sched_restart(hctx);
}
/**
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 2e04219..4981eda 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -9,7 +9,7 @@
#include <linux/scatterlist.h>
#include <trace/events/block.h>
-
+#include <linux/pfk.h>
#include "blk.h"
static struct bio *blk_bio_discard_split(struct request_queue *q,
@@ -670,6 +670,11 @@ static void blk_account_io_merge(struct request *req)
}
}
+static bool crypto_not_mergeable(const struct bio *bio, const struct bio *nxt)
+{
+ return (!pfk_allow_merge_bio(bio, nxt));
+}
+
/*
* For non-mq, this has to be called with the request spinlock acquired.
* For mq with scheduling, the appropriate queue wide lock should be held.
@@ -708,6 +713,9 @@ static struct request *attempt_merge(struct request_queue *q,
if (req->write_hint != next->write_hint)
return NULL;
+ if (crypto_not_mergeable(req->bio, next->bio))
+ return 0;
+
/*
* If we are allowed to merge, then append bio list
* from next to rq and release next. merge_requests_fn
@@ -838,11 +846,18 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
if (rq->write_hint != bio->bi_write_hint)
return false;
+ if (crypto_not_mergeable(rq->bio, bio))
+ return false;
+
return true;
}
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
+#ifdef CONFIG_PFK
+ if (blk_rq_dun(rq) || bio_dun(bio))
+ return ELEVATOR_NO_MERGE;
+#endif
if (req_op(rq) == REQ_OP_DISCARD &&
queue_max_discard_segments(rq->q) > 1)
return ELEVATOR_DISCARD_MERGE;
diff --git a/block/blk.h b/block/blk.h
index 977d4b5..8c2a9cb 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -54,15 +54,6 @@ static inline void queue_lockdep_assert_held(struct request_queue *q)
lockdep_assert_held(q->queue_lock);
}
-static inline void queue_flag_set_unlocked(unsigned int flag,
- struct request_queue *q)
-{
- if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
- kref_read(&q->kobj.kref))
- lockdep_assert_held(q->queue_lock);
- __set_bit(flag, &q->queue_flags);
-}
-
static inline void queue_flag_clear_unlocked(unsigned int flag,
struct request_queue *q)
{
diff --git a/block/elevator.c b/block/elevator.c
index fae58b2..a54870f 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -422,7 +422,7 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
{
struct elevator_queue *e = q->elevator;
struct request *__rq;
-
+ enum elv_merge ret;
/*
* Levels of merges:
* nomerges: No merges at all attempted
@@ -435,9 +435,11 @@ enum elv_merge elv_merge(struct request_queue *q, struct request **req,
/*
* First try one-hit cache.
*/
- if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {
- enum elv_merge ret = blk_try_merge(q->last_merge, bio);
+ if (q->last_merge) {
+ if (!elv_bio_merge_ok(q->last_merge, bio))
+ return ELEVATOR_NO_MERGE;
+ ret = blk_try_merge(q->last_merge, bio);
if (ret != ELEVATOR_NO_MERGE) {
*req = q->last_merge;
return ret;
diff --git a/build.config.cuttlefish.aarch64 b/build.config.cuttlefish.aarch64
index 5fb372d..fe921b4 100644
--- a/build.config.cuttlefish.aarch64
+++ b/build.config.cuttlefish.aarch64
@@ -6,7 +6,7 @@
EXTRA_CMDS=''
KERNEL_DIR=common
POST_DEFCONFIG_CMDS="check_defconfig"
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r346389b/bin
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r349610/bin
LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
FILES="
arch/arm64/boot/Image.gz
diff --git a/build.config.cuttlefish.x86_64 b/build.config.cuttlefish.x86_64
index a81cb54..31e4057 100644
--- a/build.config.cuttlefish.x86_64
+++ b/build.config.cuttlefish.x86_64
@@ -6,7 +6,7 @@
EXTRA_CMDS=''
KERNEL_DIR=common
POST_DEFCONFIG_CMDS="check_defconfig"
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r346389b/bin
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r349610/bin
LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/x86/x86_64-linux-android-4.9/bin
FILES="
arch/x86/boot/bzImage
diff --git a/crypto/Kconfig b/crypto/Kconfig
index dd2f67b..fdaa0dc 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1084,7 +1084,8 @@
8 for decryption), this implementation only uses just two S-boxes of
256 bytes each, and attempts to eliminate data dependent latencies by
prefetching the entire table into the cache at the start of each
- block.
+ block. Interrupts are also disabled to avoid races where cachelines
+ are evicted when the CPU is interrupted to do something else.
config CRYPTO_AES_586
tristate "AES cipher algorithms (i586)"
diff --git a/crypto/aes_ti.c b/crypto/aes_ti.c
index 03023b2..1ff9785b 100644
--- a/crypto/aes_ti.c
+++ b/crypto/aes_ti.c
@@ -269,6 +269,7 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
const u32 *rkp = ctx->key_enc + 4;
int rounds = 6 + ctx->key_length / 4;
u32 st0[4], st1[4];
+ unsigned long flags;
int round;
st0[0] = ctx->key_enc[0] ^ get_unaligned_le32(in);
@@ -276,6 +277,12 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
st0[2] = ctx->key_enc[2] ^ get_unaligned_le32(in + 8);
st0[3] = ctx->key_enc[3] ^ get_unaligned_le32(in + 12);
+ /*
+ * Temporarily disable interrupts to avoid races where cachelines are
+ * evicted when the CPU is interrupted to do something else.
+ */
+ local_irq_save(flags);
+
st0[0] ^= __aesti_sbox[ 0] ^ __aesti_sbox[128];
st0[1] ^= __aesti_sbox[32] ^ __aesti_sbox[160];
st0[2] ^= __aesti_sbox[64] ^ __aesti_sbox[192];
@@ -300,6 +307,8 @@ static void aesti_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
put_unaligned_le32(subshift(st1, 1) ^ rkp[5], out + 4);
put_unaligned_le32(subshift(st1, 2) ^ rkp[6], out + 8);
put_unaligned_le32(subshift(st1, 3) ^ rkp[7], out + 12);
+
+ local_irq_restore(flags);
}
static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
@@ -308,6 +317,7 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
const u32 *rkp = ctx->key_dec + 4;
int rounds = 6 + ctx->key_length / 4;
u32 st0[4], st1[4];
+ unsigned long flags;
int round;
st0[0] = ctx->key_dec[0] ^ get_unaligned_le32(in);
@@ -315,6 +325,12 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
st0[2] = ctx->key_dec[2] ^ get_unaligned_le32(in + 8);
st0[3] = ctx->key_dec[3] ^ get_unaligned_le32(in + 12);
+ /*
+ * Temporarily disable interrupts to avoid races where cachelines are
+ * evicted when the CPU is interrupted to do something else.
+ */
+ local_irq_save(flags);
+
st0[0] ^= __aesti_inv_sbox[ 0] ^ __aesti_inv_sbox[128];
st0[1] ^= __aesti_inv_sbox[32] ^ __aesti_inv_sbox[160];
st0[2] ^= __aesti_inv_sbox[64] ^ __aesti_inv_sbox[192];
@@ -339,6 +355,8 @@ static void aesti_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
put_unaligned_le32(inv_subshift(st1, 1) ^ rkp[5], out + 4);
put_unaligned_le32(inv_subshift(st1, 2) ^ rkp[6], out + 8);
put_unaligned_le32(inv_subshift(st1, 3) ^ rkp[7], out + 12);
+
+ local_irq_restore(flags);
}
static struct crypto_alg aes_alg = {
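The aes_ti.c hunks above disable interrupts around the table-based rounds so that the S-box, prefetched at the start of each block, is not evicted while the block is being processed. A minimal userspace sketch of the prefetch idea only (plain C, with a dummy 256-byte table standing in for __aesti_sbox; the kernel code additionally brackets the rounds with local_irq_save()/local_irq_restore()):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint8_t sbox[256] = { 0x63, 0x7c, 0x77 }; /* dummy stand-in table */

/* Touch every cache line of the table so later data-dependent
 * lookups hit the cache instead of leaking timing information. */
static uint8_t prefetch_table(const uint8_t *t, size_t len)
{
	uint8_t acc = 0;
	size_t i;

	for (i = 0; i < len; i += 64)	/* assume 64-byte cache lines */
		acc ^= t[i];
	return acc;			/* returned so the loop is not optimized away */
}

int main(void)
{
	printf("prefetch checksum: %u\n", prefetch_table(sbox, sizeof(sbox)));
	return 0;
}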
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 17eb09d..ec78a04 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private)
int af_alg_release(struct socket *sock)
{
- if (sock->sk)
+ if (sock->sk) {
sock_put(sock->sk);
+ sock->sk = NULL;
+ }
return 0;
}
EXPORT_SYMBOL_GPL(af_alg_release);
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 7e8029c..b37e4b5 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -57,6 +57,8 @@
source "drivers/i2c/Kconfig"
+source "drivers/i3c/Kconfig"
+
source "drivers/spi/Kconfig"
source "drivers/spmi/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index abf600a..120f0a0 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -114,7 +114,7 @@
obj-$(CONFIG_GAMEPORT) += input/gameport/
obj-$(CONFIG_INPUT) += input/
obj-$(CONFIG_RTC_LIB) += rtc/
-obj-y += i2c/ media/
+obj-y += i2c/ i3c/ media/
obj-$(CONFIG_PPS) += pps/
obj-y += ptp/
obj-$(CONFIG_W1) += w1/
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 02c6fd9..f008ba7 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -691,6 +691,8 @@ static void __ghes_panic(struct ghes *ghes)
{
__ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus);
+ ghes_clear_estatus(ghes);
+
/* reboot to log the error! */
if (!panic_timeout)
panic_timeout = ghes_panic_timeout;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index ea59c01..f530d35 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -719,6 +719,7 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
struct acpi_nfit_memory_map *memdev;
struct acpi_nfit_desc *acpi_desc;
struct nfit_mem *nfit_mem;
+ u16 physical_id;
mutex_lock(&acpi_desc_lock);
list_for_each_entry(acpi_desc, &acpi_descs, list) {
@@ -726,10 +727,11 @@ int nfit_get_smbios_id(u32 device_handle, u16 *flags)
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
memdev = __to_nfit_memdev(nfit_mem);
if (memdev->device_handle == device_handle) {
+ *flags = memdev->flags;
+ physical_id = memdev->physical_id;
mutex_unlock(&acpi_desc->init_mutex);
mutex_unlock(&acpi_desc_lock);
- *flags = memdev->flags;
- return memdev->physical_id;
+ return physical_id;
}
}
mutex_unlock(&acpi_desc->init_mutex);
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 8516760..0da58f0 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -147,9 +147,9 @@ acpi_table_print_srat_entry(struct acpi_subtable_header *header)
{
struct acpi_srat_mem_affinity *p =
(struct acpi_srat_mem_affinity *)header;
- pr_debug("SRAT Memory (0x%lx length 0x%lx) in proximity domain %d %s%s%s\n",
- (unsigned long)p->base_address,
- (unsigned long)p->length,
+ pr_debug("SRAT Memory (0x%llx length 0x%llx) in proximity domain %d %s%s%s\n",
+ (unsigned long long)p->base_address,
+ (unsigned long long)p->length,
p->proximity_domain,
(p->flags & ACPI_SRAT_MEM_ENABLED) ?
"enabled" : "disabled",
diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c
index 9d52743..c336784 100644
--- a/drivers/acpi/spcr.c
+++ b/drivers/acpi/spcr.c
@@ -148,6 +148,13 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
}
switch (table->baud_rate) {
+ case 0:
+ /*
+ * SPCR 1.04 defines 0 as a preconfigured state of UART.
+ * Assume firmware or bootloader configures console correctly.
+ */
+ baud_rate = 0;
+ break;
case 3:
baud_rate = 9600;
break;
@@ -196,6 +203,10 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
* UART so don't attempt to change to the baud rate state
* in the table because driver cannot calculate the dividers
*/
+ baud_rate = 0;
+ }
+
+ if (!baud_rate) {
snprintf(opts, sizeof(opts), "%s,%s,0x%llx", uart, iotype,
table->serial_port.address);
} else {
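With the spcr.c change above, a baud_rate of 0 (the SPCR 1.04 "preconfigured" state) makes acpi_parse_spcr() emit an earlycon option string without a baud field, so the UART keeps whatever settings firmware left behind. A small sketch of the two snprintf() forms, using made-up values for the uart name, iotype and MMIO address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	char opts[64];
	const char *uart = "pl011";	/* example values, not from a real table */
	const char *iotype = "mmio32";
	uint64_t addr = 0x9000000;
	unsigned int baud_rate = 0;	/* 0: keep the preconfigured settings */

	if (!baud_rate)
		snprintf(opts, sizeof(opts), "%s,%s,0x%llx",
			 uart, iotype, (unsigned long long)addr);
	else
		snprintf(opts, sizeof(opts), "%s,%s,0x%llx,%u",
			 uart, iotype, (unsigned long long)addr, baud_rate);

	printf("earlycon=%s\n", opts);
	return 0;
}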
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index b8c3f9e..adf2878 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4554,6 +4554,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
{ "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
+ { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, },
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 10ecb232..03867f5 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -895,7 +895,9 @@ static int sata_rcar_probe(struct platform_device *pdev)
int ret = 0;
irq = platform_get_irq(pdev, 0);
- if (irq <= 0)
+ if (irq < 0)
+ return irq;
+ if (!irq)
return -EINVAL;
priv = devm_kzalloc(dev, sizeof(struct sata_rcar_priv), GFP_KERNEL);
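The sata_rcar change distinguishes a negative return from platform_get_irq() (a real error such as -EPROBE_DEFER, which must be propagated) from a zero return (no usable IRQ, mapped to -EINVAL). A userspace sketch of that decision, with get_irq() as a made-up stand-in for platform_get_irq():

#include <errno.h>
#include <stdio.h>

/* Stand-in for platform_get_irq(): <0 is an error code, 0 means "no IRQ". */
static int get_irq(int simulated)
{
	return simulated;
}

static int probe(int simulated_irq)
{
	int irq = get_irq(simulated_irq);

	if (irq < 0)
		return irq;	/* propagate the error, e.g. probe deferral */
	if (!irq)
		return -EINVAL;	/* zero is not a usable interrupt line */
	return 0;		/* would go on to request the IRQ, etc. */
}

int main(void)
{
	printf("irq=17  -> %d\n", probe(17));
	printf("irq=0   -> %d\n", probe(0));
	printf("irq=-11 -> %d\n", probe(-11));
	return 0;
}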
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 29f102d..329ce90 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -717,7 +717,7 @@ static int he_init_cs_block_rcm(struct he_dev *he_dev)
instead of '/ 512', use '>> 9' to prevent a call
to divdu3 on x86 platforms
*/
- rate_cps = (unsigned long long) (1 << exp) * (man + 512) >> 9;
+ rate_cps = (unsigned long long) (1UL << exp) * (man + 512) >> 9;
if (rate_cps < 10)
rate_cps = 10; /* 2.2.1 minimum payload rate is 10 cps */
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 585e2e1..e06a579 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -614,8 +614,10 @@ static void remove_probe_files(struct bus_type *bus)
static ssize_t uevent_store(struct device_driver *drv, const char *buf,
size_t count)
{
- kobject_synth_uevent(&drv->p->kobj, buf, count);
- return count;
+ int rc;
+
+ rc = kobject_synth_uevent(&drv->p->kobj, buf, count);
+ return rc ? rc : count;
}
static DRIVER_ATTR_WO(uevent);
@@ -831,8 +833,10 @@ static void klist_devices_put(struct klist_node *n)
static ssize_t bus_uevent_store(struct bus_type *bus,
const char *buf, size_t count)
{
- kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
- return count;
+ int rc;
+
+ rc = kobject_synth_uevent(&bus->p->subsys.kobj, buf, count);
+ return rc ? rc : count;
}
static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index 5d5b598..dd6a685 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np)
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].size_prop;
- if (of_property_read_u32(np, propname, &this_leaf->size))
- this_leaf->size = 0;
+ of_property_read_u32(np, propname, &this_leaf->size);
}
/* not cache_line_size() because that's a macro in include/linux/cache.h */
@@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np)
ct_idx = get_cacheinfo_idx(this_leaf->type);
propname = cache_type_info[ct_idx].nr_sets_prop;
- if (of_property_read_u32(np, propname, &this_leaf->number_of_sets))
- this_leaf->number_of_sets = 0;
+ of_property_read_u32(np, propname, &this_leaf->number_of_sets);
}
static void cache_associativity(struct cacheinfo *this_leaf)
diff --git a/drivers/base/core.c b/drivers/base/core.c
index e7662d1..40c4d48 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1072,8 +1072,14 @@ static ssize_t uevent_show(struct device *dev, struct device_attribute *attr,
static ssize_t uevent_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- if (kobject_synth_uevent(&dev->kobj, buf, count))
+ int rc;
+
+ rc = kobject_synth_uevent(&dev->kobj, buf, count);
+
+ if (rc) {
dev_err(dev, "uevent: failed to send synthetic uevent\n");
+ return rc;
+ }
return count;
}
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index c7aed86..0ef8197 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -211,6 +211,59 @@ static struct attribute_group cpu_isolated_attr_group = {
#endif
+static ssize_t show_sched_load_boost(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ ssize_t rc;
+ unsigned int boost;
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int cpuid = cpu->dev.id;
+
+ boost = per_cpu(sched_load_boost, cpuid);
+ rc = snprintf(buf, PAGE_SIZE-2, "%d\n", boost);
+
+ return rc;
+}
+
+static ssize_t __ref store_sched_load_boost(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ int boost;
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
+ int cpuid = cpu->dev.id;
+
+ err = kstrtoint(strstrip((char *)buf), 0, &boost);
+ if (err)
+ return err;
+
+ /*
+ * -100 is low enough to cancel out the CPU's load and make it near zero.
+ * 1000 is close to the maximum value that cpu_util_freq_{walt,pelt}
+ * can take without overflow.
+ */
+ if (boost < -100 || boost > 1000)
+ return -EINVAL;
+
+ per_cpu(sched_load_boost, cpuid) = boost;
+
+ return count;
+}
+
+static DEVICE_ATTR(sched_load_boost, 0644,
+ show_sched_load_boost,
+ store_sched_load_boost);
+
+static struct attribute *sched_cpu_attrs[] = {
+ &dev_attr_sched_load_boost.attr,
+ NULL
+};
+
+static struct attribute_group sched_cpu_attr_group = {
+ .attrs = sched_cpu_attrs,
+};
+
static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
&crash_note_cpu_attr_group,
@@ -218,6 +271,7 @@ static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_HOTPLUG_CPU
&cpu_isolated_attr_group,
#endif
+ &sched_cpu_attr_group,
NULL
};
@@ -228,6 +282,7 @@ static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_HOTPLUG_CPU
&cpu_isolated_attr_group,
#endif
+ &sched_cpu_attr_group,
NULL
};
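The new per-CPU sched_load_boost attribute accepts an integer in the range [-100, 1000]. A small sketch of driving it from userspace, assuming the attribute appears at /sys/devices/system/cpu/cpu0/sched_load_boost (the usual location for per-CPU device attributes):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/cpu0/sched_load_boost";
	char buf[16];
	FILE *f;

	f = fopen(path, "w");
	if (!f) {
		perror(path);	/* attribute absent on kernels without this patch */
		return 1;
	}
	fputs("50\n", f);	/* boost cpu0's reported load by 50 (range -100..1000) */
	fclose(f);

	f = fopen(path, "r");
	if (f && fgets(buf, sizeof(buf), f))
		printf("sched_load_boost = %s", buf);
	if (f)
		fclose(f);
	return 0;
}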
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index bb666f6..467c43c 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -947,9 +947,6 @@ static void __device_release_driver(struct device *dev, struct device *parent)
drv = dev->driver;
if (drv) {
- if (driver_allows_async_probing(drv))
- async_synchronize_full();
-
while (device_links_busy(dev)) {
device_unlock(dev);
if (parent && dev->bus->need_parent_lock)
@@ -1055,6 +1052,9 @@ void driver_detach(struct device_driver *drv)
struct device_private *dev_prv;
struct device *dev;
+ if (driver_allows_async_probing(drv))
+ async_synchronize_full();
+
for (;;) {
spin_lock(&drv->p->klist_devices.k_lock);
if (list_empty(&drv->p->klist_devices.k_list)) {
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index f98a097..d68b52c 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -24,8 +24,14 @@ struct devres_node {
struct devres {
struct devres_node node;
- /* -- 3 pointers */
- unsigned long long data[]; /* guarantee ull alignment */
+ /*
+ * Some archs want to perform DMA into kmalloc caches
+ * and need a guaranteed alignment larger than
+ * the alignment of a 64-bit integer.
+ * Thus we use ARCH_KMALLOC_MINALIGN here and get exactly the same
+ * buffer alignment as if it was allocated by plain kmalloc().
+ */
+ u8 __aligned(ARCH_KMALLOC_MINALIGN) data[];
};
struct devres_group {
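The devres change replaces the unsigned long long trailer with a byte array aligned to ARCH_KMALLOC_MINALIGN, so a driver that DMAs into devres-managed memory gets the same alignment it would get from plain kmalloc(). A standalone sketch of the construct, using 128 as a stand-in for ARCH_KMALLOC_MINALIGN:

#include <stddef.h>
#include <stdio.h>

#define KMALLOC_MINALIGN 128	/* stand-in for ARCH_KMALLOC_MINALIGN */

struct node {			/* plays the role of struct devres_node */
	struct node *next, *prev;
	const char *name;
};

struct res {			/* plays the role of struct devres */
	struct node node;
	/* Aligned flexible array: the payload handed back to the driver
	 * starts on a KMALLOC_MINALIGN boundary, like a kmalloc() buffer. */
	unsigned char __attribute__((aligned(KMALLOC_MINALIGN))) data[];
};

int main(void)
{
	printf("offsetof(struct res, data) = %zu (multiple of %d: %s)\n",
	       offsetof(struct res, data), KMALLOC_MINALIGN,
	       offsetof(struct res, data) % KMALLOC_MINALIGN ? "no" : "yes");
	return 0;
}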
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index d15703b1f..7145031 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -668,14 +668,15 @@ drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int for
if (rv == SS_TWO_PRIMARIES) {
/* Maybe the peer is detected as dead very soon...
retry at most once more in this case. */
- int timeo;
- rcu_read_lock();
- nc = rcu_dereference(connection->net_conf);
- timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
- rcu_read_unlock();
- schedule_timeout_interruptible(timeo);
- if (try < max_tries)
+ if (try < max_tries) {
+ int timeo;
try = max_tries - 1;
+ rcu_read_lock();
+ nc = rcu_dereference(connection->net_conf);
+ timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
+ rcu_read_unlock();
+ schedule_timeout_interruptible(timeo);
+ }
continue;
}
if (rv < SS_SUCCESS) {
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index fc67fd8..81f9bd6 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -3364,7 +3364,7 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
enum drbd_conns rv = C_MASK;
enum drbd_disk_state mydisk;
struct net_conf *nc;
- int hg, rule_nr, rr_conflict, tentative;
+ int hg, rule_nr, rr_conflict, tentative, always_asbp;
mydisk = device->state.disk;
if (mydisk == D_NEGOTIATING)
@@ -3415,8 +3415,12 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
rcu_read_lock();
nc = rcu_dereference(peer_device->connection->net_conf);
+ always_asbp = nc->always_asbp;
+ rr_conflict = nc->rr_conflict;
+ tentative = nc->tentative;
+ rcu_read_unlock();
- if (hg == 100 || (hg == -100 && nc->always_asbp)) {
+ if (hg == 100 || (hg == -100 && always_asbp)) {
int pcount = (device->state.role == R_PRIMARY)
+ (peer_role == R_PRIMARY);
int forced = (hg == -100);
@@ -3455,9 +3459,6 @@ static enum drbd_conns drbd_sync_handshake(struct drbd_peer_device *peer_device,
"Sync from %s node\n",
(hg < 0) ? "peer" : "this");
}
- rr_conflict = nc->rr_conflict;
- tentative = nc->tentative;
- rcu_read_unlock();
if (hg == -100) {
/* FIXME this log message is not correct if we end up here
@@ -4142,7 +4143,7 @@ static int receive_uuids(struct drbd_connection *connection, struct packet_info
kfree(device->p_uuid);
device->p_uuid = p_uuid;
- if (device->state.conn < C_CONNECTED &&
+ if ((device->state.conn < C_CONNECTED || device->state.pdsk == D_DISKLESS) &&
device->state.disk < D_INCONSISTENT &&
device->state.role == R_PRIMARY &&
(device->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
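The drbd_receiver.c hunk copies always_asbp, rr_conflict and tentative out of net_conf while still inside rcu_read_lock()/rcu_read_unlock(), so the read-side critical section does not have to stay open across the code that follows. A userspace analogy of that copy-out-under-the-read-lock pattern, using a pthread rwlock in place of RCU:

#include <pthread.h>
#include <stdio.h>

struct net_conf_like {		/* shared state, updated under the write lock */
	int always_asbp;
	int rr_conflict;
	int tentative;
};

static pthread_rwlock_t conf_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct net_conf_like conf = { 1, 2, 0 };

static void handshake(void)
{
	int always_asbp, rr_conflict, tentative;

	/* Copy the fields we need while holding the read lock ... */
	pthread_rwlock_rdlock(&conf_lock);
	always_asbp = conf.always_asbp;
	rr_conflict = conf.rr_conflict;
	tentative   = conf.tentative;
	pthread_rwlock_unlock(&conf_lock);

	/* ... then use the snapshots; the lock is no longer held here. */
	printf("asbp=%d rr_conflict=%d tentative=%d\n",
	       always_asbp, rr_conflict, tentative);
}

int main(void)
{
	handshake();
	return 0;
}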
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index f68e9ba..5d70240 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -45,6 +45,8 @@ MODULE_VERSION(DRV_MODULE_VERSION);
#define WAITING_FOR_GEN_CMD 0x04
#define WAITING_FOR_ANY -1
+#define VDC_MAX_RETRIES 10
+
static struct workqueue_struct *sunvdc_wq;
struct vdc_req_entry {
@@ -431,6 +433,7 @@ static int __vdc_tx_trigger(struct vdc_port *port)
.end_idx = dr->prod,
};
int err, delay;
+ int retries = 0;
hdr.seq = dr->snd_nxt;
delay = 1;
@@ -443,6 +446,8 @@ static int __vdc_tx_trigger(struct vdc_port *port)
udelay(delay);
if ((delay <<= 1) > 128)
delay = 128;
+ if (retries++ > VDC_MAX_RETRIES)
+ break;
} while (err == -EAGAIN);
if (err == -ENOTCONN)
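__vdc_tx_trigger already backs off exponentially (capped at 128 us) on -EAGAIN; the patch additionally bounds the loop with VDC_MAX_RETRIES so a peer that never drains the ring cannot stall the caller forever. A generic sketch of that capped-backoff-with-retry-limit shape, with try_send() as a made-up stand-in for the LDC send:

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

#define MAX_RETRIES 10

/* Hypothetical operation: fails with -EAGAIN the first few times. */
static int try_send(int *attempts)
{
	return (*attempts)++ < 4 ? -EAGAIN : 0;
}

int main(void)
{
	int attempts = 0, retries = 0, delay = 1, err;

	do {
		err = try_send(&attempts);
		if (err == -EAGAIN) {
			usleep(delay);			/* back off ...        */
			if ((delay <<= 1) > 128)	/* ... capped at 128us */
				delay = 128;
			if (retries++ > MAX_RETRIES)	/* and bounded         */
				break;
		}
	} while (err == -EAGAIN);

	printf("err=%d after %d attempts\n", err, attempts);
	return 0;
}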
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 469541c..20907a0 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -1026,7 +1026,11 @@ static void floppy_release(struct gendisk *disk, fmode_t mode)
struct swim3 __iomem *sw = fs->swim3;
mutex_lock(&swim3_mutex);
- if (fs->ref_count > 0 && --fs->ref_count == 0) {
+ if (fs->ref_count > 0)
+ --fs->ref_count;
+ else if (fs->ref_count == -1)
+ fs->ref_count = 0;
+ if (fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
out_8(&sw->control_bic, 0xff);
swim3_select(fs, RELAX);
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index ddbd8c6..8001323 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -907,6 +907,10 @@ static int bcm_get_resources(struct bcm_device *dev)
dev->clk = devm_clk_get(dev->dev, NULL);
+ /* Handle deferred probing */
+ if (dev->clk == ERR_PTR(-EPROBE_DEFER))
+ return PTR_ERR(dev->clk);
+
dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup",
GPIOD_OUT_LOW);
if (IS_ERR(dev->device_wakeup))
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index ae3a753..72cd96a 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -889,6 +889,7 @@ static void __exit exit_gdrom(void)
platform_device_unregister(pd);
platform_driver_unregister(&gdrom_driver);
kfree(gd.toc);
+ kfree(gd.cd_info);
}
module_init(init_gdrom);
diff --git a/drivers/char/diag/diag_ipc_logging.h b/drivers/char/diag/diag_ipc_logging.h
index fe754a9..dd4dc3c 100644
--- a/drivers/char/diag/diag_ipc_logging.h
+++ b/drivers/char/diag/diag_ipc_logging.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2015, 2017-2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2017-2019 The Linux Foundation. All rights reserved.
*/
#ifndef DIAGIPCLOG_H
@@ -16,6 +16,7 @@
#define DIAG_DEBUG_MASKS 0x0010
#define DIAG_DEBUG_POWER 0x0020
#define DIAG_DEBUG_BRIDGE 0x0040
+#define DIAG_DEBUG_CMD_INFO 0x0080
#ifdef CONFIG_IPC_LOGGING
extern uint16_t diag_debug_mask;
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
index 2ecd05b..ec537f1 100644
--- a/drivers/char/diag/diag_mux.c
+++ b/drivers/char/diag/diag_mux.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/slab.h>
@@ -128,9 +128,10 @@ int diag_mux_queue_read(int proc)
int diag_mux_write(int proc, unsigned char *buf, int len, int ctx)
{
struct diag_logger_t *logger = NULL;
- int peripheral;
+ int peripheral = -EINVAL, type = -EINVAL, log_sink;
+ unsigned char *offset = NULL;
- if (proc < 0 || proc >= NUM_MUX_PROC)
+ if (proc < 0 || proc >= NUM_MUX_PROC || !buf)
return -EINVAL;
if (!diag_mux)
return -EIO;
@@ -138,16 +139,38 @@ int diag_mux_write(int proc, unsigned char *buf, int len, int ctx)
peripheral = diag_md_get_peripheral(ctx);
if (peripheral < 0) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
- "diag:%s:%d invalid peripheral = %d\n",
- __func__, __LINE__, peripheral);
+ "diag: invalid peripheral = %d\n", peripheral);
return -EINVAL;
}
- if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask)
+ if (MD_PERIPHERAL_MASK(peripheral) & diag_mux->mux_mask) {
logger = diag_mux->md_ptr;
- else
+ log_sink = DIAG_MEMORY_DEVICE_MODE;
+ } else {
logger = diag_mux->usb_ptr;
+ log_sink = DIAG_USB_MODE;
+ }
+ if (!proc) {
+ type = GET_BUF_TYPE(ctx);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Packet from PD: %d, type: %d, len: %d to be written to %s\n",
+ peripheral, type, len,
+ (log_sink ? "MD_device" : "USB"));
+
+ if (type == TYPE_CMD) {
+ if (driver->p_hdlc_disabled[peripheral])
+ offset = buf + 4;
+ else
+ offset = buf;
+
+ DIAG_LOG(DIAG_DEBUG_CMD_INFO,
+ "diag: cmd rsp (%02x %02x %02x %02x) from PD: %d to be written to %s\n",
+ *(offset), *(offset+1), *(offset+2),
+ *(offset+3), peripheral,
+ (log_sink ? "MD_device" : "USB"));
+ }
+ }
if (logger && logger->log_ops && logger->log_ops->write)
return logger->log_ops->write(proc, buf, len, ctx);
return 0;
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index 768c30f..750413b 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -134,6 +134,7 @@
#define DIAG_GET_TIME_API 0x21B
#define DIAG_SET_TIME_API 0x21C
#define DIAG_GET_DIAG_ID 0x222
+#define DIAG_FEATURE_QUERY 0x225
#define DIAG_SWITCH_COMMAND 0x081B
#define DIAG_BUFFERING_MODE 0x080C
@@ -175,6 +176,10 @@
#define FEATURE_MASK_LEN 4
+#define F_DIAG_EVENT_REPORT 0
+#define F_DIAG_HW_ACCELERATION 1
+#define F_DIAG_MULTI_SIM_MASK 2
+
#define DIAG_MD_NONE 0
#define DIAG_MD_PERIPHERAL 1
@@ -457,6 +462,12 @@ struct diag_cmd_hdlc_disable_rsp_t {
uint8_t result;
};
+struct diag_cmd_feature_query_rsp_t {
+ struct diag_pkt_header_t header;
+ uint8_t version;
+ uint8_t feature_len;
+};
+
struct diag_pkt_frame_t {
uint8_t start;
uint8_t version;
@@ -628,6 +639,7 @@ struct diagchar_dev {
struct diagfwd_info *diagfwd_cmd[NUM_PERIPHERALS];
struct diagfwd_info *diagfwd_dci_cmd[NUM_PERIPHERALS];
struct diag_feature_t feature[NUM_PERIPHERALS];
+ uint32_t apps_feature;
struct diag_buffering_mode_t buffering_mode[NUM_MD_SESSIONS];
uint8_t buffering_flag[NUM_MD_SESSIONS];
struct mutex mode_lock;
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 1453d9d..ae3673a 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -42,6 +42,8 @@
#define STM_RSP_NUM_BYTES 9
#define RETRY_MAX_COUNT 1000
+#define SET_APPS_FEATURE(driver, n) ((driver->apps_feature) |= (1 << (n)))
+
struct diag_md_hdlc_reset_work {
int pid;
struct work_struct work;
@@ -983,6 +985,43 @@ static int diag_cmd_disable_hdlc(unsigned char *src_buf, int src_len,
return write_len;
}
+int diag_cmd_feature_query(unsigned char *src_buf, int src_len,
+ unsigned char *dest_buf, int dest_len)
+{
+ int write_len = 0;
+ struct diag_pkt_header_t *header = NULL;
+ struct diag_cmd_feature_query_rsp_t rsp;
+
+ if (!src_buf || !dest_buf || src_len <= sizeof(struct diag_pkt_header_t)
+ || dest_len <= 0 || dest_len > DIAG_MAX_RSP_SIZE) {
+ pr_err("diag: Feature query, invalid input src_buf: %pK, src_len: %d, dest_buf: %pK, dest_len: %d\n",
+ src_buf, src_len, dest_buf, dest_len);
+ return -EINVAL;
+ }
+
+ header = (struct diag_pkt_header_t *)src_buf;
+
+ rsp.header.cmd_code = header->cmd_code;
+ rsp.header.subsys_id = header->subsys_id;
+ rsp.header.subsys_cmd_code = header->subsys_cmd_code;
+ rsp.version = 1;
+ rsp.feature_len = sizeof(driver->apps_feature);
+ if (dest_len < (sizeof(rsp) + sizeof(driver->apps_feature)))
+ return -EINVAL;
+ memcpy(dest_buf, &rsp, sizeof(rsp));
+ memcpy(dest_buf + sizeof(rsp), &(driver->apps_feature),
+ sizeof(driver->apps_feature));
+ write_len = sizeof(rsp) + sizeof(driver->apps_feature);
+ return write_len;
+}
+
+static void diag_init_apps_feature(void)
+{
+ driver->apps_feature = 0;
+
+ SET_APPS_FEATURE(driver, F_DIAG_EVENT_REPORT);
+}
+
void diag_send_error_rsp(unsigned char *buf, int len,
int pid)
{
@@ -1029,8 +1068,8 @@ int diag_process_apps_pkt(unsigned char *buf, int len, int pid)
entry.cmd_code_lo = (uint16_t)(*(uint16_t *)temp);
temp += sizeof(uint16_t);
- pr_debug("diag: In %s, received cmd %02x %02x %02x\n",
- __func__, entry.cmd_code, entry.subsys_id, entry.cmd_code_hi);
+ DIAG_LOG(DIAG_DEBUG_CMD_INFO, "diag: received cmd %02x %02x %02x\n",
+ entry.cmd_code, entry.subsys_id, entry.cmd_code_hi);
if (*buf == DIAG_CMD_LOG_ON_DMND && driver->log_on_demand_support &&
driver->feature[PERIPHERAL_MODEM].rcvd_feature_mask) {
@@ -1123,6 +1162,17 @@ int diag_process_apps_pkt(unsigned char *buf, int len, int pid)
diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
return 0;
}
+ /* Check for Diag Feature Query command */
+ else if ((*buf == DIAG_CMD_DIAG_SUBSYS) &&
+ (*(buf+1) == DIAG_SS_DIAG) &&
+ (*(uint16_t *)(buf+2) == DIAG_FEATURE_QUERY)) {
+ write_len = diag_cmd_feature_query(buf, len,
+ driver->apps_rsp_buf,
+ DIAG_MAX_RSP_SIZE);
+ if (write_len > 0)
+ diag_send_rsp(driver->apps_rsp_buf, write_len, pid);
+ return 0;
+ }
/* Check for download command */
else if ((chk_apps_master()) && (*buf == 0x3A)) {
/* send response back */
@@ -1910,6 +1960,8 @@ int diagfwd_init(void)
driver->feature[i].diag_id_support = 0;
}
+ diag_init_apps_feature();
+
for (i = 0; i < NUM_MD_SESSIONS; i++) {
driver->buffering_mode[i].peripheral = i;
driver->buffering_mode[i].mode = DIAG_BUFFERING_MODE_STREAMING;
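The feature-query handler above packs a small response header followed by the 32-bit apps_feature bitmask, whose individual capability bits are set with SET_APPS_FEATURE(). A freestanding sketch of that set-bit-and-pack shape, with simplified, illustrative names (the real header layout comes from struct diag_pkt_header_t):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SET_FEATURE(mask, n)	((mask) |= (1u << (n)))
#define F_EVENT_REPORT		0	/* mirrors F_DIAG_EVENT_REPORT */

struct rsp_hdr {			/* simplified stand-in for the diag headers */
	uint8_t cmd_code, subsys_id;
	uint16_t subsys_cmd_code;
	uint8_t version, feature_len;
};

int main(void)
{
	uint32_t apps_feature = 0;
	uint8_t out[sizeof(struct rsp_hdr) + sizeof(apps_feature)];
	/* example values only, not the real diag opcodes */
	struct rsp_hdr hdr = { 0x4b, 0x12, 0x225, 1, sizeof(apps_feature) };
	size_t i;

	SET_FEATURE(apps_feature, F_EVENT_REPORT);

	memcpy(out, &hdr, sizeof(hdr));			/* header first ...  */
	memcpy(out + sizeof(hdr), &apps_feature,	/* ... then the mask */
	       sizeof(apps_feature));

	for (i = 0; i < sizeof(out); i++)
		printf("%02x ", out[i]);
	printf("\n");
	return 0;
}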
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 1fb82bd..e85acd7 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -444,6 +444,7 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
struct diagfwd_buf_t *temp_fwdinfo_upd = NULL;
int flag_buf_1 = 0, flag_buf_2 = 0;
uint8_t peripheral, temp_diagid_val;
+ unsigned char *buf_offset = NULL;
if (!fwd_info || !buf || len <= 0) {
diag_ws_release();
@@ -498,9 +499,24 @@ static void diagfwd_data_read_untag_done(struct diagfwd_info *fwd_info,
}
while (processed < len) {
+ /* Debug log to check diag_id header validity */
pr_debug("diag_fr:untagged packet buf contents: %02x %02x %02x %02x\n",
*temp_buf_main, *(temp_buf_main+1),
*(temp_buf_main+2), *(temp_buf_main+3));
+
+ /* Debug log ONLY for CMD channel */
+ if (fwd_info->type == TYPE_CMD) {
+ /* buf_offset taking into account
+ * diag_id header and non-hdlc header
+ */
+ buf_offset = temp_buf_main + 8;
+
+ DIAG_LOG(DIAG_DEBUG_CMD_INFO,
+ "diag: cmd rsp (%02x %02x %02x %02x) received from peripheral: %d\n",
+ *(buf_offset), *(buf_offset+1),
+ *(buf_offset+2), *(buf_offset+3), peripheral);
+ }
+
packet_len =
*(uint16_t *) (temp_buf_main + 2);
if (packet_len > PERIPHERAL_BUF_SZ)
@@ -605,7 +621,7 @@ static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
{
int err = 0;
int write_len = 0;
- unsigned char *write_buf = NULL;
+ unsigned char *write_buf = NULL, *buf_offset = NULL;
struct diagfwd_buf_t *temp_buf = NULL;
uint8_t hdlc_disabled = 0;
@@ -631,6 +647,16 @@ static void diagfwd_data_read_done(struct diagfwd_info *fwd_info,
hdlc_disabled = driver->p_hdlc_disabled[fwd_info->peripheral];
+ if (fwd_info->type == TYPE_CMD) {
+ /* buf_offset taking into account non-hdlc header */
+ buf_offset = buf + 4;
+
+ DIAG_LOG(DIAG_DEBUG_CMD_INFO,
+ "diag: cmd rsp(%02x %02x %02x %02x) received from peripheral: %d\n",
+ *(buf_offset), *(buf_offset+1), *(buf_offset+2),
+ *(buf_offset+3), fwd_info->peripheral);
+ }
+
if (!driver->feature[fwd_info->peripheral].encode_hdlc) {
if (fwd_info->buf_1 && fwd_info->buf_1->data == buf) {
temp_buf = fwd_info->buf_1;
@@ -1124,8 +1150,9 @@ int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
int err = 0;
uint8_t retry_count = 0;
uint8_t max_retries = 3;
+ unsigned char *temp_buf = NULL;
- if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES)
+ if (peripheral >= NUM_PERIPHERALS || type >= NUM_TYPES || !buf)
return -EINVAL;
if (type == TYPE_CMD || type == TYPE_DCI_CMD) {
@@ -1156,6 +1183,17 @@ int diagfwd_write(uint8_t peripheral, uint8_t type, void *buf, int len)
if (!(fwd_info->p_ops && fwd_info->p_ops->write && fwd_info->ctxt))
return -EIO;
+ if (type == TYPE_CMD) {
+ temp_buf = (unsigned char *)(buf);
+ /* Only raw bytes are sent to the peripheral,
+ * HDLC/NON-HDLC need not be considered
+ */
+ DIAG_LOG(DIAG_DEBUG_CMD_INFO,
+ "diag: cmd (%02x %02x %02x %02x) ready to be written to p: %d\n",
+ *(temp_buf), *(temp_buf+1), *(temp_buf+2), *(temp_buf+3),
+ peripheral);
+ }
+
while (retry_count < max_retries) {
err = 0;
err = fwd_info->p_ops->write(fwd_info->ctxt, buf, len);
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index 202f0a1..8d216d4 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -352,6 +352,7 @@ static void diag_state_open_socket(void *ctxt);
static void diag_state_close_socket(void *ctxt);
static int diag_socket_write(void *ctxt, unsigned char *buf, int len);
static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len);
+static void diag_socket_drop_data(struct diag_socket_info *info);
static void diag_socket_queue_read(void *ctxt);
static struct diag_peripheral_ops socket_ops = {
@@ -613,6 +614,9 @@ static void socket_read_work_fn(struct work_struct *work)
return;
}
+ if (!info->fwd_ctxt && info->port_type == PORT_TYPE_SERVER)
+ diag_socket_drop_data(info);
+
if (!atomic_read(&info->opened) && info->port_type == PORT_TYPE_SERVER)
diagfwd_buffers_init(info->fwd_ctxt);
@@ -683,6 +687,41 @@ static void handle_ctrl_pkt(struct diag_socket_info *info, void *buf, int len)
}
}
+static void diag_socket_drop_data(struct diag_socket_info *info)
+{
+ int err = 0;
+ int pkt_len = 0;
+ int read_len = 0;
+ unsigned char *temp = NULL;
+ struct kvec iov;
+ struct msghdr read_msg = {NULL, 0};
+ struct sockaddr_qrtr src_addr = {0};
+ unsigned long flags;
+
+ temp = vzalloc(PERIPHERAL_BUF_SZ);
+ if (!temp)
+ return;
+
+ while (info->data_ready > 0) {
+ iov.iov_base = temp;
+ iov.iov_len = PERIPHERAL_BUF_SZ;
+ read_msg.msg_name = &src_addr;
+ read_msg.msg_namelen = sizeof(src_addr);
+ err = info->hdl->ops->ioctl(info->hdl, TIOCINQ,
+ (unsigned long)&pkt_len);
+ if (err || pkt_len < 0)
+ break;
+ spin_lock_irqsave(&info->lock, flags);
+ info->data_ready--;
+ spin_unlock_irqrestore(&info->lock, flags);
+ read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
+ pkt_len, MSG_DONTWAIT);
+ pr_debug("%s : %s drop total bytes: %d\n", __func__,
+ info->name, read_len);
+ }
+ vfree(temp);
+}
+
static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len)
{
int err = 0;
@@ -693,8 +732,8 @@ static int diag_socket_read(void *ctxt, unsigned char *buf, int buf_len)
int qrtr_ctrl_recd = 0;
uint8_t buf_full = 0;
unsigned char *temp = NULL;
- struct kvec iov = {0};
- struct msghdr read_msg = {0};
+ struct kvec iov;
+ struct msghdr read_msg = {NULL, 0};
struct sockaddr_qrtr src_addr = {0};
struct diag_socket_info *info;
struct mutex *channel_mutex;
diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c
index 15af423..f5d54a6 100644
--- a/drivers/clk/imgtec/clk-boston.c
+++ b/drivers/clk/imgtec/clk-boston.c
@@ -73,27 +73,32 @@ static void __init clk_boston_setup(struct device_node *np)
hw = clk_hw_register_fixed_rate(NULL, "input", NULL, 0, in_freq);
if (IS_ERR(hw)) {
pr_err("failed to register input clock: %ld\n", PTR_ERR(hw));
- return;
+ goto error;
}
onecell->hws[BOSTON_CLK_INPUT] = hw;
hw = clk_hw_register_fixed_rate(NULL, "sys", "input", 0, sys_freq);
if (IS_ERR(hw)) {
pr_err("failed to register sys clock: %ld\n", PTR_ERR(hw));
- return;
+ goto error;
}
onecell->hws[BOSTON_CLK_SYS] = hw;
hw = clk_hw_register_fixed_rate(NULL, "cpu", "input", 0, cpu_freq);
if (IS_ERR(hw)) {
pr_err("failed to register cpu clock: %ld\n", PTR_ERR(hw));
- return;
+ goto error;
}
onecell->hws[BOSTON_CLK_CPU] = hw;
err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, onecell);
if (err)
pr_err("failed to add DT provider: %d\n", err);
+
+ return;
+
+error:
+ kfree(onecell);
}
/*
diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c
index eb6bcbf..390e3e0 100644
--- a/drivers/clk/imx/clk-imx6sl.c
+++ b/drivers/clk/imx/clk-imx6sl.c
@@ -17,6 +17,8 @@
#include "clk.h"
+#define CCDR 0x4
+#define BM_CCM_CCDR_MMDC_CH0_MASK (1 << 17)
#define CCSR 0xc
#define BM_CCSR_PLL1_SW_CLK_SEL (1 << 2)
#define CACRR 0x10
@@ -409,6 +411,10 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
clks[IMX6SL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
clks[IMX6SL_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8);
+ /* Ensure the MMDC CH0 handshake is bypassed */
+ writel_relaxed(readl_relaxed(base + CCDR) |
+ BM_CCM_CCDR_MMDC_CH0_MASK, base + CCDR);
+
imx_check_clocks(clks, ARRAY_SIZE(clks));
clk_data.clks = clks;
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index 50060e8..9d79ff8 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -583,7 +583,7 @@ static struct clk_regmap meson8b_cpu_scale_div = {
.data = &(struct clk_regmap_div_data){
.offset = HHI_SYS_CPU_CLK_CNTL1,
.shift = 20,
- .width = 9,
+ .width = 10,
.table = cpu_scale_table,
.flags = CLK_DIVIDER_ALLOW_ZERO,
},
@@ -596,20 +596,27 @@ static struct clk_regmap meson8b_cpu_scale_div = {
},
};
+static u32 mux_table_cpu_scale_out_sel[] = { 0, 1, 3 };
static struct clk_regmap meson8b_cpu_scale_out_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_SYS_CPU_CLK_CNTL0,
.mask = 0x3,
.shift = 2,
+ .table = mux_table_cpu_scale_out_sel,
},
.hw.init = &(struct clk_init_data){
.name = "cpu_scale_out_sel",
.ops = &clk_regmap_mux_ro_ops,
+ /*
+ * NOTE: We are skipping the parent with value 0x2 (which is
+ * "cpu_div3") because it results in a duty cycle of 33% which
+ * makes the system unstable and can result in a lockup of the
+ * whole system.
+ */
.parent_names = (const char *[]) { "cpu_in_sel",
"cpu_div2",
- "cpu_div3",
"cpu_scale_div" },
- .num_parents = 4,
+ .num_parents = 3,
.flags = CLK_SET_RATE_PARENT,
},
};
@@ -627,7 +634,8 @@ static struct clk_regmap meson8b_cpu_clk = {
"cpu_scale_out_sel" },
.num_parents = 2,
.flags = (CLK_SET_RATE_PARENT |
- CLK_SET_RATE_NO_REPARENT),
+ CLK_SET_RATE_NO_REPARENT |
+ CLK_IS_CRITICAL),
},
};
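The meson8b change drops the "cpu_div3" parent and adds a mux table so that parent indices 0, 1 and 2 map to register field values 0, 1 and 3, skipping the problematic value 2 described in the NOTE above. A tiny sketch of that index-to-field translation:

#include <stdio.h>

/* Parent index -> register field value; value 2 ("cpu_div3") is skipped. */
static const unsigned int mux_table_cpu_scale_out_sel[] = { 0, 1, 3 };
static const char *const parents[] = { "cpu_in_sel", "cpu_div2", "cpu_scale_div" };

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(parents) / sizeof(parents[0]); i++)
		printf("parent %u (%s) -> register value %u\n",
		       i, parents[i], mux_table_cpu_scale_out_sel[i]);
	return 0;
}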
diff --git a/drivers/clk/qcom/debugcc-kona.c b/drivers/clk/qcom/debugcc-kona.c
index e0fbd63..7d71211 100644
--- a/drivers/clk/qcom/debugcc-kona.c
+++ b/drivers/clk/qcom/debugcc-kona.c
@@ -19,7 +19,7 @@
static struct measure_clk_data debug_mux_priv = {
.ctl_reg = 0x62038,
.status_reg = 0x6203C,
- .xo_div4_cbcr = 0x43008,
+ .xo_div4_cbcr = 0x4300C,
};
static const char *const debug_mux_parent_names[] = {
@@ -28,7 +28,6 @@ static const char *const debug_mux_parent_names[] = {
"cam_cc_bps_axi_clk",
"cam_cc_bps_clk",
"cam_cc_camnoc_axi_clk",
- "cam_cc_camnoc_dcd_xo_clk",
"cam_cc_cci_0_clk",
"cam_cc_cci_1_clk",
"cam_cc_core_ahb_clk",
@@ -47,7 +46,6 @@ static const char *const debug_mux_parent_names[] = {
"cam_cc_csiphy5_clk",
"cam_cc_fd_core_clk",
"cam_cc_fd_core_uar_clk",
- "cam_cc_gdsc_clk",
"cam_cc_icp_ahb_clk",
"cam_cc_icp_clk",
"cam_cc_ife_0_ahb_clk",
@@ -120,28 +118,22 @@ static const char *const debug_mux_parent_names[] = {
"disp_cc_mdss_rscc_ahb_clk",
"disp_cc_mdss_rscc_vsync_clk",
"disp_cc_mdss_vsync_clk",
- "disp_cc_xo_clk",
"gcc_aggre_noc_pcie_tbu_clk",
"gcc_aggre_ufs_card_axi_clk",
"gcc_aggre_ufs_phy_axi_clk",
"gcc_aggre_usb3_prim_axi_clk",
"gcc_aggre_usb3_sec_axi_clk",
- "gcc_boot_rom_ahb_clk",
"gcc_camera_ahb_clk",
"gcc_camera_hf_axi_clk",
"gcc_camera_sf_axi_clk",
- "gcc_camera_xo_clk",
"gcc_cfg_noc_usb3_prim_axi_clk",
"gcc_cfg_noc_usb3_sec_axi_clk",
- "gcc_cpuss_ahb_clk",
- "gcc_cpuss_dvm_bus_clk",
"gcc_cpuss_rbcpr_clk",
"gcc_ddrss_gpu_axi_clk",
+ "gcc_ddrss_pcie_sf_tbu_clk",
"gcc_disp_ahb_clk",
"gcc_disp_hf_axi_clk",
"gcc_disp_sf_axi_clk",
- "gcc_disp_xo_clk",
- "gcc_dpm_ahb_clk",
"gcc_dpm_clk",
"gcc_gp1_clk",
"gcc_gp2_clk",
@@ -151,7 +143,6 @@ static const char *const debug_mux_parent_names[] = {
"gcc_gpu_gpll0_div_clk_src",
"gcc_gpu_memnoc_gfx_clk",
"gcc_gpu_snoc_dvm_gfx_clk",
- "gcc_npu_at_clk",
"gcc_npu_axi_clk",
"gcc_npu_bwmon_axi_clk",
"gcc_npu_bwmon_cfg_ahb_clk",
@@ -159,7 +150,6 @@ static const char *const debug_mux_parent_names[] = {
"gcc_npu_dma_clk",
"gcc_npu_gpll0_clk_src",
"gcc_npu_gpll0_div_clk_src",
- "gcc_npu_trig_clk",
"gcc_pcie0_phy_refgen_clk",
"gcc_pcie1_phy_refgen_clk",
"gcc_pcie2_phy_refgen_clk",
@@ -183,14 +173,9 @@ static const char *const debug_mux_parent_names[] = {
"gcc_pcie_2_slv_q2a_axi_clk",
"gcc_pcie_phy_aux_clk",
"gcc_pdm2_clk",
- "gcc_pdm_ahb_clk",
- "gcc_pdm_xo4_clk",
"gcc_prng_ahb_clk",
- "gcc_qmip_camera_nrt_ahb_clk",
- "gcc_qmip_camera_rt_ahb_clk",
- "gcc_qmip_disp_ahb_clk",
- "gcc_qmip_video_cvp_ahb_clk",
- "gcc_qmip_video_vcodec_ahb_clk",
+ "gcc_qupv3_wrap0_core_2x_clk",
+ "gcc_qupv3_wrap0_core_clk",
"gcc_qupv3_wrap0_s0_clk",
"gcc_qupv3_wrap0_s1_clk",
"gcc_qupv3_wrap0_s2_clk",
@@ -199,31 +184,27 @@ static const char *const debug_mux_parent_names[] = {
"gcc_qupv3_wrap0_s5_clk",
"gcc_qupv3_wrap0_s6_clk",
"gcc_qupv3_wrap0_s7_clk",
+ "gcc_qupv3_wrap1_core_2x_clk",
+ "gcc_qupv3_wrap1_core_clk",
"gcc_qupv3_wrap1_s0_clk",
"gcc_qupv3_wrap1_s1_clk",
"gcc_qupv3_wrap1_s2_clk",
"gcc_qupv3_wrap1_s3_clk",
"gcc_qupv3_wrap1_s4_clk",
"gcc_qupv3_wrap1_s5_clk",
+ "gcc_qupv3_wrap2_core_2x_clk",
+ "gcc_qupv3_wrap2_core_clk",
"gcc_qupv3_wrap2_s0_clk",
"gcc_qupv3_wrap2_s1_clk",
"gcc_qupv3_wrap2_s2_clk",
"gcc_qupv3_wrap2_s3_clk",
"gcc_qupv3_wrap2_s4_clk",
"gcc_qupv3_wrap2_s5_clk",
- "gcc_qupv3_wrap_0_m_ahb_clk",
- "gcc_qupv3_wrap_0_s_ahb_clk",
- "gcc_qupv3_wrap_1_m_ahb_clk",
- "gcc_qupv3_wrap_1_s_ahb_clk",
- "gcc_qupv3_wrap_2_m_ahb_clk",
- "gcc_qupv3_wrap_2_s_ahb_clk",
"gcc_sdcc2_ahb_clk",
"gcc_sdcc2_apps_clk",
"gcc_sdcc4_ahb_clk",
"gcc_sdcc4_apps_clk",
"gcc_sys_noc_cpuss_ahb_clk",
- "gcc_tsif_ahb_clk",
- "gcc_tsif_inactivity_timers_clk",
"gcc_tsif_ref_clk",
"gcc_ufs_card_ahb_clk",
"gcc_ufs_card_axi_clk",
@@ -243,10 +224,8 @@ static const char *const debug_mux_parent_names[] = {
"gcc_ufs_phy_unipro_core_clk",
"gcc_usb30_prim_master_clk",
"gcc_usb30_prim_mock_utmi_clk",
- "gcc_usb30_prim_sleep_clk",
"gcc_usb30_sec_master_clk",
"gcc_usb30_sec_mock_utmi_clk",
- "gcc_usb30_sec_sleep_clk",
"gcc_usb3_prim_phy_aux_clk",
"gcc_usb3_prim_phy_com_aux_clk",
"gcc_usb3_prim_phy_pipe_clk",
@@ -256,18 +235,17 @@ static const char *const debug_mux_parent_names[] = {
"gcc_video_ahb_clk",
"gcc_video_axi0_clk",
"gcc_video_axi1_clk",
- "gcc_video_xo_clk",
"gpu_cc_ahb_clk",
- "gpu_cc_crc_ahb_clk",
- "gpu_cc_cx_apb_clk",
"gpu_cc_cx_gmu_clk",
"gpu_cc_cx_snoc_dvm_clk",
- "gpu_cc_cxo_aon_clk",
- "gpu_cc_cxo_clk",
"gpu_cc_gx_gmu_clk",
"gpu_cc_gx_vsense_clk",
- "npu_cc_aon_clk",
- "npu_cc_atb_clk",
+ "measure_only_cnoc_clk",
+ "measure_only_gpu_cc_cx_gfx3d_clk",
+ "measure_only_gpu_cc_cx_gfx3d_slv_clk",
+ "measure_only_gpu_cc_gx_gfx3d_clk",
+ "measure_only_ipa_2x_clk",
+ "measure_only_snoc_clk",
"npu_cc_bto_core_clk",
"npu_cc_bwmon_clk",
"npu_cc_cal_hm0_cdc_clk",
@@ -283,7 +261,6 @@ static const char *const debug_mux_parent_names[] = {
"npu_cc_dl_llm_clk",
"npu_cc_dpm_clk",
"npu_cc_dpm_temp_clk",
- "npu_cc_dpm_xo_clk",
"npu_cc_dsp_ahbm_clk",
"npu_cc_dsp_ahbs_clk",
"npu_cc_dsp_axi_clk",
@@ -293,20 +270,15 @@ static const char *const debug_mux_parent_names[] = {
"npu_cc_llm_clk",
"npu_cc_llm_curr_clk",
"npu_cc_llm_temp_clk",
- "npu_cc_llm_xo_clk",
- "npu_cc_noc_ahb_clk",
"npu_cc_noc_axi_clk",
"npu_cc_noc_dma_clk",
- "npu_cc_rsc_xo_clk",
"npu_cc_s2p_clk",
- "npu_cc_xo_clk",
"video_cc_ahb_clk",
"video_cc_mvs0_clk",
"video_cc_mvs0c_clk",
"video_cc_mvs1_clk",
"video_cc_mvs1_div2_clk",
"video_cc_mvs1c_clk",
- "video_cc_xo_clk",
};
static struct clk_debug_mux gcc_debug_mux = {
@@ -319,574 +291,518 @@ static struct clk_debug_mux gcc_debug_mux = {
.post_div_mask = 0xF,
.post_div_shift = 0,
MUX_SRC_LIST(
- { "cam_cc_bps_ahb_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_bps_ahb_clk", 0x55, 2, CAM_CC,
0x18, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_bps_areg_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_bps_areg_clk", 0x55, 2, CAM_CC,
0x17, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_bps_axi_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_bps_axi_clk", 0x55, 2, CAM_CC,
0x16, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_bps_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_bps_clk", 0x55, 2, CAM_CC,
0x14, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_camnoc_axi_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_camnoc_axi_clk", 0x55, 2, CAM_CC,
0x3C, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_camnoc_dcd_xo_clk", 0x55, 1, CAM_CC,
- 0x3D, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_cci_0_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_cci_0_clk", 0x55, 2, CAM_CC,
0x39, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_cci_1_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_cci_1_clk", 0x55, 2, CAM_CC,
0x3A, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_core_ahb_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_core_ahb_clk", 0x55, 2, CAM_CC,
0x40, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_cpas_ahb_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_cpas_ahb_clk", 0x55, 2, CAM_CC,
0x3B, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_csi0phytimer_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_csi0phytimer_clk", 0x55, 2, CAM_CC,
0x8, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_csi1phytimer_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_csi1phytimer_clk", 0x55, 2, CAM_CC,
0xA, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_csi2phytimer_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_csi2phytimer_clk", 0x55, 2, CAM_CC,
0xC, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_csi3phytimer_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_csi3phytimer_clk", 0x55, 2, CAM_CC,
0xE, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_csi4phytimer_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_csi4phytimer_clk", 0x55, 2, CAM_CC,
0x10, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_csi5phytimer_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_csi5phytimer_clk", 0x55, 2, CAM_CC,
0x12, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_csiphy0_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_csiphy0_clk", 0x55, 2, CAM_CC,
0x9, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_csiphy1_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_csiphy1_clk", 0x55, 2, CAM_CC,
0xB, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_csiphy2_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_csiphy2_clk", 0x55, 2, CAM_CC,
0xD, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_csiphy3_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_csiphy3_clk", 0x55, 2, CAM_CC,
0xF, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_csiphy4_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_csiphy4_clk", 0x55, 2, CAM_CC,
0x11, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_csiphy5_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_csiphy5_clk", 0x55, 2, CAM_CC,
0x13, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_fd_core_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_fd_core_clk", 0x55, 2, CAM_CC,
0x37, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_fd_core_uar_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_fd_core_uar_clk", 0x55, 2, CAM_CC,
0x38, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_gdsc_clk", 0x55, 1, CAM_CC,
- 0x41, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_icp_ahb_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_icp_ahb_clk", 0x55, 2, CAM_CC,
0x36, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_icp_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_icp_clk", 0x55, 2, CAM_CC,
0x35, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_0_ahb_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_0_ahb_clk", 0x55, 2, CAM_CC,
0x26, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_0_areg_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_0_areg_clk", 0x55, 2, CAM_CC,
0x1F, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_0_axi_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_0_axi_clk", 0x55, 2, CAM_CC,
0x25, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_0_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_0_clk", 0x55, 2, CAM_CC,
0x1E, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_0_cphy_rx_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_0_cphy_rx_clk", 0x55, 2, CAM_CC,
0x24, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_0_csid_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_0_csid_clk", 0x55, 2, CAM_CC,
0x22, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_0_dsp_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_0_dsp_clk", 0x55, 2, CAM_CC,
0x21, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_1_ahb_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_1_ahb_clk", 0x55, 2, CAM_CC,
0x2E, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_1_areg_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_1_areg_clk", 0x55, 2, CAM_CC,
0x29, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_1_axi_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_1_axi_clk", 0x55, 2, CAM_CC,
0x2D, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_1_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_1_clk", 0x55, 2, CAM_CC,
0x27, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_1_cphy_rx_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_1_cphy_rx_clk", 0x55, 2, CAM_CC,
0x2C, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_1_csid_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_1_csid_clk", 0x55, 2, CAM_CC,
0x2B, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_1_dsp_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_1_dsp_clk", 0x55, 2, CAM_CC,
0x2A, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_lite_ahb_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_lite_ahb_clk", 0x55, 2, CAM_CC,
0x32, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_lite_axi_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_lite_axi_clk", 0x55, 2, CAM_CC,
0x49, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_lite_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_lite_clk", 0x55, 2, CAM_CC,
0x2F, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_lite_cphy_rx_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_lite_cphy_rx_clk", 0x55, 2, CAM_CC,
0x31, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ife_lite_csid_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ife_lite_csid_clk", 0x55, 2, CAM_CC,
0x30, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ipe_0_ahb_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ipe_0_ahb_clk", 0x55, 2, CAM_CC,
0x1D, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ipe_0_areg_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ipe_0_areg_clk", 0x55, 2, CAM_CC,
0x1C, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ipe_0_axi_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ipe_0_axi_clk", 0x55, 2, CAM_CC,
0x1B, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_ipe_0_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_ipe_0_clk", 0x55, 2, CAM_CC,
0x19, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_jpeg_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_jpeg_clk", 0x55, 2, CAM_CC,
0x33, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_mclk0_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_mclk0_clk", 0x55, 2, CAM_CC,
0x1, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_mclk1_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_mclk1_clk", 0x55, 2, CAM_CC,
0x2, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_mclk2_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_mclk2_clk", 0x55, 2, CAM_CC,
0x3, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_mclk3_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_mclk3_clk", 0x55, 2, CAM_CC,
0x4, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_mclk4_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_mclk4_clk", 0x55, 2, CAM_CC,
0x5, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_mclk5_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_mclk5_clk", 0x55, 2, CAM_CC,
0x6, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_mclk6_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_mclk6_clk", 0x55, 2, CAM_CC,
0x7, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_sbi_ahb_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_sbi_ahb_clk", 0x55, 2, CAM_CC,
0x4E, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_sbi_axi_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_sbi_axi_clk", 0x55, 2, CAM_CC,
0x4D, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_sbi_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_sbi_clk", 0x55, 2, CAM_CC,
0x4A, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_sbi_cphy_rx_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_sbi_cphy_rx_clk", 0x55, 2, CAM_CC,
0x4C, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_sbi_csid_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_sbi_csid_clk", 0x55, 2, CAM_CC,
0x4B, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_sbi_ife_0_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_sbi_ife_0_clk", 0x55, 2, CAM_CC,
0x4F, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "cam_cc_sbi_ife_1_clk", 0x55, 1, CAM_CC,
+ { "cam_cc_sbi_ife_1_clk", 0x55, 2, CAM_CC,
0x50, 0xFF, 0, 0xF, 0, 4, 0xD000, 0xD004, 0xD008 },
- { "disp_cc_mdss_ahb_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_ahb_clk", 0x56, 2, DISP_CC,
0x2B, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_byte0_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_byte0_clk", 0x56, 2, DISP_CC,
0x15, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_byte0_intf_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_byte0_intf_clk", 0x56, 2, DISP_CC,
0x16, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_byte1_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_byte1_clk", 0x56, 2, DISP_CC,
0x17, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_byte1_intf_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_byte1_intf_clk", 0x56, 2, DISP_CC,
0x18, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_dp_aux1_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_dp_aux1_clk", 0x56, 2, DISP_CC,
0x25, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_dp_aux_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_dp_aux_clk", 0x56, 2, DISP_CC,
0x20, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_dp_crypto1_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_dp_crypto1_clk", 0x56, 2, DISP_CC,
0x24, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_dp_crypto_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_dp_crypto_clk", 0x56, 2, DISP_CC,
0x1D, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_dp_link1_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_dp_link1_clk", 0x56, 2, DISP_CC,
0x22, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_dp_link1_intf_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_dp_link1_intf_clk", 0x56, 2, DISP_CC,
0x23, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_dp_link_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_dp_link_clk", 0x56, 2, DISP_CC,
0x1B, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_dp_link_intf_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_dp_link_intf_clk", 0x56, 2, DISP_CC,
0x1C, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_dp_pixel1_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_dp_pixel1_clk", 0x56, 2, DISP_CC,
0x1F, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_dp_pixel2_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_dp_pixel2_clk", 0x56, 2, DISP_CC,
0x21, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_dp_pixel_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_dp_pixel_clk", 0x56, 2, DISP_CC,
0x1E, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_edp_aux_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_edp_aux_clk", 0x56, 2, DISP_CC,
0x29, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_edp_gtc_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_edp_gtc_clk", 0x56, 2, DISP_CC,
0x2A, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_edp_link_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_edp_link_clk", 0x56, 2, DISP_CC,
0x27, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_edp_link_intf_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_edp_link_intf_clk", 0x56, 2, DISP_CC,
0x28, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_edp_pixel_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_edp_pixel_clk", 0x56, 2, DISP_CC,
0x26, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_esc0_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_esc0_clk", 0x56, 2, DISP_CC,
0x19, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_esc1_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_esc1_clk", 0x56, 2, DISP_CC,
0x1A, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_mdp_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_mdp_clk", 0x56, 2, DISP_CC,
0x11, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_mdp_lut_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_mdp_lut_clk", 0x56, 2, DISP_CC,
0x13, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_non_gdsc_ahb_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_non_gdsc_ahb_clk", 0x56, 2, DISP_CC,
0x2C, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_pclk0_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_pclk0_clk", 0x56, 2, DISP_CC,
0xF, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_pclk1_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_pclk1_clk", 0x56, 2, DISP_CC,
0x10, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_rot_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_rot_clk", 0x56, 2, DISP_CC,
0x12, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_rscc_ahb_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_rscc_ahb_clk", 0x56, 2, DISP_CC,
0x2E, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_rscc_vsync_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_rscc_vsync_clk", 0x56, 2, DISP_CC,
0x2D, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_mdss_vsync_clk", 0x56, 1, DISP_CC,
+ { "disp_cc_mdss_vsync_clk", 0x56, 2, DISP_CC,
0x14, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "disp_cc_xo_clk", 0x56, 1, DISP_CC,
- 0x36, 0xFF, 0, 0x3, 0, 4, 0x7000, 0x5008, 0x500C },
- { "gcc_aggre_noc_pcie_tbu_clk", 0x36, 1, GCC,
- 0x36, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_aggre_ufs_card_axi_clk", 0x142, 1, GCC,
- 0x142, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_aggre_ufs_phy_axi_clk", 0x141, 1, GCC,
- 0x141, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_aggre_usb3_prim_axi_clk", 0x13F, 1, GCC,
- 0x13F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_aggre_usb3_sec_axi_clk", 0x140, 1, GCC,
- 0x140, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_boot_rom_ahb_clk", 0xA3, 1, GCC,
- 0xA3, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_camera_ahb_clk", 0x44, 1, GCC,
- 0x44, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_camera_hf_axi_clk", 0x4D, 1, GCC,
- 0x4D, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_camera_sf_axi_clk", 0x4E, 1, GCC,
- 0x4E, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_camera_xo_clk", 0x52, 1, GCC,
- 0x52, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_cfg_noc_usb3_prim_axi_clk", 0x21, 1, GCC,
- 0x21, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_cfg_noc_usb3_sec_axi_clk", 0x22, 1, GCC,
- 0x22, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_cpuss_ahb_clk", 0xE0, 1, GCC,
- 0xE0, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_cpuss_dvm_bus_clk", 0xE4, 1, GCC,
- 0xE4, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_cpuss_rbcpr_clk", 0xE1, 1, GCC,
- 0xE1, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ddrss_gpu_axi_clk", 0xC4, 1, GCC,
- 0xC4, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_disp_ahb_clk", 0x45, 1, GCC,
- 0x45, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_disp_hf_axi_clk", 0x4F, 1, GCC,
- 0x4F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_disp_sf_axi_clk", 0x50, 1, GCC,
- 0x50, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_disp_xo_clk", 0x53, 1, GCC,
- 0x53, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_dpm_ahb_clk", 0x198, 1, GCC,
- 0x198, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_dpm_clk", 0x197, 1, GCC,
- 0x197, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_gp1_clk", 0xEF, 1, GCC,
- 0xEF, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_gp2_clk", 0xF0, 1, GCC,
- 0xF0, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_gp3_clk", 0xF1, 1, GCC,
- 0xF1, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_gpu_cfg_ahb_clk", 0x161, 1, GCC,
- 0x161, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_gpu_gpll0_clk_src", 0x167, 1, GCC,
- 0x167, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_gpu_gpll0_div_clk_src", 0x168, 1, GCC,
- 0x168, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_gpu_memnoc_gfx_clk", 0x164, 1, GCC,
- 0x164, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_gpu_snoc_dvm_gfx_clk", 0x166, 1, GCC,
- 0x166, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_npu_at_clk", 0x17D, 1, GCC,
- 0x17D, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_npu_axi_clk", 0x17A, 1, GCC,
- 0x17A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_npu_bwmon_axi_clk", 0x19A, 1, GCC,
- 0x19A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_npu_bwmon_cfg_ahb_clk", 0x199, 1, GCC,
- 0x199, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_npu_cfg_ahb_clk", 0x179, 1, GCC,
- 0x179, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_npu_dma_clk", 0x17B, 1, GCC,
- 0x17B, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_npu_gpll0_clk_src", 0x17E, 1, GCC,
- 0x17E, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_npu_gpll0_div_clk_src", 0x17F, 1, GCC,
- 0x17F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_npu_trig_clk", 0x17C, 1, GCC,
- 0x17C, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie0_phy_refgen_clk", 0x103, 1, GCC,
- 0x103, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie1_phy_refgen_clk", 0x104, 1, GCC,
- 0x104, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie2_phy_refgen_clk", 0x105, 1, GCC,
- 0x105, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_0_aux_clk", 0xF6, 1, GCC,
- 0xF6, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_0_cfg_ahb_clk", 0xF5, 1, GCC,
- 0xF5, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_0_mstr_axi_clk", 0xF4, 1, GCC,
- 0xF4, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_0_pipe_clk", 0xF7, 1, GCC,
- 0xF7, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_0_slv_axi_clk", 0xF3, 1, GCC,
- 0xF3, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_0_slv_q2a_axi_clk", 0xF2, 1, GCC,
- 0xF2, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_1_aux_clk", 0xFE, 1, GCC,
- 0xFE, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_1_cfg_ahb_clk", 0xFD, 1, GCC,
- 0xFD, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_1_mstr_axi_clk", 0xFC, 1, GCC,
- 0xFC, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_1_pipe_clk", 0xFF, 1, GCC,
- 0xFF, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_1_slv_axi_clk", 0xFB, 1, GCC,
- 0xFB, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_1_slv_q2a_axi_clk", 0xFA, 1, GCC,
- 0xFA, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_2_aux_clk", 0x191, 1, GCC,
- 0x191, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_2_cfg_ahb_clk", 0x190, 1, GCC,
- 0x190, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_2_mstr_axi_clk", 0x18F, 1, GCC,
- 0x18F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_2_pipe_clk", 0x192, 1, GCC,
- 0x192, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_2_slv_axi_clk", 0x18E, 1, GCC,
- 0x18E, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_2_slv_q2a_axi_clk", 0x18D, 1, GCC,
- 0x18D, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pcie_phy_aux_clk", 0x102, 1, GCC,
- 0x102, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pdm2_clk", 0x9D, 1, GCC,
- 0x9D, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pdm_ahb_clk", 0x9B, 1, GCC,
- 0x9B, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_pdm_xo4_clk", 0x9C, 1, GCC,
- 0x9C, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_prng_ahb_clk", 0x9E, 1, GCC,
- 0x9E, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qmip_camera_nrt_ahb_clk", 0x48, 1, GCC,
- 0x48, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qmip_camera_rt_ahb_clk", 0x49, 1, GCC,
- 0x49, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qmip_disp_ahb_clk", 0x4A, 1, GCC,
- 0x4A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qmip_video_cvp_ahb_clk", 0x46, 1, GCC,
- 0x46, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qmip_video_vcodec_ahb_clk", 0x47, 1, GCC,
- 0x47, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap0_s0_clk", 0x89, 1, GCC,
- 0x89, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap0_s1_clk", 0x8A, 1, GCC,
- 0x8A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap0_s2_clk", 0x8B, 1, GCC,
- 0x8B, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap0_s3_clk", 0x8C, 1, GCC,
- 0x8C, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap0_s4_clk", 0x8D, 1, GCC,
- 0x8D, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap0_s5_clk", 0x8E, 1, GCC,
- 0x8E, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap0_s6_clk", 0x8F, 1, GCC,
- 0x8F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap0_s7_clk", 0x90, 1, GCC,
- 0x90, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap1_s0_clk", 0x95, 1, GCC,
- 0x95, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap1_s1_clk", 0x96, 1, GCC,
- 0x96, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap1_s2_clk", 0x97, 1, GCC,
- 0x97, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap1_s3_clk", 0x98, 1, GCC,
- 0x98, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap1_s4_clk", 0x99, 1, GCC,
- 0x99, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap1_s5_clk", 0x9A, 1, GCC,
- 0x9A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap2_s0_clk", 0x185, 1, GCC,
- 0x185, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap2_s1_clk", 0x186, 1, GCC,
- 0x186, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap2_s2_clk", 0x187, 1, GCC,
- 0x187, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap2_s3_clk", 0x188, 1, GCC,
- 0x188, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap2_s4_clk", 0x189, 1, GCC,
- 0x189, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap2_s5_clk", 0x18A, 1, GCC,
- 0x18A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap_0_m_ahb_clk", 0x85, 1, GCC,
- 0x85, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap_0_s_ahb_clk", 0x86, 1, GCC,
- 0x86, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap_1_m_ahb_clk", 0x91, 1, GCC,
- 0x91, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap_1_s_ahb_clk", 0x92, 1, GCC,
- 0x92, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap_2_m_ahb_clk", 0x181, 1, GCC,
- 0x181, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_qupv3_wrap_2_s_ahb_clk", 0x182, 1, GCC,
- 0x182, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_sdcc2_ahb_clk", 0x82, 1, GCC,
- 0x82, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_sdcc2_apps_clk", 0x81, 1, GCC,
- 0x81, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_sdcc4_ahb_clk", 0x84, 1, GCC,
- 0x84, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_sdcc4_apps_clk", 0x83, 1, GCC,
- 0x83, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_sys_noc_cpuss_ahb_clk", 0xC, 1, GCC,
- 0xC, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_tsif_ahb_clk", 0x9F, 1, GCC,
- 0x9F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_tsif_inactivity_timers_clk", 0xA1, 1, GCC,
- 0xA1, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_tsif_ref_clk", 0xA0, 1, GCC,
- 0xA0, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_card_ahb_clk", 0x107, 1, GCC,
- 0x107, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_card_axi_clk", 0x106, 1, GCC,
- 0x106, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_card_ice_core_clk", 0x10D, 1, GCC,
- 0x10D, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_card_phy_aux_clk", 0x10E, 1, GCC,
- 0x10E, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_card_rx_symbol_0_clk", 0x109, 1, GCC,
- 0x109, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_card_rx_symbol_1_clk", 0x10F, 1, GCC,
- 0x10F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_card_tx_symbol_0_clk", 0x108, 1, GCC,
- 0x108, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_card_unipro_core_clk", 0x10C, 1, GCC,
- 0x10C, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_phy_ahb_clk", 0x113, 1, GCC,
- 0x113, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_phy_axi_clk", 0x112, 1, GCC,
- 0x112, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_phy_ice_core_clk", 0x119, 1, GCC,
- 0x119, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_phy_phy_aux_clk", 0x11A, 1, GCC,
- 0x11A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_phy_rx_symbol_0_clk", 0x115, 1, GCC,
- 0x115, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_phy_rx_symbol_1_clk", 0x11B, 1, GCC,
- 0x11B, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_phy_tx_symbol_0_clk", 0x114, 1, GCC,
- 0x114, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_ufs_phy_unipro_core_clk", 0x118, 1, GCC,
- 0x118, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_usb30_prim_master_clk", 0x6E, 1, GCC,
- 0x6E, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_usb30_prim_mock_utmi_clk", 0x70, 1, GCC,
- 0x70, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_usb30_prim_sleep_clk", 0x6F, 1, GCC,
- 0x6F, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_usb30_sec_master_clk", 0x75, 1, GCC,
- 0x75, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_usb30_sec_mock_utmi_clk", 0x77, 1, GCC,
- 0x77, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_usb30_sec_sleep_clk", 0x76, 1, GCC,
- 0x76, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_usb3_prim_phy_aux_clk", 0x71, 1, GCC,
- 0x71, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_usb3_prim_phy_com_aux_clk", 0x72, 1, GCC,
- 0x72, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_usb3_prim_phy_pipe_clk", 0x73, 1, GCC,
- 0x73, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_usb3_sec_phy_aux_clk", 0x78, 1, GCC,
- 0x78, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_usb3_sec_phy_com_aux_clk", 0x79, 1, GCC,
- 0x79, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_usb3_sec_phy_pipe_clk", 0x7A, 1, GCC,
- 0x7A, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_video_ahb_clk", 0x43, 1, GCC,
- 0x43, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_video_axi0_clk", 0x4B, 1, GCC,
- 0x4B, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_video_axi1_clk", 0x4C, 1, GCC,
- 0x4C, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gcc_video_xo_clk", 0x51, 1, GCC,
- 0x51, 0x3FF, 0, 0xF, 0, 1, 0x62000, 0x62004, 0x62008 },
- { "gpu_cc_ahb_clk", 0x163, 1, GPU_CC,
+ { "gcc_aggre_noc_pcie_tbu_clk", 0x36, 2, GCC,
+ 0x36, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_aggre_ufs_card_axi_clk", 0x142, 2, GCC,
+ 0x142, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_aggre_ufs_phy_axi_clk", 0x141, 2, GCC,
+ 0x141, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_aggre_usb3_prim_axi_clk", 0x13F, 2, GCC,
+ 0x13F, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_aggre_usb3_sec_axi_clk", 0x140, 2, GCC,
+ 0x140, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_camera_ahb_clk", 0x44, 2, GCC,
+ 0x44, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_camera_hf_axi_clk", 0x4D, 2, GCC,
+ 0x4D, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_camera_sf_axi_clk", 0x4E, 2, GCC,
+ 0x4E, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_cfg_noc_usb3_prim_axi_clk", 0x21, 2, GCC,
+ 0x21, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_cfg_noc_usb3_sec_axi_clk", 0x22, 2, GCC,
+ 0x22, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_cpuss_rbcpr_clk", 0xE1, 2, GCC,
+ 0xE1, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ddrss_gpu_axi_clk", 0xC4, 2, GCC,
+ 0xC4, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ddrss_pcie_sf_tbu_clk", 0xC5, 2, GCC,
+ 0xC5, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_disp_ahb_clk", 0x45, 2, GCC,
+ 0x45, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_disp_hf_axi_clk", 0x4F, 2, GCC,
+ 0x4F, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_disp_sf_axi_clk", 0x50, 2, GCC,
+ 0x50, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_dpm_clk", 0x197, 2, GCC,
+ 0x197, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_gp1_clk", 0xEF, 2, GCC,
+ 0xEF, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_gp2_clk", 0xF0, 2, GCC,
+ 0xF0, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_gp3_clk", 0xF1, 2, GCC,
+ 0xF1, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_gpu_cfg_ahb_clk", 0x161, 2, GCC,
+ 0x161, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_gpu_gpll0_clk_src", 0x167, 2, GCC,
+ 0x167, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_gpu_gpll0_div_clk_src", 0x168, 2, GCC,
+ 0x168, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_gpu_memnoc_gfx_clk", 0x164, 2, GCC,
+ 0x164, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_gpu_snoc_dvm_gfx_clk", 0x166, 2, GCC,
+ 0x166, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_npu_axi_clk", 0x17A, 2, GCC,
+ 0x17A, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_npu_bwmon_axi_clk", 0x19A, 2, GCC,
+ 0x19A, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_npu_bwmon_cfg_ahb_clk", 0x199, 2, GCC,
+ 0x199, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_npu_cfg_ahb_clk", 0x179, 2, GCC,
+ 0x179, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_npu_dma_clk", 0x17B, 2, GCC,
+ 0x17B, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_npu_gpll0_clk_src", 0x17E, 2, GCC,
+ 0x17E, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_npu_gpll0_div_clk_src", 0x17F, 2, GCC,
+ 0x17F, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie0_phy_refgen_clk", 0x103, 2, GCC,
+ 0x103, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie1_phy_refgen_clk", 0x104, 2, GCC,
+ 0x104, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie2_phy_refgen_clk", 0x105, 2, GCC,
+ 0x105, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_0_aux_clk", 0xF6, 2, GCC,
+ 0xF6, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_0_cfg_ahb_clk", 0xF5, 2, GCC,
+ 0xF5, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_0_mstr_axi_clk", 0xF4, 2, GCC,
+ 0xF4, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_0_pipe_clk", 0xF7, 2, GCC,
+ 0xF7, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_0_slv_axi_clk", 0xF3, 2, GCC,
+ 0xF3, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_0_slv_q2a_axi_clk", 0xF2, 2, GCC,
+ 0xF2, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_1_aux_clk", 0xFE, 2, GCC,
+ 0xFE, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_1_cfg_ahb_clk", 0xFD, 2, GCC,
+ 0xFD, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_1_mstr_axi_clk", 0xFC, 2, GCC,
+ 0xFC, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_1_pipe_clk", 0xFF, 2, GCC,
+ 0xFF, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_1_slv_axi_clk", 0xFB, 2, GCC,
+ 0xFB, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_1_slv_q2a_axi_clk", 0xFA, 2, GCC,
+ 0xFA, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_2_aux_clk", 0x191, 2, GCC,
+ 0x191, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_2_cfg_ahb_clk", 0x190, 2, GCC,
+ 0x190, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_2_mstr_axi_clk", 0x18F, 2, GCC,
+ 0x18F, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_2_pipe_clk", 0x192, 2, GCC,
+ 0x192, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_2_slv_axi_clk", 0x18E, 2, GCC,
+ 0x18E, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_2_slv_q2a_axi_clk", 0x18D, 2, GCC,
+ 0x18D, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pcie_phy_aux_clk", 0x102, 2, GCC,
+ 0x102, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_pdm2_clk", 0x9D, 2, GCC,
+ 0x9D, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_prng_ahb_clk", 0x9E, 2, GCC,
+ 0x9E, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap0_core_2x_clk", 0x88, 2, GCC,
+ 0x88, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap0_core_clk", 0x87, 2, GCC,
+ 0x87, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap0_s0_clk", 0x89, 2, GCC,
+ 0x89, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap0_s1_clk", 0x8A, 2, GCC,
+ 0x8A, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap0_s2_clk", 0x8B, 2, GCC,
+ 0x8B, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap0_s3_clk", 0x8C, 2, GCC,
+ 0x8C, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap0_s4_clk", 0x8D, 2, GCC,
+ 0x8D, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap0_s5_clk", 0x8E, 2, GCC,
+ 0x8E, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap0_s6_clk", 0x8F, 2, GCC,
+ 0x8F, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap0_s7_clk", 0x90, 2, GCC,
+ 0x90, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap1_core_2x_clk", 0x94, 2, GCC,
+ 0x94, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap1_core_clk", 0x93, 2, GCC,
+ 0x93, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap1_s0_clk", 0x95, 2, GCC,
+ 0x95, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap1_s1_clk", 0x96, 2, GCC,
+ 0x96, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap1_s2_clk", 0x97, 2, GCC,
+ 0x97, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap1_s3_clk", 0x98, 2, GCC,
+ 0x98, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap1_s4_clk", 0x99, 2, GCC,
+ 0x99, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap1_s5_clk", 0x9A, 2, GCC,
+ 0x9A, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap2_core_2x_clk", 0x184, 2, GCC,
+ 0x184, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap2_core_clk", 0x183, 2, GCC,
+ 0x183, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap2_s0_clk", 0x185, 2, GCC,
+ 0x185, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap2_s1_clk", 0x186, 2, GCC,
+ 0x186, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap2_s2_clk", 0x187, 2, GCC,
+ 0x187, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap2_s3_clk", 0x188, 2, GCC,
+ 0x188, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap2_s4_clk", 0x189, 2, GCC,
+ 0x189, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_qupv3_wrap2_s5_clk", 0x18A, 2, GCC,
+ 0x18A, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_sdcc2_ahb_clk", 0x82, 2, GCC,
+ 0x82, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_sdcc2_apps_clk", 0x81, 2, GCC,
+ 0x81, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_sdcc4_ahb_clk", 0x84, 2, GCC,
+ 0x84, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_sdcc4_apps_clk", 0x83, 2, GCC,
+ 0x83, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_sys_noc_cpuss_ahb_clk", 0xC, 2, GCC,
+ 0xC, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_tsif_ref_clk", 0xA0, 2, GCC,
+ 0xA0, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_card_ahb_clk", 0x107, 2, GCC,
+ 0x107, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_card_axi_clk", 0x106, 2, GCC,
+ 0x106, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_card_ice_core_clk", 0x10D, 2, GCC,
+ 0x10D, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_card_phy_aux_clk", 0x10E, 2, GCC,
+ 0x10E, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_card_rx_symbol_0_clk", 0x109, 2, GCC,
+ 0x109, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_card_rx_symbol_1_clk", 0x10F, 2, GCC,
+ 0x10F, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_card_tx_symbol_0_clk", 0x108, 2, GCC,
+ 0x108, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_card_unipro_core_clk", 0x10C, 2, GCC,
+ 0x10C, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_phy_ahb_clk", 0x113, 2, GCC,
+ 0x113, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_phy_axi_clk", 0x112, 2, GCC,
+ 0x112, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_phy_ice_core_clk", 0x119, 2, GCC,
+ 0x119, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_phy_phy_aux_clk", 0x11A, 2, GCC,
+ 0x11A, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_phy_rx_symbol_0_clk", 0x115, 2, GCC,
+ 0x115, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_phy_rx_symbol_1_clk", 0x11B, 2, GCC,
+ 0x11B, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_phy_tx_symbol_0_clk", 0x114, 2, GCC,
+ 0x114, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_ufs_phy_unipro_core_clk", 0x118, 2, GCC,
+ 0x118, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_usb30_prim_master_clk", 0x6E, 2, GCC,
+ 0x6E, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_usb30_prim_mock_utmi_clk", 0x70, 2, GCC,
+ 0x70, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_usb30_sec_master_clk", 0x75, 2, GCC,
+ 0x75, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_usb30_sec_mock_utmi_clk", 0x77, 2, GCC,
+ 0x77, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_usb3_prim_phy_aux_clk", 0x71, 2, GCC,
+ 0x71, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_usb3_prim_phy_com_aux_clk", 0x72, 2, GCC,
+ 0x72, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_usb3_prim_phy_pipe_clk", 0x73, 2, GCC,
+ 0x73, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_usb3_sec_phy_aux_clk", 0x78, 2, GCC,
+ 0x78, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_usb3_sec_phy_com_aux_clk", 0x79, 2, GCC,
+ 0x79, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_usb3_sec_phy_pipe_clk", 0x7A, 2, GCC,
+ 0x7A, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_video_ahb_clk", 0x43, 2, GCC,
+ 0x43, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_video_axi0_clk", 0x4B, 2, GCC,
+ 0x4B, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gcc_video_axi1_clk", 0x4C, 2, GCC,
+ 0x4C, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "gpu_cc_ahb_clk", 0x163, 2, GPU_CC,
0x10, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
- { "gpu_cc_crc_ahb_clk", 0x163, 1, GPU_CC,
- 0x11, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
- { "gpu_cc_cx_apb_clk", 0x163, 1, GPU_CC,
- 0x14, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
- { "gpu_cc_cx_gmu_clk", 0x163, 1, GPU_CC,
+ { "gpu_cc_cx_gmu_clk", 0x163, 2, GPU_CC,
0x18, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
- { "gpu_cc_cx_snoc_dvm_clk", 0x163, 1, GPU_CC,
+ { "gpu_cc_cx_snoc_dvm_clk", 0x163, 2, GPU_CC,
0x15, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
- { "gpu_cc_cxo_aon_clk", 0x163, 1, GPU_CC,
- 0xA, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
- { "gpu_cc_cxo_clk", 0x163, 1, GPU_CC,
- 0x19, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
- { "gpu_cc_gx_gmu_clk", 0x163, 1, GPU_CC,
+ { "gpu_cc_gx_gmu_clk", 0x163, 2, GPU_CC,
0xF, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
- { "gpu_cc_gx_vsense_clk", 0x163, 1, GPU_CC,
+ { "gpu_cc_gx_vsense_clk", 0x163, 2, GPU_CC,
0xC, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
- { "npu_cc_aon_clk", 0x180, 1, NPU_CC,
- 0x5, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_atb_clk", 0x180, 1, NPU_CC,
- 0x17, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_bto_core_clk", 0x180, 1, NPU_CC,
+ { "measure_only_cnoc_clk", 0x19, 2, GCC,
+ 0x19, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "measure_only_gpu_cc_cx_gfx3d_clk", 0x163, 2, GPU_CC,
+ 0x1A, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
+ { "measure_only_gpu_cc_cx_gfx3d_slv_clk", 0x163, 2, GPU_CC,
+ 0x1B, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
+ { "measure_only_gpu_cc_gx_gfx3d_clk", 0x163, 2, GPU_CC,
+ 0xB, 0xFF, 0, 0x3, 0, 2, 0x1568, 0x10FC, 0x1100 },
+ { "measure_only_ipa_2x_clk", 0x147, 2, GCC,
+ 0x147, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "measure_only_snoc_clk", 0x7, 2, GCC,
+ 0x7, 0x3FF, 0, 0xF, 0, 2, 0x62000, 0x62004, 0x62008 },
+ { "npu_cc_bto_core_clk", 0x180, 2, NPU_CC,
0x19, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_bwmon_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_bwmon_clk", 0x180, 2, NPU_CC,
0x18, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_cal_hm0_cdc_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_cal_hm0_cdc_clk", 0x180, 2, NPU_CC,
0xB, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_cal_hm0_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_cal_hm0_clk", 0x180, 2, NPU_CC,
0x2, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_cal_hm0_dpm_ip_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_cal_hm0_dpm_ip_clk", 0x180, 2, NPU_CC,
0xC, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_cal_hm0_perf_cnt_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_cal_hm0_perf_cnt_clk", 0x180, 2, NPU_CC,
0xD, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_cal_hm1_cdc_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_cal_hm1_cdc_clk", 0x180, 2, NPU_CC,
0xE, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_cal_hm1_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_cal_hm1_clk", 0x180, 2, NPU_CC,
0x3, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_cal_hm1_dpm_ip_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_cal_hm1_dpm_ip_clk", 0x180, 2, NPU_CC,
0xF, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_cal_hm1_perf_cnt_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_cal_hm1_perf_cnt_clk", 0x180, 2, NPU_CC,
0x10, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_core_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_core_clk", 0x180, 2, NPU_CC,
0x4, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_dl_dpm_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_dl_dpm_clk", 0x180, 2, NPU_CC,
0x23, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_dl_llm_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_dl_llm_clk", 0x180, 2, NPU_CC,
0x22, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_dpm_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_dpm_clk", 0x180, 2, NPU_CC,
0x8, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_dpm_temp_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_dpm_temp_clk", 0x180, 2, NPU_CC,
0x14, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_dpm_xo_clk", 0x180, 1, NPU_CC,
- 0xA, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_dsp_ahbm_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_dsp_ahbm_clk", 0x180, 2, NPU_CC,
0x1C, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_dsp_ahbs_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_dsp_ahbs_clk", 0x180, 2, NPU_CC,
0x1B, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_dsp_axi_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_dsp_axi_clk", 0x180, 2, NPU_CC,
0x1E, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_dsp_bwmon_ahb_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_dsp_bwmon_ahb_clk", 0x180, 2, NPU_CC,
0x1D, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_dsp_bwmon_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_dsp_bwmon_clk", 0x180, 2, NPU_CC,
0x1F, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_isense_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_isense_clk", 0x180, 2, NPU_CC,
0x7, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_llm_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_llm_clk", 0x180, 2, NPU_CC,
0x6, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_llm_curr_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_llm_curr_clk", 0x180, 2, NPU_CC,
0x21, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_llm_temp_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_llm_temp_clk", 0x180, 2, NPU_CC,
0x15, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_llm_xo_clk", 0x180, 1, NPU_CC,
- 0x9, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_noc_ahb_clk", 0x180, 1, NPU_CC,
- 0x13, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_noc_axi_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_noc_axi_clk", 0x180, 2, NPU_CC,
0x12, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_noc_dma_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_noc_dma_clk", 0x180, 2, NPU_CC,
0x11, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_rsc_xo_clk", 0x180, 1, NPU_CC,
- 0x1A, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_s2p_clk", 0x180, 1, NPU_CC,
+ { "npu_cc_s2p_clk", 0x180, 2, NPU_CC,
0x16, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "npu_cc_xo_clk", 0x180, 1, NPU_CC,
- 0x1, 0xFF, 0, 0x3, 0, 2, 0x3000, 0x3004, 0x3008 },
- { "video_cc_ahb_clk", 0x57, 1, VIDEO_CC,
+ { "video_cc_ahb_clk", 0x57, 2, VIDEO_CC,
0x7, 0x3F, 0, 0x7, 0, 3, 0xA4C, 0xE9C, 0xEBC },
- { "video_cc_mvs0_clk", 0x57, 1, VIDEO_CC,
+ { "video_cc_mvs0_clk", 0x57, 2, VIDEO_CC,
0x3, 0x3F, 0, 0x7, 0, 3, 0xA4C, 0xE9C, 0xEBC },
- { "video_cc_mvs0c_clk", 0x57, 1, VIDEO_CC,
+ { "video_cc_mvs0c_clk", 0x57, 2, VIDEO_CC,
0x1, 0x3F, 0, 0x7, 0, 3, 0xA4C, 0xE9C, 0xEBC },
- { "video_cc_mvs1_clk", 0x57, 1, VIDEO_CC,
+ { "video_cc_mvs1_clk", 0x57, 2, VIDEO_CC,
0x5, 0x3F, 0, 0x7, 0, 3, 0xA4C, 0xE9C, 0xEBC },
- { "video_cc_mvs1_div2_clk", 0x57, 1, VIDEO_CC,
+ { "video_cc_mvs1_div2_clk", 0x57, 2, VIDEO_CC,
0x8, 0x3F, 0, 0x7, 0, 3, 0xA4C, 0xE9C, 0xEBC },
- { "video_cc_mvs1c_clk", 0x57, 1, VIDEO_CC,
+ { "video_cc_mvs1c_clk", 0x57, 2, VIDEO_CC,
0x9, 0x3F, 0, 0x7, 0, 3, 0xA4C, 0xE9C, 0xEBC },
- { "video_cc_xo_clk", 0x57, 1, VIDEO_CC,
- 0xB, 0x3F, 0, 0x7, 0, 3, 0xA4C, 0xE9C, 0xEBC },
),
.hw.init = &(struct clk_init_data){
.name = "gcc_debug_mux",
diff --git a/drivers/clk/qcom/gcc-kona.c b/drivers/clk/qcom/gcc-kona.c
index 7ba76b0..ec44288 100644
--- a/drivers/clk/qcom/gcc-kona.c
+++ b/drivers/clk/qcom/gcc-kona.c
@@ -1969,19 +1969,6 @@ static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
},
};
-static struct clk_branch gcc_npu_at_clk = {
- .halt_reg = 0x4d014,
- .halt_check = BRANCH_VOTED,
- .clkr = {
- .enable_reg = 0x4d014,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_npu_at_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_npu_axi_clk = {
.halt_reg = 0x4d008,
.halt_check = BRANCH_VOTED,
@@ -2084,19 +2071,6 @@ static struct clk_branch gcc_npu_gpll0_div_clk_src = {
},
};
-static struct clk_branch gcc_npu_trig_clk = {
- .halt_reg = 0x4d010,
- .halt_check = BRANCH_VOTED,
- .clkr = {
- .enable_reg = 0x4d010,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_npu_trig_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_pcie0_phy_refgen_clk = {
.halt_reg = 0x6f02c,
.halt_check = BRANCH_HALT,
@@ -4033,7 +4007,6 @@ static struct clk_regmap *gcc_kona_clocks[] = {
[GCC_GPU_IREF_EN] = &gcc_gpu_iref_en.clkr,
[GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
[GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
- [GCC_NPU_AT_CLK] = &gcc_npu_at_clk.clkr,
[GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr,
[GCC_NPU_BWMON_AXI_CLK] = &gcc_npu_bwmon_axi_clk.clkr,
[GCC_NPU_BWMON_CFG_AHB_CLK] = &gcc_npu_bwmon_cfg_ahb_clk.clkr,
@@ -4041,7 +4014,6 @@ static struct clk_regmap *gcc_kona_clocks[] = {
[GCC_NPU_DMA_CLK] = &gcc_npu_dma_clk.clkr,
[GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr,
[GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr,
- [GCC_NPU_TRIG_CLK] = &gcc_npu_trig_clk.clkr,
[GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr,
[GCC_PCIE1_PHY_REFGEN_CLK] = &gcc_pcie1_phy_refgen_clk.clkr,
[GCC_PCIE2_PHY_REFGEN_CLK] = &gcc_pcie2_phy_refgen_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-lito.c b/drivers/clk/qcom/gcc-lito.c
index 7ca26b8..0c02861 100644
--- a/drivers/clk/qcom/gcc-lito.c
+++ b/drivers/clk/qcom/gcc-lito.c
@@ -280,31 +280,6 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
},
};
-static const struct freq_tbl ftbl_gcc_dpm_clk_src[] = {
- F(200000000, P_GPLL0_OUT_EVEN, 1.5, 0, 0),
- { }
-};
-
-static struct clk_rcg2 gcc_dpm_clk_src = {
- .cmd_rcgr = 0x4600c,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = gcc_parent_map_0,
- .freq_tbl = ftbl_gcc_dpm_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_dpm_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
- .ops = &clk_rcg2_ops,
- .vdd_class = &vdd_cx,
- .num_rate_max = VDD_NUM,
- .rate_max = (unsigned long[VDD_NUM]) {
- [VDD_LOWER] = 100000000,
- [VDD_LOW] = 150000000,
- [VDD_LOW_L1] = 200000000},
- },
-};
-
static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
F(19200000, P_BI_TCXO, 1, 0, 0),
F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0),
@@ -1315,39 +1290,6 @@ static struct clk_branch gcc_disp_xo_clk = {
},
};
-static struct clk_branch gcc_dpm_ahb_clk = {
- .halt_reg = 0x46008,
- .halt_check = BRANCH_HALT,
- .hwcg_reg = 0x46008,
- .hwcg_bit = 1,
- .clkr = {
- .enable_reg = 0x46008,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_dpm_ahb_clk",
- .ops = &clk_branch2_ops,
- },
- },
-};
-
-static struct clk_branch gcc_dpm_clk = {
- .halt_reg = 0x46004,
- .halt_check = BRANCH_HALT,
- .clkr = {
- .enable_reg = 0x46004,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_dpm_clk",
- .parent_names = (const char *[]){
- "gcc_dpm_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_gp1_clk = {
.halt_reg = 0x64000,
.halt_check = BRANCH_HALT,
@@ -1512,6 +1454,7 @@ static struct clk_branch gcc_npu_bwmon2_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_npu_bwmon2_axi_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1525,6 +1468,7 @@ static struct clk_branch gcc_npu_bwmon_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_npu_bwmon_axi_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -1538,6 +1482,7 @@ static struct clk_branch gcc_npu_bwmon_cfg_ahb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_npu_bwmon_cfg_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
.ops = &clk_branch2_ops,
},
},
@@ -2565,9 +2510,6 @@ static struct clk_regmap *gcc_lito_clocks[] = {
[GCC_DISP_THROTTLE_HF_AXI_CLK] = &gcc_disp_throttle_hf_axi_clk.clkr,
[GCC_DISP_THROTTLE_SF_AXI_CLK] = &gcc_disp_throttle_sf_axi_clk.clkr,
[GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
- [GCC_DPM_AHB_CLK] = &gcc_dpm_ahb_clk.clkr,
- [GCC_DPM_CLK] = &gcc_dpm_clk.clkr,
- [GCC_DPM_CLK_SRC] = &gcc_dpm_clk_src.clkr,
[GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
[GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
[GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
diff --git a/drivers/clk/qcom/gpucc-kona.c b/drivers/clk/qcom/gpucc-kona.c
index 34cb1b6..b46e269 100644
--- a/drivers/clk/qcom/gpucc-kona.c
+++ b/drivers/clk/qcom/gpucc-kona.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.*/
#define pr_fmt(fmt) "clk: %s: " fmt, __func__
@@ -136,7 +136,7 @@ static struct clk_branch gpu_cc_ahb_clk = {
static struct clk_branch gpu_cc_crc_ahb_clk = {
.halt_reg = 0x107c,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x107c,
.enable_mask = BIT(0),
@@ -149,7 +149,7 @@ static struct clk_branch gpu_cc_crc_ahb_clk = {
static struct clk_branch gpu_cc_cx_apb_clk = {
.halt_reg = 0x1088,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x1088,
.enable_mask = BIT(0),
@@ -180,12 +180,16 @@ static struct clk_branch gpu_cc_cx_gmu_clk = {
static struct clk_branch gpu_cc_cx_qdss_at_clk = {
.halt_reg = 0x1080,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x1080,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_cx_qdss_at_clk",
+ .parent_names = (const char *[]){
+ "qdss_qmp_clk",
+ },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -193,12 +197,16 @@ static struct clk_branch gpu_cc_cx_qdss_at_clk = {
static struct clk_branch gpu_cc_cx_qdss_trig_clk = {
.halt_reg = 0x1094,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x1094,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_cx_qdss_trig_clk",
+ .parent_names = (const char *[]){
+ "qdss_qmp_clk",
+ },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -206,12 +214,16 @@ static struct clk_branch gpu_cc_cx_qdss_trig_clk = {
static struct clk_branch gpu_cc_cx_qdss_tsctr_clk = {
.halt_reg = 0x1084,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x1084,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_cx_qdss_tsctr_clk",
+ .parent_names = (const char *[]){
+ "qdss_qmp_clk",
+ },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -219,7 +231,7 @@ static struct clk_branch gpu_cc_cx_qdss_tsctr_clk = {
static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
.halt_reg = 0x108c,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x108c,
.enable_mask = BIT(0),
@@ -232,7 +244,7 @@ static struct clk_branch gpu_cc_cx_snoc_dvm_clk = {
static struct clk_branch gpu_cc_cxo_aon_clk = {
.halt_reg = 0x1004,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x1004,
.enable_mask = BIT(0),
@@ -276,12 +288,16 @@ static struct clk_branch gpu_cc_gx_gmu_clk = {
static struct clk_branch gpu_cc_gx_qdss_tsctr_clk = {
.halt_reg = 0x105c,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x105c,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpu_cc_gx_qdss_tsctr_clk",
+ .parent_names = (const char *[]){
+ "qdss_qmp_clk",
+ },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -289,7 +305,7 @@ static struct clk_branch gpu_cc_gx_qdss_tsctr_clk = {
static struct clk_branch gpu_cc_gx_vsense_clk = {
.halt_reg = 0x1058,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x1058,
.enable_mask = BIT(0),
@@ -302,7 +318,7 @@ static struct clk_branch gpu_cc_gx_vsense_clk = {
static struct clk_branch gpu_cc_sleep_clk = {
.halt_reg = 0x1090,
- .halt_check = BRANCH_HALT,
+ .halt_check = BRANCH_HALT_VOTED,
.clkr = {
.enable_reg = 0x1090,
.enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/npucc-kona.c b/drivers/clk/qcom/npucc-kona.c
index 0f29925..2734d9e 100644
--- a/drivers/clk/qcom/npucc-kona.c
+++ b/drivers/clk/qcom/npucc-kona.c
@@ -477,6 +477,10 @@ static struct clk_branch npu_cc_atb_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "npu_cc_atb_clk",
+ .parent_names = (const char *[]){
+ "qdss_qmp_clk",
+ },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 13eb5b2..c40d572 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -366,10 +366,10 @@ static SUNXI_CCU_MP_WITH_MUX_GATE(spi1_clk, "spi1", mod0_default_parents, 0x0a4,
static const char * const i2s_parents[] = { "pll-audio-8x", "pll-audio-4x",
"pll-audio-2x", "pll-audio" };
static SUNXI_CCU_MUX_WITH_GATE(i2s0_clk, "i2s0", i2s_parents,
- 0x0b0, 16, 2, BIT(31), 0);
+ 0x0b0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_MUX_WITH_GATE(i2s1_clk, "i2s1", i2s_parents,
- 0x0b4, 16, 2, BIT(31), 0);
+ 0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
/* TODO: the parent for most of the USB clocks is not known */
static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
@@ -446,7 +446,7 @@ static SUNXI_CCU_M_WITH_GATE(ve_clk, "ve", "pll-ve",
static SUNXI_CCU_GATE(ac_dig_clk, "ac-dig", "pll-audio",
0x140, BIT(31), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(ac_dig_4x_clk, "ac-dig-4x", "pll-audio-4x",
- 0x140, BIT(30), 0);
+ 0x140, BIT(30), CLK_SET_RATE_PARENT);
static SUNXI_CCU_GATE(avs_clk, "avs", "osc24M",
0x144, BIT(31), 0);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 4ddbc66..d5b1ec6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1542,17 +1542,16 @@ static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
{
unsigned int ret_freq = 0;
- if (!cpufreq_driver->get)
+ if (unlikely(policy_is_inactive(policy)) || !cpufreq_driver->get)
return ret_freq;
ret_freq = cpufreq_driver->get(policy->cpu);
/*
- * Updating inactive policies is invalid, so avoid doing that. Also
- * if fast frequency switching is used with the given policy, the check
+ * If fast frequency switching is used with the given policy, the check
* against policy->cur is pointless, so skip it in that case too.
*/
- if (unlikely(policy_is_inactive(policy)) || policy->fast_switch_enabled)
+ if (policy->fast_switch_enabled)
return ret_freq;
if (ret_freq && policy->cur &&
@@ -1581,10 +1580,7 @@ unsigned int cpufreq_get(unsigned int cpu)
if (policy) {
down_read(&policy->rwsem);
-
- if (!policy_is_inactive(policy))
- ret_freq = __cpufreq_get(policy);
-
+ ret_freq = __cpufreq_get(policy);
up_read(&policy->rwsem);
cpufreq_cpu_put(policy);
diff --git a/drivers/cpuidle/cpuidle-big_little.c b/drivers/cpuidle/cpuidle-big_little.c
index db2ede5..b44476a 100644
--- a/drivers/cpuidle/cpuidle-big_little.c
+++ b/drivers/cpuidle/cpuidle-big_little.c
@@ -167,6 +167,7 @@ static int __init bl_idle_init(void)
{
int ret;
struct device_node *root = of_find_node_by_path("/");
+ const struct of_device_id *match_id;
if (!root)
return -ENODEV;
@@ -174,7 +175,11 @@ static int __init bl_idle_init(void)
/*
* Initialize the driver just for a compliant set of machines
*/
- if (!of_match_node(compatible_machine_match, root))
+ match_id = of_match_node(compatible_machine_match, root);
+
+ of_node_put(root);
+
+ if (!match_id)
return -ENODEV;
if (!mcpm_is_available())
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index a825b64..668cd3e 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -762,4 +762,8 @@
source "drivers/crypto/hisilicon/Kconfig"
+if ARCH_QCOM
+source "drivers/crypto/msm/Kconfig"
+endif
+
endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index 9cd4c0c..e2ca339 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -21,7 +21,7 @@
obj-$(CONFIG_CRYPTO_DEV_MXC_SCC) += mxc-scc.o
obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o
n2_crypto-y := n2_core.o n2_asm.o
-obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += msm/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_ICE) += msm/
obj-$(CONFIG_CRYPTO_DEV_NX) += nx/
obj-$(CONFIG_CRYPTO_DEV_OMAP) += omap-crypto.o
obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes-driver.o
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index d2663a4..a92a66b 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -556,7 +556,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
desc = dmaengine_prep_slave_sg(channel,
ctx->device->dma.sg_src,
ctx->device->dma.sg_src_len,
- direction, DMA_CTRL_ACK);
+ DMA_MEM_TO_DEV, DMA_CTRL_ACK);
break;
case DMA_FROM_DEVICE:
@@ -580,7 +580,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
desc = dmaengine_prep_slave_sg(channel,
ctx->device->dma.sg_dst,
ctx->device->dma.sg_dst_len,
- direction,
+ DMA_DEV_TO_MEM,
DMA_CTRL_ACK |
DMA_PREP_INTERRUPT);
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index 633321a..a0bb8a6 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -166,7 +166,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
__func__);
desc = dmaengine_prep_slave_sg(channel,
ctx->device->dma.sg, ctx->device->dma.sg_len,
- direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
+ DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(ctx->device->dev,
"%s: dmaengine_prep_slave_sg() failed!\n", __func__);
diff --git a/drivers/devfreq/bimc-bwmon.c b/drivers/devfreq/bimc-bwmon.c
index 8b74ecc..a0ac20b 100644
--- a/drivers/devfreq/bimc-bwmon.c
+++ b/drivers/devfreq/bimc-bwmon.c
@@ -580,15 +580,16 @@ unsigned long get_zone_count(struct bwmon *m, unsigned int zone,
WARN(1, "Invalid\n");
return 0;
case MON2:
- count = readl_relaxed(MON2_ZONE_MAX(m, zone)) + 1;
+ count = readl_relaxed(MON2_ZONE_MAX(m, zone));
break;
case MON3:
count = readl_relaxed(MON3_ZONE_MAX(m, zone));
- if (count)
- count++;
break;
}
+ if (count)
+ count++;
+
return count;
}
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 9ccb000..2ebf83f 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -72,28 +72,28 @@ static struct devfreq *find_device_devfreq(struct device *dev)
return ERR_PTR(-ENODEV);
}
-static unsigned long find_available_min_freq(struct devfreq *devfreq)
+static long find_available_min_freq(struct devfreq *devfreq)
{
struct dev_pm_opp *opp;
- unsigned long min_freq = 0;
+ long min_freq = 0;
opp = dev_pm_opp_find_freq_ceil(devfreq->dev.parent, &min_freq);
if (IS_ERR(opp))
- min_freq = 0;
+ min_freq = PTR_ERR(opp);
else
dev_pm_opp_put(opp);
return min_freq;
}
-static unsigned long find_available_max_freq(struct devfreq *devfreq)
+static long find_available_max_freq(struct devfreq *devfreq)
{
struct dev_pm_opp *opp;
- unsigned long max_freq = ULONG_MAX;
+ long max_freq = LONG_MAX;
opp = dev_pm_opp_find_freq_floor(devfreq->dev.parent, &max_freq);
if (IS_ERR(opp))
- max_freq = 0;
+ max_freq = PTR_ERR(opp);
else
dev_pm_opp_put(opp);
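
The point of switching these helpers from unsigned long to long is that a negative errno from the OPP lookup can now travel back to the caller instead of being collapsed into a 0/ULONG_MAX sentinel. A condensed sketch of the pattern, with hypothetical function names and assuming the dev_pm_opp_* API and devfreq fields already used in this file (not the exact patched code):

/* Sketch only: propagate the OPP lookup error instead of a 0 sentinel. */
static long example_find_min_freq(struct device *dev)
{
	unsigned long freq = 0;
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_freq_ceil(dev, &freq);
	if (IS_ERR(opp))
		return PTR_ERR(opp);	/* negative errno fits in a signed long */

	dev_pm_opp_put(opp);
	return freq;
}

static int example_caller(struct devfreq *devfreq)
{
	long freq = example_find_min_freq(devfreq->dev.parent);

	if (freq < 0)
		return -EINVAL;		/* preserve the old error behaviour */

	devfreq->scaling_min_freq = freq;
	return 0;
}

Callers therefore test for freq < 0 rather than freq == 0, which is exactly what the hunks below do.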
@@ -494,20 +494,25 @@ static int devfreq_notifier_call(struct notifier_block *nb, unsigned long type,
{
struct devfreq *devfreq = container_of(nb, struct devfreq, nb);
int ret;
+ long freq;
mutex_lock(&devfreq->lock);
- devfreq->scaling_min_freq = find_available_min_freq(devfreq);
- if (!devfreq->scaling_min_freq) {
+ freq = find_available_min_freq(devfreq);
+ if (freq < 0) {
+ devfreq->scaling_min_freq = 0;
mutex_unlock(&devfreq->lock);
return -EINVAL;
}
+ devfreq->scaling_min_freq = freq;
- devfreq->scaling_max_freq = find_available_max_freq(devfreq);
- if (!devfreq->scaling_max_freq) {
+ freq = find_available_max_freq(devfreq);
+ if (freq < 0) {
+ devfreq->scaling_max_freq = 0;
mutex_unlock(&devfreq->lock);
return -EINVAL;
}
+ devfreq->scaling_max_freq = freq;
ret = update_devfreq(devfreq);
mutex_unlock(&devfreq->lock);
@@ -562,6 +567,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
struct devfreq *devfreq;
struct devfreq_governor *governor;
int err = 0;
+ long freq;
if (!dev || !profile || !governor_name) {
dev_err(dev, "%s: Invalid parameters.\n", __func__);
@@ -606,21 +612,21 @@ struct devfreq *devfreq_add_device(struct device *dev,
mutex_lock(&devfreq->lock);
}
- devfreq->scaling_min_freq = find_available_min_freq(devfreq);
- if (!devfreq->scaling_min_freq) {
+ freq = find_available_min_freq(devfreq);
+ if (freq < 0) {
mutex_unlock(&devfreq->lock);
err = -EINVAL;
goto err_dev;
}
- devfreq->min_freq = devfreq->scaling_min_freq;
+ devfreq->min_freq = devfreq->scaling_min_freq = freq;
- devfreq->scaling_max_freq = find_available_max_freq(devfreq);
- if (!devfreq->scaling_max_freq) {
+ freq = find_available_max_freq(devfreq);
+ if (freq < 0) {
mutex_unlock(&devfreq->lock);
err = -EINVAL;
goto err_dev;
}
- devfreq->max_freq = devfreq->scaling_max_freq;
+ devfreq->max_freq = devfreq->scaling_max_freq = freq;
dev_set_name(&devfreq->dev, "%s", dev_name(dev));
err = device_register(&devfreq->dev);
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 847f84a..2b11d96 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -415,38 +415,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg(
}
}
-static int bcm2835_dma_abort(void __iomem *chan_base)
+static int bcm2835_dma_abort(struct bcm2835_chan *c)
{
- unsigned long cs;
+ void __iomem *chan_base = c->chan_base;
long int timeout = 10000;
- cs = readl(chan_base + BCM2835_DMA_CS);
- if (!(cs & BCM2835_DMA_ACTIVE))
+ /*
+ * A zero control block address means the channel is idle.
+ * (The ACTIVE flag in the CS register is not a reliable indicator.)
+ */
+ if (!readl(chan_base + BCM2835_DMA_ADDR))
return 0;
/* Write 0 to the active bit - Pause the DMA */
writel(0, chan_base + BCM2835_DMA_CS);
/* Wait for any current AXI transfer to complete */
- while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) {
+ while ((readl(chan_base + BCM2835_DMA_CS) &
+ BCM2835_DMA_WAITING_FOR_WRITES) && --timeout)
cpu_relax();
- cs = readl(chan_base + BCM2835_DMA_CS);
- }
- /* We'll un-pause when we set of our next DMA */
+ /* Peripheral might be stuck and fail to signal AXI write responses */
if (!timeout)
- return -ETIMEDOUT;
+ dev_err(c->vc.chan.device->dev,
+ "failed to complete outstanding writes\n");
- if (!(cs & BCM2835_DMA_ACTIVE))
- return 0;
-
- /* Terminate the control block chain */
- writel(0, chan_base + BCM2835_DMA_NEXTCB);
-
- /* Abort the whole DMA */
- writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE,
- chan_base + BCM2835_DMA_CS);
-
+ writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
return 0;
}
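
The rewritten abort path boils down to: treat a zero control-block address as "channel idle", pause the channel, give outstanding AXI writes a bounded amount of time to drain, then reset. The same bounded wait could also be expressed with the readl_poll_timeout_atomic() helper from <linux/iopoll.h>; the sketch below only illustrates that equivalent form (register macros reused from this driver, timeout values arbitrary), it is not what the patch applies:

#include <linux/iopoll.h>

static void example_bcm2835_abort(struct bcm2835_chan *c)
{
	void __iomem *chan_base = c->chan_base;
	u32 cs;

	/* A zero control block address means the channel is idle. */
	if (!readl(chan_base + BCM2835_DMA_ADDR))
		return;

	/* Pause the channel, then wait (bounded) for AXI writes to drain. */
	writel(0, chan_base + BCM2835_DMA_CS);
	if (readl_poll_timeout_atomic(chan_base + BCM2835_DMA_CS, cs,
				      !(cs & BCM2835_DMA_WAITING_FOR_WRITES),
				      0, 100))
		dev_err(c->vc.chan.device->dev,
			"failed to complete outstanding writes\n");

	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
}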
@@ -485,8 +479,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
spin_lock_irqsave(&c->vc.lock, flags);
- /* Acknowledge interrupt */
- writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS);
+ /*
+ * Clear the INT flag to receive further interrupts. Keep the channel
+ * active in case the descriptor is cyclic or in case the client has
+ * already terminated the descriptor and issued a new one. (May happen
+ * if this IRQ handler is threaded.) If the channel is finished, it
+ * will remain idle despite the ACTIVE flag being set.
+ */
+ writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
+ c->chan_base + BCM2835_DMA_CS);
d = c->desc;
@@ -494,11 +495,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
if (d->cyclic) {
/* call the cyclic callback */
vchan_cyclic_callback(&d->vd);
-
- /* Keep the DMA engine running */
- writel(BCM2835_DMA_ACTIVE,
- c->chan_base + BCM2835_DMA_CS);
- } else {
+ } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
vchan_cookie_complete(&c->desc->vd);
bcm2835_dma_start_desc(c);
}
@@ -796,7 +793,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device);
unsigned long flags;
- int timeout = 10000;
LIST_HEAD(head);
spin_lock_irqsave(&c->vc.lock, flags);
@@ -806,27 +802,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)
list_del_init(&c->node);
spin_unlock(&d->lock);
- /*
- * Stop DMA activity: we assume the callback will not be called
- * after bcm_dma_abort() returns (even if it does, it will see
- * c->desc is NULL and exit.)
- */
+ /* stop DMA activity */
if (c->desc) {
vchan_terminate_vdesc(&c->desc->vd);
c->desc = NULL;
- bcm2835_dma_abort(c->chan_base);
-
- /* Wait for stopping */
- while (--timeout) {
- if (!(readl(c->chan_base + BCM2835_DMA_CS) &
- BCM2835_DMA_ACTIVE))
- break;
-
- cpu_relax();
- }
-
- if (!timeout)
- dev_err(d->ddev.dev, "DMA transfer could not be terminated\n");
+ bcm2835_dma_abort(c);
}
vchan_get_all_descriptors(&c->vc, &head);
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 75b6ff0..118d371 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -617,7 +617,7 @@ static void imxdma_tasklet(unsigned long data)
{
struct imxdma_channel *imxdmac = (void *)data;
struct imxdma_engine *imxdma = imxdmac->imxdma;
- struct imxdma_desc *desc;
+ struct imxdma_desc *desc, *next_desc;
unsigned long flags;
spin_lock_irqsave(&imxdma->lock, flags);
@@ -647,10 +647,10 @@ static void imxdma_tasklet(unsigned long data)
list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free);
if (!list_empty(&imxdmac->ld_queue)) {
- desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc,
- node);
+ next_desc = list_first_entry(&imxdmac->ld_queue,
+ struct imxdma_desc, node);
list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active);
- if (imxdma_xfer_desc(desc) < 0)
+ if (imxdma_xfer_desc(next_desc) < 0)
dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n",
__func__, imxdmac->channel);
}
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index c74a88b..73de6a6 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -163,7 +163,7 @@ struct zynqmp_dma_desc_ll {
u32 ctrl;
u64 nxtdscraddr;
u64 rsvd;
-}; __aligned(64)
+};
/**
* struct zynqmp_dma_desc_sw - Per Transaction structure
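
The removed attribute was ineffective where it stood: written after the closing semicolon, __aligned(64) no longer binds to the descriptor type, which is why the patch simply drops it. For reference, if 64-byte alignment of a hardware descriptor were actually required, the attribute would sit before the semicolon; a minimal sketch with made-up fields, not the zynqmp layout:

struct example_hw_desc {
	u64 addr;
	u32 ctrl;
	u64 next;
} __aligned(64);	/* attribute precedes the ';' so it applies to the struct */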
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c
index 472c88a..92f843ea 100644
--- a/drivers/firmware/arm_scmi/bus.c
+++ b/drivers/firmware/arm_scmi/bus.c
@@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver)
}
EXPORT_SYMBOL_GPL(scmi_driver_unregister);
+static void scmi_device_release(struct device *dev)
+{
+ kfree(to_scmi_dev(dev));
+}
+
struct scmi_device *
scmi_device_create(struct device_node *np, struct device *parent, int protocol)
{
@@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
scmi_dev->dev.parent = parent;
scmi_dev->dev.of_node = np;
scmi_dev->dev.bus = &scmi_bus_type;
+ scmi_dev->dev.release = scmi_device_release;
dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id);
retval = device_register(&scmi_dev->dev);
@@ -156,9 +162,8 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol)
void scmi_device_destroy(struct scmi_device *scmi_dev)
{
scmi_handle_put(scmi_dev->handle);
- device_unregister(&scmi_dev->dev);
ida_simple_remove(&scmi_bus_id, scmi_dev->id);
- kfree(scmi_dev);
+ device_unregister(&scmi_dev->dev);
}
void scmi_set_handle(struct scmi_device *scmi_dev)
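
This hunk is the standard driver-model lifetime fix: once device_register() has run, the memory backing the embedded struct device must be freed from its ->release() callback, and teardown just drops the final reference via device_unregister(). A minimal sketch of that pattern with hypothetical names (not the SCMI code itself):

#include <linux/device.h>
#include <linux/slab.h>

struct foo_device {
	struct device dev;
};

static void foo_device_release(struct device *dev)
{
	/* Runs when the last reference to dev is dropped. */
	kfree(container_of(dev, struct foo_device, dev));
}

static struct foo_device *foo_device_create(struct device *parent)
{
	struct foo_device *fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);

	if (!fdev)
		return NULL;

	fdev->dev.parent = parent;
	fdev->dev.release = foo_device_release;
	dev_set_name(&fdev->dev, "foo_dev");

	if (device_register(&fdev->dev)) {
		/* After device_register(), only put_device() may free it. */
		put_device(&fdev->dev);
		return NULL;
	}
	return fdev;
}

static void foo_device_destroy(struct foo_device *fdev)
{
	device_unregister(&fdev->dev);	/* last put calls foo_device_release() */
}

Freeing with kfree() after device_unregister(), as the old code did, risks a use-after-free if anything else still holds a reference to the device.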
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index aa66cbf..b0aeffd 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -173,6 +173,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call)
static DEFINE_SEMAPHORE(efi_runtime_lock);
/*
+ * Expose the EFI runtime lock to the UV platform
+ */
+#ifdef CONFIG_X86_UV
+extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock);
+#endif
+
+/*
* Calls the appropriate efi_runtime_service() with the appropriate
* arguments.
*
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 9336ffd..fceaafd 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -318,7 +318,12 @@ EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
static efi_status_t
check_var_size(u32 attributes, unsigned long size)
{
- const struct efivar_operations *fops = __efivars->ops;
+ const struct efivar_operations *fops;
+
+ if (!__efivars)
+ return EFI_UNSUPPORTED;
+
+ fops = __efivars->ops;
if (!fops->query_variable_store)
return EFI_UNSUPPORTED;
@@ -329,7 +334,12 @@ check_var_size(u32 attributes, unsigned long size)
static efi_status_t
check_var_size_nonblocking(u32 attributes, unsigned long size)
{
- const struct efivar_operations *fops = __efivars->ops;
+ const struct efivar_operations *fops;
+
+ if (!__efivars)
+ return EFI_UNSUPPORTED;
+
+ fops = __efivars->ops;
if (!fops->query_variable_store)
return EFI_UNSUPPORTED;
@@ -429,13 +439,18 @@ static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
void *data, bool duplicates, struct list_head *head)
{
- const struct efivar_operations *ops = __efivars->ops;
+ const struct efivar_operations *ops;
unsigned long variable_name_size = 1024;
efi_char16_t *variable_name;
efi_status_t status;
efi_guid_t vendor_guid;
int err = 0;
+ if (!__efivars)
+ return -EFAULT;
+
+ ops = __efivars->ops;
+
variable_name = kzalloc(variable_name_size, GFP_KERNEL);
if (!variable_name) {
printk(KERN_ERR "efivars: Memory allocation failed.\n");
@@ -583,12 +598,14 @@ static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
*/
int __efivar_entry_delete(struct efivar_entry *entry)
{
- const struct efivar_operations *ops = __efivars->ops;
efi_status_t status;
- status = ops->set_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- 0, 0, NULL);
+ if (!__efivars)
+ return -EINVAL;
+
+ status = __efivars->ops->set_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ 0, 0, NULL);
return efi_status_to_err(status);
}
@@ -607,12 +624,17 @@ EXPORT_SYMBOL_GPL(__efivar_entry_delete);
*/
int efivar_entry_delete(struct efivar_entry *entry)
{
- const struct efivar_operations *ops = __efivars->ops;
+ const struct efivar_operations *ops;
efi_status_t status;
if (down_interruptible(&efivars_lock))
return -EINTR;
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+ ops = __efivars->ops;
status = ops->set_variable(entry->var.VariableName,
&entry->var.VendorGuid,
0, 0, NULL);
@@ -650,13 +672,19 @@ EXPORT_SYMBOL_GPL(efivar_entry_delete);
int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
unsigned long size, void *data, struct list_head *head)
{
- const struct efivar_operations *ops = __efivars->ops;
+ const struct efivar_operations *ops;
efi_status_t status;
efi_char16_t *name = entry->var.VariableName;
efi_guid_t vendor = entry->var.VendorGuid;
if (down_interruptible(&efivars_lock))
return -EINTR;
+
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+ ops = __efivars->ops;
if (head && efivar_entry_find(name, vendor, head, false)) {
up(&efivars_lock);
return -EEXIST;
@@ -687,12 +715,17 @@ static int
efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
u32 attributes, unsigned long size, void *data)
{
- const struct efivar_operations *ops = __efivars->ops;
+ const struct efivar_operations *ops;
efi_status_t status;
if (down_trylock(&efivars_lock))
return -EBUSY;
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+
status = check_var_size_nonblocking(attributes,
size + ucs2_strsize(name, 1024));
if (status != EFI_SUCCESS) {
@@ -700,6 +733,7 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
return -ENOSPC;
}
+ ops = __efivars->ops;
status = ops->set_variable_nonblocking(name, &vendor, attributes,
size, data);
@@ -727,9 +761,13 @@ efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
bool block, unsigned long size, void *data)
{
- const struct efivar_operations *ops = __efivars->ops;
+ const struct efivar_operations *ops;
efi_status_t status;
+ if (!__efivars)
+ return -EINVAL;
+
+ ops = __efivars->ops;
if (!ops->query_variable_store)
return -ENOSYS;
@@ -829,13 +867,18 @@ EXPORT_SYMBOL_GPL(efivar_entry_find);
*/
int efivar_entry_size(struct efivar_entry *entry, unsigned long *size)
{
- const struct efivar_operations *ops = __efivars->ops;
+ const struct efivar_operations *ops;
efi_status_t status;
*size = 0;
if (down_interruptible(&efivars_lock))
return -EINTR;
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+ ops = __efivars->ops;
status = ops->get_variable(entry->var.VariableName,
&entry->var.VendorGuid, NULL, size, NULL);
up(&efivars_lock);
@@ -861,12 +904,14 @@ EXPORT_SYMBOL_GPL(efivar_entry_size);
int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
unsigned long *size, void *data)
{
- const struct efivar_operations *ops = __efivars->ops;
efi_status_t status;
- status = ops->get_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- attributes, size, data);
+ if (!__efivars)
+ return -EINVAL;
+
+ status = __efivars->ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ attributes, size, data);
return efi_status_to_err(status);
}
@@ -882,14 +927,19 @@ EXPORT_SYMBOL_GPL(__efivar_entry_get);
int efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
unsigned long *size, void *data)
{
- const struct efivar_operations *ops = __efivars->ops;
efi_status_t status;
if (down_interruptible(&efivars_lock))
return -EINTR;
- status = ops->get_variable(entry->var.VariableName,
- &entry->var.VendorGuid,
- attributes, size, data);
+
+ if (!__efivars) {
+ up(&efivars_lock);
+ return -EINVAL;
+ }
+
+ status = __efivars->ops->get_variable(entry->var.VariableName,
+ &entry->var.VendorGuid,
+ attributes, size, data);
up(&efivars_lock);
return efi_status_to_err(status);
@@ -921,7 +971,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_get);
int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
unsigned long *size, void *data, bool *set)
{
- const struct efivar_operations *ops = __efivars->ops;
+ const struct efivar_operations *ops;
efi_char16_t *name = entry->var.VariableName;
efi_guid_t *vendor = &entry->var.VendorGuid;
efi_status_t status;
@@ -940,6 +990,11 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
if (down_interruptible(&efivars_lock))
return -EINTR;
+ if (!__efivars) {
+ err = -EINVAL;
+ goto out;
+ }
+
/*
* Ensure that the available space hasn't shrunk below the safe level
*/
@@ -956,6 +1011,8 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
}
}
+ ops = __efivars->ops;
+
status = ops->set_variable(name, vendor, attributes, *size, data);
if (status != EFI_SUCCESS) {
err = efi_status_to_err(status);
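All of the efivar helpers patched above gain the same shape: take efivars_lock, re-check that __efivars is still registered, and release the lock on every early-return path. A minimal sketch of that shape with illustrative names (example_ops is not an EFI type):

#include <linux/semaphore.h>
#include <linux/errno.h>

struct example_ops {
	int (*op)(void);
};

static DEFINE_SEMAPHORE(example_lock);
static struct example_ops *example_backend;	/* NULL once the backend unregisters */

static int example_do_op(void)
{
	int ret;

	if (down_interruptible(&example_lock))
		return -EINTR;

	if (!example_backend) {
		up(&example_lock);		/* never leak the lock on the error path */
		return -EINVAL;
	}

	ret = example_backend->op();
	up(&example_lock);
	return ret;
}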
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index 68e4b2b..7a42c19 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -403,6 +403,7 @@ static int altera_cvp_probe(struct pci_dev *pdev,
struct altera_cvp_conf *conf;
struct fpga_manager *mgr;
u16 cmd, val;
+ u32 regval;
int ret;
/*
@@ -416,6 +417,14 @@ static int altera_cvp_probe(struct pci_dev *pdev,
return -ENODEV;
}
+ pci_read_config_dword(pdev, VSE_CVP_STATUS, &regval);

+ if (!(regval & VSE_CVP_STATUS_CVP_EN)) {
+ dev_err(&pdev->dev,
+ "CVP is disabled for this device: CVP_STATUS Reg 0x%x\n",
+ regval);
+ return -ENODEV;
+ }
+
conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL);
if (!conf)
return -ENOMEM;
@@ -471,7 +480,8 @@ static int altera_cvp_probe(struct pci_dev *pdev,
return 0;
err_unmap:
- pci_iounmap(pdev, conf->map);
+ if (conf->map)
+ pci_iounmap(pdev, conf->map);
pci_release_region(pdev, CVP_BAR);
err_disable:
cmd &= ~PCI_COMMAND_MEMORY;
@@ -486,7 +496,8 @@ static void altera_cvp_remove(struct pci_dev *pdev)
u16 cmd;
fpga_mgr_unregister(mgr);
- pci_iounmap(pdev, conf->map);
+ if (conf->map)
+ pci_iounmap(pdev, conf->map);
pci_release_region(pdev, CVP_BAR);
pci_read_config_word(pdev, PCI_COMMAND, &cmd);
cmd &= ~PCI_COMMAND_MEMORY;
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c
index d72af6f..74401e0 100644
--- a/drivers/gpio/gpio-mt7621.c
+++ b/drivers/gpio/gpio-mt7621.c
@@ -30,6 +30,7 @@
#define GPIO_REG_EDGE 0xA0
struct mtk_gc {
+ struct irq_chip irq_chip;
struct gpio_chip chip;
spinlock_t lock;
int bank;
@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type)
return 0;
}
-static struct irq_chip mediatek_gpio_irq_chip = {
- .irq_unmask = mediatek_gpio_irq_unmask,
- .irq_mask = mediatek_gpio_irq_mask,
- .irq_mask_ack = mediatek_gpio_irq_mask,
- .irq_set_type = mediatek_gpio_irq_type,
-};
-
static int
mediatek_gpio_xlate(struct gpio_chip *chip,
const struct of_phandle_args *spec, u32 *flags)
@@ -244,6 +238,8 @@ mediatek_gpio_bank_probe(struct device *dev,
rg->chip.of_xlate = mediatek_gpio_xlate;
rg->chip.label = devm_kasprintf(dev, GFP_KERNEL, "%s-bank%d",
dev_name(dev), bank);
+ if (!rg->chip.label)
+ return -ENOMEM;
ret = devm_gpiochip_add_data(dev, &rg->chip, mtk);
if (ret < 0) {
@@ -252,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}
+ rg->irq_chip.name = dev_name(dev);
+ rg->irq_chip.parent_device = dev;
+ rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
+ rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
+ rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
+ rg->irq_chip.irq_set_type = mediatek_gpio_irq_type;
+
if (mtk->gpio_irq) {
/*
* Manually request the irq here instead of passing
@@ -268,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev,
return ret;
}
- ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip,
+ ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip,
0, handle_simple_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(dev, "failed to add gpiochip_irqchip\n");
return ret;
}
- gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip,
+ gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip,
mtk->gpio_irq, NULL);
}
@@ -295,6 +298,7 @@ mediatek_gpio_probe(struct platform_device *pdev)
struct device_node *np = dev->of_node;
struct mtk *mtk;
int i;
+ int ret;
mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL);
if (!mtk)
@@ -307,10 +311,12 @@ mediatek_gpio_probe(struct platform_device *pdev)
mtk->gpio_irq = irq_of_parse_and_map(np, 0);
mtk->dev = dev;
platform_set_drvdata(pdev, mtk);
- mediatek_gpio_irq_chip.name = dev_name(dev);
- for (i = 0; i < MTK_BANK_CNT; i++)
- mediatek_gpio_bank_probe(dev, np, i);
+ for (i = 0; i < MTK_BANK_CNT; i++) {
+ ret = mediatek_gpio_bank_probe(dev, np, i);
+ if (ret)
+ return ret;
+ }
return 0;
}
diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c
index 995cf0b..2d1dfa1 100644
--- a/drivers/gpio/gpio-mxc.c
+++ b/drivers/gpio/gpio-mxc.c
@@ -17,6 +17,7 @@
#include <linux/irqchip/chained_irq.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/syscore_ops.h>
#include <linux/gpio/driver.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -550,33 +551,38 @@ static void mxc_gpio_restore_regs(struct mxc_gpio_port *port)
writel(port->gpio_saved_reg.dr, port->base + GPIO_DR);
}
-static int __maybe_unused mxc_gpio_noirq_suspend(struct device *dev)
+static int mxc_gpio_syscore_suspend(void)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct mxc_gpio_port *port = platform_get_drvdata(pdev);
+ struct mxc_gpio_port *port;
- mxc_gpio_save_regs(port);
- clk_disable_unprepare(port->clk);
+ /* walk through all ports */
+ list_for_each_entry(port, &mxc_gpio_ports, node) {
+ mxc_gpio_save_regs(port);
+ clk_disable_unprepare(port->clk);
+ }
return 0;
}
-static int __maybe_unused mxc_gpio_noirq_resume(struct device *dev)
+static void mxc_gpio_syscore_resume(void)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct mxc_gpio_port *port = platform_get_drvdata(pdev);
+ struct mxc_gpio_port *port;
int ret;
- ret = clk_prepare_enable(port->clk);
- if (ret)
- return ret;
- mxc_gpio_restore_regs(port);
-
- return 0;
+ /* walk through all ports */
+ list_for_each_entry(port, &mxc_gpio_ports, node) {
+ ret = clk_prepare_enable(port->clk);
+ if (ret) {
+ pr_err("mxc: failed to enable gpio clock %d\n", ret);
+ return;
+ }
+ mxc_gpio_restore_regs(port);
+ }
}
-static const struct dev_pm_ops mxc_gpio_dev_pm_ops = {
- SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mxc_gpio_noirq_suspend, mxc_gpio_noirq_resume)
+static struct syscore_ops mxc_gpio_syscore_ops = {
+ .suspend = mxc_gpio_syscore_suspend,
+ .resume = mxc_gpio_syscore_resume,
};
static struct platform_driver mxc_gpio_driver = {
@@ -584,7 +590,6 @@ static struct platform_driver mxc_gpio_driver = {
.name = "gpio-mxc",
.of_match_table = mxc_gpio_dt_ids,
.suppress_bind_attrs = true,
- .pm = &mxc_gpio_dev_pm_ops,
},
.probe = mxc_gpio_probe,
.id_table = mxc_gpio_devtype,
@@ -592,6 +597,8 @@ static struct platform_driver mxc_gpio_driver = {
static int __init gpio_mxc_init(void)
{
+ register_syscore_ops(&mxc_gpio_syscore_ops);
+
return platform_driver_register(&mxc_gpio_driver);
}
subsys_initcall(gpio_mxc_init);
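The gpio-mxc conversion above replaces per-device noirq PM callbacks with a single set of syscore hooks. Syscore callbacks run very late in suspend and very early in resume, with interrupts disabled and no device argument, so the driver walks its own list of instances. A hedged sketch of the shape (example_* names are illustrative):

#include <linux/syscore_ops.h>
#include <linux/list.h>

struct example_port {
	struct list_head node;
	/* saved register state, clk handle, ... */
};

static LIST_HEAD(example_ports);

static int example_syscore_suspend(void)
{
	struct example_port *port;

	list_for_each_entry(port, &example_ports, node) {
		/* save registers and gate clocks for this instance */
	}
	return 0;
}

static void example_syscore_resume(void)
{
	struct example_port *port;

	list_for_each_entry(port, &example_ports, node) {
		/* ungate clocks and restore registers for this instance */
	}
}

static struct syscore_ops example_syscore_ops = {
	.suspend = example_syscore_suspend,
	.resume  = example_syscore_resume,
};

/* register_syscore_ops(&example_syscore_ops) once, e.g. from an initcall */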
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index 9f3f166..eb27fa7 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void)
{
switch (gpio_type) {
case PXA3XX_GPIO:
+ case MMP2_GPIO:
return false;
default:
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 5c43f4c..f845471 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -2286,6 +2286,12 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
unsigned long flags;
unsigned offset;
+ if (label) {
+ label = kstrdup_const(label, GFP_KERNEL);
+ if (!label)
+ return -ENOMEM;
+ }
+
spin_lock_irqsave(&gpio_lock, flags);
/* NOTE: gpio_request() can be called in early boot,
@@ -2296,6 +2302,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
desc_set_label(desc, label ? : "?");
status = 0;
} else {
+ kfree_const(label);
status = -EBUSY;
goto done;
}
@@ -2312,6 +2319,7 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label)
if (status < 0) {
desc_set_label(desc, NULL);
+ kfree_const(label);
clear_bit(FLAG_REQUESTED, &desc->flags);
goto done;
}
@@ -2407,6 +2415,7 @@ static bool gpiod_free_commit(struct gpio_desc *desc)
chip->free(chip, gpio_chip_hwgpio(desc));
spin_lock_irqsave(&gpio_lock, flags);
}
+ kfree_const(desc->label);
desc_set_label(desc, NULL);
clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
clear_bit(FLAG_REQUESTED, &desc->flags);
@@ -3228,11 +3237,19 @@ EXPORT_SYMBOL_GPL(gpiod_cansleep);
* @desc: gpio to set the consumer name on
* @name: the new consumer name
*/
-void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
+int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
{
- VALIDATE_DESC_VOID(desc);
- /* Just overwrite whatever the previous name was */
- desc->label = name;
+ VALIDATE_DESC(desc);
+ if (name) {
+ name = kstrdup_const(name, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ }
+
+ kfree_const(desc->label);
+ desc_set_label(desc, name);
+
+ return 0;
}
EXPORT_SYMBOL_GPL(gpiod_set_consumer_name);
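With the change above, gpiolib owns the label strings it stores. The kstrdup_const()/kfree_const() pair avoids copying string literals: pointers into .rodata are returned as-is, only heap strings are duplicated, and kfree_const() frees only what was actually allocated. A minimal sketch (labelled/label are illustrative names):

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct labelled {
	const char *label;
};

static int labelled_set_label(struct labelled *obj, const char *name)
{
	const char *copy = NULL;

	if (name) {
		copy = kstrdup_const(name, GFP_KERNEL);
		if (!copy)
			return -ENOMEM;
	}

	kfree_const(obj->label);	/* NULL-safe, and a no-op for .rodata strings */
	obj->label = copy;
	return 0;
}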
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 39bf2ce..7f6af42 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1653,8 +1653,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
amdgpu_amdkfd_device_init(adev);
- if (amdgpu_sriov_vf(adev))
+ if (amdgpu_sriov_vf(adev)) {
+ amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
+ }
return 0;
}
@@ -2555,9 +2557,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
goto failed;
}
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_init_data_exchange(adev);
-
amdgpu_fbdev_init(adev);
r = amdgpu_pm_sysfs_init(adev);
@@ -3269,6 +3268,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
r = amdgpu_ib_ring_tests(adev);
error:
+ amdgpu_virt_init_data_exchange(adev);
amdgpu_virt_release_full_gpu(adev, true);
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
atomic_inc(&adev->vram_lost_counter);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index fd825d3..c0396e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -159,6 +159,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (amdgpu_device_is_px(dev)) {
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
index 078f70f..d06332b 100644
--- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
+++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
@@ -174,7 +174,7 @@ static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
return r;
}
/* Retrieve checksum from mailbox2 */
- if (req == IDH_REQ_GPU_INIT_ACCESS) {
+ if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
adev->virt.fw_reserve.checksum_key =
RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 7c3b634..de5a689 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -71,7 +71,6 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000),
- SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_IB_CNTL, 0x800f0100, 0x00000100),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GFX_RB_WPTR_POLL_CNTL, 0x0000fff0, 0x00403000),
@@ -89,6 +88,7 @@ static const struct soc15_reg_golden golden_settings_sdma_4[] = {
static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104002),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104002)
};
@@ -96,6 +96,7 @@ static const struct soc15_reg_golden golden_settings_sdma_vg10[] = {
static const struct soc15_reg_golden golden_settings_sdma_vg12[] = {
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001),
+ SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0018773f, 0x00104001),
SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0018773f, 0x00104001)
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 80f5db4..0805c42 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1072,8 +1072,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
* the GPU device is not already present in the topology device
* list then return NULL. This means a new topology device has to
* be created for this GPU.
- * TODO: Rather than assiging @gpu to first topology device withtout
- * gpu attached, it will better to have more stringent check.
*/
static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
{
@@ -1081,12 +1079,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
struct kfd_topology_device *out_dev = NULL;
down_write(&topology_lock);
- list_for_each_entry(dev, &topology_device_list, list)
+ list_for_each_entry(dev, &topology_device_list, list) {
+ /* Discrete GPUs need their own topology device list
+ * entries. Don't assign them to CPU/APU nodes.
+ */
+ if (!gpu->device_info->needs_iommu_device &&
+ dev->node_props.cpu_cores_count)
+ continue;
+
if (!dev->gpu && (dev->node_props.simd_count > 0)) {
dev->gpu = gpu;
out_dev = dev;
break;
}
+ }
up_write(&topology_lock);
return out_dev;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index a851bb0..c5ba9128 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -624,12 +624,13 @@ static int dm_suspend(void *handle)
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
+ WARN_ON(adev->dm.cached_state);
+ adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
+
s3_handle_mst(adev->ddev, true);
amdgpu_dm_irq_suspend(adev);
- WARN_ON(adev->dm.cached_state);
- adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 7c89785..23a7ef9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -324,7 +324,7 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
{
enum gpio_result gpio_result;
uint32_t clock_pin = 0;
-
+ uint8_t retry = 0;
struct ddc *ddc;
enum connector_id connector_id =
@@ -353,11 +353,22 @@ bool dc_link_is_dp_sink_present(struct dc_link *link)
return present;
}
- /* Read GPIO: DP sink is present if both clock and data pins are zero */
- /* [anaumov] in DAL2, there was no check for GPIO failure */
-
- gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
- ASSERT(gpio_result == GPIO_RESULT_OK);
+ /*
+ * Read GPIO: DP sink is present if both clock and data pins are zero
+ *
+ * [W/A] When the DP cable is plugged/unplugged, some customer boards see
+ * one short pulse on clk_pin (1 V, < 1 ms); DP would then be configured as
+ * HDMI/DVI and the monitor cannot light up, so retry up to 3 times.
+ * A real passive dongle additionally needs ~3 ms to be detected.
+ */
+ do {
+ gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin);
+ ASSERT(gpio_result == GPIO_RESULT_OK);
+ if (clock_pin)
+ udelay(1000);
+ else
+ break;
+ } while (retry++ < 3);
present = (gpio_result == GPIO_RESULT_OK) && !clock_pin;
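The loop added above is a simple bounded retry: a short spurious high on the clock pin is treated as transient, the code waits ~1 ms, and gives up after three attempts. The same shape detached from the DC types (read_pin is a placeholder callback, not a DC API):

#include <linux/delay.h>
#include <linux/types.h>

static bool pin_is_low_debounced(bool (*read_pin)(void))
{
	unsigned int retry = 0;

	do {
		if (!read_pin())
			return true;	/* stable low: sink present */
		udelay(1000);		/* passive dongles may need ~3 ms */
	} while (retry++ < 3);

	return false;
}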
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index a7553b6..05840f5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -2240,7 +2240,8 @@ static void get_active_converter_info(
translate_dpcd_max_bpc(
hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
- link->dpcd_caps.dongle_caps.extendedCapValid = true;
+ if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk != 0)
+ link->dpcd_caps.dongle_caps.extendedCapValid = true;
}
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ea6becc..87bf422 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1917,6 +1917,8 @@ enum dc_status resource_map_pool_resources(
}
*/
+ calculate_phy_pix_clks(stream);
+
/* acquire new resources */
pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 0941f3c..53ccacf 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);
- if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
/* un-mute audio */
@@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
+ struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+
if (option != KEEP_ACQUIRED_RESOURCE ||
!dc->debug.az_endpoint_mute_only) {
/*only disalbe az_endpoint if power down or free*/
@@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
pipe_ctx->stream_res.audio = NULL;
}
+ if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+ /* apply the PME w/a so AZ can wake from D3 (mirrors the first-audio enable path) */
+ pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
/* TODO: notify audio driver for if audio modes list changed
* add audio mode list change flag */
@@ -1268,10 +1273,19 @@ static void program_scaler(const struct dc *dc,
pipe_ctx->plane_res.scl_data.lb_params.depth,
&pipe_ctx->stream->bit_depth_params);
- if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color)
+ if (pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color) {
+ /*
+ * The way 4:2:0 is packed, 2 channels carry the Y component and 1 channel
+ * alternates between Cb and Cr, so both channels need the pixel
+ * value for Y.
+ */
+ if (pipe_ctx->stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ color.color_r_cr = color.color_g_y;
+
pipe_ctx->stream_res.tg->funcs->set_overscan_blank_color(
pipe_ctx->stream_res.tg,
&color);
+ }
pipe_ctx->plane_res.xfm->funcs->transform_set_scaler(pipe_ctx->plane_res.xfm,
&pipe_ctx->plane_res.scl_data);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index cfcc54f..4058b59 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1190,7 +1190,8 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
tf = plane_state->in_transfer_func;
if (plane_state->gamma_correction &&
- !plane_state->gamma_correction->is_identity
+ !dpp_base->ctx->dc->debug.always_use_regamma
+ && !plane_state->gamma_correction->is_identity
&& dce_use_lut(plane_state->format))
dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
@@ -2120,6 +2121,15 @@ static void dcn10_blank_pixel_data(
color_space = stream->output_color_space;
color_space_to_black_color(dc, color_space, &black_color);
+ /*
+ * The way 4:2:0 is packed, 2 channels carry the Y component and 1 channel
+ * alternates between Cb and Cr, so both channels need the pixel
+ * value for Y.
+ */
+ if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ black_color.color_r_cr = black_color.color_g_y;
+
+
if (stream_res->tg->funcs->set_blank_color)
stream_res->tg->funcs->set_blank_color(
stream_res->tg,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
index a63e006..1546bc4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
@@ -984,6 +984,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
break;
case amd_pp_dpp_clock:
pclk_vol_table = pinfo->vdd_dep_on_dppclk;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index 5b67f57..45629f2 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -1528,8 +1528,21 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
efuse = efuse >> 24;
if (hwmgr->chip_id == CHIP_POLARIS10) {
- min = 1000;
- max = 2300;
+ if (hwmgr->is_kicker) {
+ min = 1200;
+ max = 2500;
+ } else {
+ min = 1000;
+ max = 2300;
+ }
+ } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+ if (hwmgr->is_kicker) {
+ min = 900;
+ max = 2100;
+ } else {
+ min = 1100;
+ max = 2100;
+ }
} else {
min = 1100;
max = 2100;
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 8e28e73..3915473 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -98,6 +98,8 @@
#define DP0_STARTVAL 0x064c
#define DP0_ACTIVEVAL 0x0650
#define DP0_SYNCVAL 0x0654
+#define SYNCVAL_HS_POL_ACTIVE_LOW (1 << 15)
+#define SYNCVAL_VS_POL_ACTIVE_LOW (1 << 31)
#define DP0_MISC 0x0658
#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
#define BPC_6 (0 << 5)
@@ -142,6 +144,8 @@
#define DP0_LTLOOPCTRL 0x06d8
#define DP0_SNKLTCTRL 0x06e4
+#define DP1_SRCCTRL 0x07a0
+
/* PHY */
#define DP_PHY_CTRL 0x0800
#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
@@ -150,6 +154,7 @@
#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
+#define PHY_2LANE BIT(2) /* PHY Enable 2 lanes */
#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
@@ -540,6 +545,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
unsigned long rate;
u32 value;
int ret;
+ u32 dp_phy_ctrl;
rate = clk_get_rate(tc->refclk);
switch (rate) {
@@ -564,7 +570,10 @@ static int tc_aux_link_setup(struct tc_data *tc)
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
tc_write(SYS_PLLPARAM, value);
- tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
+ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN;
+ if (tc->link.base.num_lanes == 2)
+ dp_phy_ctrl |= PHY_2LANE;
+ tc_write(DP_PHY_CTRL, dp_phy_ctrl);
/*
* Initially PLLs are in bypass. Force PLL parameter update,
@@ -719,7 +728,9 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
- tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
+ tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0) |
+ ((mode->flags & DRM_MODE_FLAG_NHSYNC) ? SYNCVAL_HS_POL_ACTIVE_LOW : 0) |
+ ((mode->flags & DRM_MODE_FLAG_NVSYNC) ? SYNCVAL_VS_POL_ACTIVE_LOW : 0));
tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
@@ -829,12 +840,11 @@ static int tc_main_link_setup(struct tc_data *tc)
if (!tc->mode)
return -EINVAL;
- /* from excel file - DP0_SrcCtrl */
- tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
- DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
- DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
- /* from excel file - DP1_SrcCtrl */
- tc_write(0x07a0, 0x00003083);
+ tc_write(DP0_SRCCTRL, tc_srcctrl(tc));
+ /* SSCG and BW27 on DP1 must be set to the same as on DP0 */
+ tc_write(DP1_SRCCTRL,
+ (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) |
+ ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0));
rate = clk_get_rate(tc->refclk);
switch (rate) {
@@ -855,8 +865,11 @@ static int tc_main_link_setup(struct tc_data *tc)
}
value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
tc_write(SYS_PLLPARAM, value);
+
/* Setup Main Link */
- dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
+ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN;
+ if (tc->link.base.num_lanes == 2)
+ dp_phy_ctrl |= PHY_2LANE;
tc_write(DP_PHY_CTRL, dp_phy_ctrl);
msleep(100);
@@ -1105,10 +1118,20 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
static enum drm_mode_status tc_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct tc_data *tc = connector_to_tc(connector);
+ u32 req, avail;
+ u32 bits_per_pixel = 24;
+
/* DPI interface clock limitation: upto 154 MHz */
if (mode->clock > 154000)
return MODE_CLOCK_HIGH;
+ req = mode->clock * bits_per_pixel / 8;
+ avail = tc->link.base.num_lanes * tc->link.base.rate;
+
+ if (req > avail)
+ return MODE_BAD;
+
return MODE_OK;
}
@@ -1195,6 +1218,10 @@ static int tc_bridge_attach(struct drm_bridge *bridge)
drm_display_info_set_bus_formats(&tc->connector.display_info,
&bus_format, 1);
+ tc->connector.display_info.bus_flags =
+ DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_NEGEDGE;
drm_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
return 0;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index f77bff5..23397c0 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -3192,7 +3192,7 @@ EXPORT_SYMBOL(drm_atomic_helper_suspend);
int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
struct drm_modeset_acquire_ctx *ctx)
{
- int i;
+ int i, ret;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct drm_connector *connector;
@@ -3211,7 +3211,11 @@ int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
for_each_new_connector_in_state(state, connector, new_conn_state, i)
state->connectors[i].old_state = connector->state;
- return drm_atomic_commit(state);
+ ret = drm_atomic_commit(state);
+
+ state->acquire_ctx = NULL;
+
+ return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_commit_duplicated_state);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index ba8cfe6..e2f775d 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -36,6 +36,8 @@
#include <drm/drmP.h>
#include "drm_legacy.h"
+#include <linux/nospec.h>
+
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
struct drm_local_map *map)
{
@@ -1417,6 +1419,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
idx, dma->buf_count - 1);
return -EINVAL;
}
+ idx = array_index_nospec(idx, dma->buf_count);
buf = dma->buflist[idx];
if (buf->file_priv != file_priv) {
DRM_ERROR("Process %d freeing buffer not owned\n",
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index fe6bfaf..086f2ad 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -521,7 +521,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
object_count = cl->object_count;
- object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32));
+ object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
+ array_size(object_count, sizeof(__u32)));
if (IS_ERR(object_ids))
return PTR_ERR(object_ids);
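Replacing the open-coded multiplication with array_size() from <linux/overflow.h> makes the size computation saturate to SIZE_MAX on overflow, so memdup_user() fails instead of under-allocating. A short sketch of the same idea with an illustrative helper:

#include <linux/overflow.h>
#include <linux/string.h>
#include <linux/types.h>

static void *copy_id_array(const void __user *uptr, size_t count)
{
	/* array_size() returns SIZE_MAX on overflow, so memdup_user() then fails */
	return memdup_user(uptr, array_size(count, sizeof(u32)));
}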
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 02db9ac..a3104d7 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode)
if (mode->hsync)
return mode->hsync;
- if (mode->htotal < 0)
+ if (mode->htotal <= 0)
return 0;
calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 47cc932..280c851 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1821,6 +1821,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static inline bool
+__vma_matches(struct vm_area_struct *vma, struct file *filp,
+ unsigned long addr, unsigned long size)
+{
+ if (vma->vm_file != filp)
+ return false;
+
+ return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size;
+}
+
/**
* i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
* it is mapped to.
@@ -1879,7 +1889,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
return -EINTR;
}
vma = find_vma(mm, addr);
- if (vma)
+ if (vma && __vma_matches(vma, obj->base.filp, addr, args->size))
vma->vm_page_prot =
pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
else
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index c9af348..b4b1f9c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1085,7 +1085,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder,
return DDI_CLK_SEL_TBT_810;
default:
MISSING_CASE(clock);
- break;
+ return DDI_CLK_SEL_NONE;
}
case DPLL_ID_ICL_MGPLL1:
case DPLL_ID_ICL_MGPLL2:
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8fc61e96..50d5649 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -209,6 +209,16 @@ struct intel_fbdev {
unsigned long vma_flags;
async_cookie_t cookie;
int preferred_bpp;
+
+ /* Whether or not fbdev hpd processing is temporarily suspended */
+ bool hpd_suspended : 1;
+ /* Set when a hotplug was received while HPD processing was
+ * suspended
+ */
+ bool hpd_waiting : 1;
+
+ /* Protects hpd_suspended */
+ struct mutex hpd_lock;
};
struct intel_encoder {
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index fb2f9fc..6f91634 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -334,8 +334,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
bool *enabled, int width, int height)
{
struct drm_i915_private *dev_priv = to_i915(fb_helper->dev);
- unsigned long conn_configured, conn_seq, mask;
unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
+ unsigned long conn_configured, conn_seq;
int i, j;
bool *save_enabled;
bool fallback = true, ret = true;
@@ -353,10 +353,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
drm_modeset_backoff(&ctx);
memcpy(save_enabled, enabled, count);
- mask = GENMASK(count - 1, 0);
+ conn_seq = GENMASK(count - 1, 0);
conn_configured = 0;
retry:
- conn_seq = conn_configured;
for (i = 0; i < count; i++) {
struct drm_fb_helper_connector *fb_conn;
struct drm_connector *connector;
@@ -369,7 +368,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
if (conn_configured & BIT(i))
continue;
- if (conn_seq == 0 && !connector->has_tile)
+ /* First pass, only consider tiled connectors */
+ if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile)
continue;
if (connector->status == connector_status_connected)
@@ -473,8 +473,10 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
conn_configured |= BIT(i);
}
- if ((conn_configured & mask) != mask && conn_configured != conn_seq)
+ if (conn_configured != conn_seq) { /* repeat until no more are found */
+ conn_seq = conn_configured;
goto retry;
+ }
/*
* If the BIOS didn't enable everything it could, fall back to have the
@@ -677,6 +679,7 @@ int intel_fbdev_init(struct drm_device *dev)
if (ifbdev == NULL)
return -ENOMEM;
+ mutex_init(&ifbdev->hpd_lock);
drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs);
if (!intel_fbdev_init_bios(dev, ifbdev))
@@ -750,6 +753,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv)
intel_fbdev_destroy(ifbdev);
}
+/* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD
+ * processing, fbdev will perform a full connector reprobe if a hotplug event
+ * was received while HPD was suspended.
+ */
+static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state)
+{
+ bool send_hpd = false;
+
+ mutex_lock(&ifbdev->hpd_lock);
+ ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED;
+ send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting;
+ ifbdev->hpd_waiting = false;
+ mutex_unlock(&ifbdev->hpd_lock);
+
+ if (send_hpd) {
+ DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n");
+ drm_fb_helper_hotplug_event(&ifbdev->helper);
+ }
+}
+
void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -771,6 +794,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
*/
if (state != FBINFO_STATE_RUNNING)
flush_work(&dev_priv->fbdev_suspend_work);
+
console_lock();
} else {
/*
@@ -798,17 +822,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
drm_fb_helper_set_suspend(&ifbdev->helper, state);
console_unlock();
+
+ intel_fbdev_hpd_set_suspend(ifbdev, state);
}
void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+ bool send_hpd;
if (!ifbdev)
return;
intel_fbdev_sync(ifbdev);
- if (ifbdev->vma || ifbdev->helper.deferred_setup)
+
+ mutex_lock(&ifbdev->hpd_lock);
+ send_hpd = !ifbdev->hpd_suspended;
+ ifbdev->hpd_waiting = true;
+ mutex_unlock(&ifbdev->hpd_lock);
+
+ if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup))
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
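The hpd_lock/hpd_suspended/hpd_waiting trio added above is a small defer-and-replay scheme: while fbdev is suspended a hotplug only sets a flag, and on resume one reprobe is issued if anything arrived in the meantime. A stripped-down sketch of that scheme (hpd_state and handle() are illustrative, not the i915 types):

#include <linux/mutex.h>
#include <linux/types.h>

struct hpd_state {
	struct mutex lock;
	bool suspended;
	bool waiting;
};

static void hpd_event(struct hpd_state *s, void (*handle)(void))
{
	bool deliver;

	mutex_lock(&s->lock);
	deliver = !s->suspended;
	if (!deliver)
		s->waiting = true;	/* remember it for resume */
	mutex_unlock(&s->lock);

	if (deliver)
		handle();
}

static void hpd_set_suspend(struct hpd_state *s, bool suspend, void (*handle)(void))
{
	bool replay;

	mutex_lock(&s->lock);
	s->suspended = suspend;
	replay = !suspend && s->waiting;
	s->waiting = false;
	mutex_unlock(&s->lock);

	if (replay)
		handle();		/* one delayed reprobe, outside the lock */
}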
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index bf5f294..611ac34 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -368,8 +368,10 @@ static int meson_probe_remote(struct platform_device *pdev,
remote_node = of_graph_get_remote_port_parent(ep);
if (!remote_node ||
remote_node == parent || /* Ignore parent endpoint */
- !of_device_is_available(remote_node))
+ !of_device_is_available(remote_node)) {
+ of_node_put(remote_node);
continue;
+ }
count += meson_probe_remote(pdev, match, remote, remote_node);
@@ -388,10 +390,13 @@ static int meson_drv_probe(struct platform_device *pdev)
for_each_endpoint_of_node(np, ep) {
remote = of_graph_get_remote_port_parent(ep);
- if (!remote || !of_device_is_available(remote))
+ if (!remote || !of_device_is_available(remote)) {
+ of_node_put(remote);
continue;
+ }
count += meson_probe_remote(pdev, &match, np, remote);
+ of_node_put(remote);
}
if (count && !match)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
index 14fc7c2a..c9962a3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -331,7 +331,7 @@ static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
if (hw_ctl && hw_ctl->ops.get_flush_register)
flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
- if (flush_register == 0)
+ if (!(flush_register & hw_ctl->ops.get_pending_flush(hw_ctl)))
new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
-1, 0);
spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 41bec57..3120562 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -17,7 +17,7 @@
* | |
* | |
* +---------+ | +----------+ | +----+
- * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0pllbyte
+ * dsi0vco_clk ---| out_div |--o--| divl_3_0 |--o--| /8 |-- dsi0_phy_pll_out_byteclk
* +---------+ | +----------+ | +----+
* | |
* | | dsi0_pll_by_2_bit_clk
@@ -25,7 +25,7 @@
* | | +----+ | |\ dsi0_pclk_mux
* | |--| /2 |--o--| \ |
* | | +----+ | \ | +---------+
- * | --------------| |--o--| div_7_4 |-- dsi0pll
+ * | --------------| |--o--| div_7_4 |-- dsi0_phy_pll_out_dsiclk
* |------------------------------| / +---------+
* | +-----+ | /
* -----------| /4? |--o----------|/
@@ -690,7 +690,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
hws[num++] = hw;
- snprintf(clk_name, 32, "dsi%dpllbyte", pll_10nm->id);
+ snprintf(clk_name, 32, "dsi%d_phy_pll_out_byteclk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pll_bit_clk", pll_10nm->id);
/* DSI Byte clock = VCO_CLK / OUT_DIV / BIT_DIV / 8 */
@@ -739,7 +739,7 @@ static int pll_10nm_register(struct dsi_pll_10nm *pll_10nm)
hws[num++] = hw;
- snprintf(clk_name, 32, "dsi%dpll", pll_10nm->id);
+ snprintf(clk_name, 32, "dsi%d_phy_pll_out_dsiclk", pll_10nm->id);
snprintf(parent, 32, "dsi%d_pclk_mux", pll_10nm->id);
/* PIX CLK DIV : DIV_CTRL_7_4*/
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 5e7ba86..48f97a0 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -1597,6 +1597,7 @@ void sde_cp_crtc_suspend(struct drm_crtc *crtc)
struct sde_crtc *sde_crtc = NULL;
struct sde_cp_node *prop_node = NULL, *n = NULL;
bool ad_suspend = false;
+ unsigned long irq_flags;
if (!crtc) {
DRM_ERROR("crtc %pK\n", crtc);
@@ -1623,6 +1624,10 @@ void sde_cp_crtc_suspend(struct drm_crtc *crtc)
}
mutex_unlock(&sde_crtc->crtc_cp_lock);
+ spin_lock_irqsave(&sde_crtc->ltm_lock, irq_flags);
+ sde_crtc->ltm_hist_en = false;
+ spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
+
if (ad_suspend)
sde_cp_ad_set_prop(sde_crtc, AD_SUSPEND);
}
@@ -2077,6 +2082,12 @@ static void sde_cp_update_list(struct sde_cp_node *prop_node,
list_add_tail(&prop_node->active_list,
&crtc->ad_active);
break;
+ case SDE_CP_CRTC_DSPP_LTM_SET_BUF:
+ case SDE_CP_CRTC_DSPP_LTM_QUEUE_BUF:
+ if (dirty_list)
+ list_add_tail(&prop_node->dirty_list,
+ &crtc->dirty_list);
+ break;
default:
/* color processing properties handle here */
if (dirty_list)
@@ -2577,7 +2588,7 @@ int sde_cp_hist_interrupt(struct drm_crtc *crtc_drm, bool en,
/* needs to be called within ltm_buffer_lock mutex */
static void _sde_cp_crtc_free_ltm_buffer(struct sde_crtc *sde_crtc, void *cfg)
{
- u32 i = 0;
+ u32 i = 0, buffer_count = 0;
unsigned long irq_flags;
if (!sde_crtc) {
@@ -2602,13 +2613,13 @@ static void _sde_cp_crtc_free_ltm_buffer(struct sde_crtc *sde_crtc, void *cfg)
return;
}
+ buffer_count = sde_crtc->ltm_buffer_cnt;
sde_crtc->ltm_buffer_cnt = 0;
INIT_LIST_HEAD(&sde_crtc->ltm_buf_free);
INIT_LIST_HEAD(&sde_crtc->ltm_buf_busy);
spin_unlock_irqrestore(&sde_crtc->ltm_lock, irq_flags);
- for (i = 0; i < sde_crtc->ltm_buffer_cnt && sde_crtc->ltm_buffers[i];
- i++) {
+ for (i = 0; i < buffer_count && sde_crtc->ltm_buffers[i]; i++) {
msm_gem_put_vaddr(sde_crtc->ltm_buffers[i]->gem);
drm_framebuffer_put(sde_crtc->ltm_buffers[i]->fb);
msm_gem_put_iova(sde_crtc->ltm_buffers[i]->gem,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index dcaa05e..fce411b 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -99,10 +99,13 @@ static int sde_backlight_device_update_status(struct backlight_device *bd)
}
if (c_conn->ops.set_backlight) {
- event.type = DRM_EVENT_SYS_BACKLIGHT;
- event.length = sizeof(u32);
- msm_mode_object_event_notify(&c_conn->base.base,
+ /* skip notifying user space if bl is 0 */
+ if (brightness != 0) {
+ event.type = DRM_EVENT_SYS_BACKLIGHT;
+ event.length = sizeof(u32);
+ msm_mode_object_event_notify(&c_conn->base.base,
c_conn->base.dev, &event, (u8 *)&brightness);
+ }
rc = c_conn->ops.set_backlight(&c_conn->base,
c_conn->display, bl_lvl);
c_conn->unset_bl_level = 0;
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
index bc874f1..0fdc41b 100644
--- a/drivers/gpu/drm/msm/sde/sde_fence.c
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -295,22 +295,16 @@ static void _sde_fence_trigger(struct sde_fence_context *ctx,
unsigned long flags;
struct sde_fence *fc, *next;
bool is_signaled = false;
- struct list_head local_list_head;
- INIT_LIST_HEAD(&local_list_head);
+ kref_get(&ctx->kref);
spin_lock(&ctx->list_lock);
if (list_empty(&ctx->fence_list_head)) {
SDE_DEBUG("nothing to trigger!\n");
- spin_unlock(&ctx->list_lock);
- return;
+ goto end;
}
- list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list)
- list_move(&fc->fence_list, &local_list_head);
- spin_unlock(&ctx->list_lock);
-
- list_for_each_entry_safe(fc, next, &local_list_head, fence_list) {
+ list_for_each_entry_safe(fc, next, &ctx->fence_list_head, fence_list) {
spin_lock_irqsave(&ctx->lock, flags);
fc->base.error = error ? -EBUSY : 0;
fc->base.timestamp = ts;
@@ -320,20 +314,20 @@ static void _sde_fence_trigger(struct sde_fence_context *ctx,
if (is_signaled) {
list_del_init(&fc->fence_list);
dma_fence_put(&fc->base);
- } else {
- spin_lock(&ctx->list_lock);
- list_move(&fc->fence_list, &ctx->fence_list_head);
- spin_unlock(&ctx->list_lock);
}
}
+end:
+ spin_unlock(&ctx->list_lock);
+ kref_put(&ctx->kref, sde_fence_destroy);
}
int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val,
uint32_t offset)
{
uint32_t trigger_value;
- int fd, rc = -EINVAL;
+ int fd = -1, rc = -EINVAL;
unsigned long flags;
+ struct sde_fence *fc;
if (!ctx || !val) {
SDE_ERROR("invalid argument(s), fence %d, pval %d\n",
@@ -351,22 +345,26 @@ int sde_fence_create(struct sde_fence_context *ctx, uint64_t *val,
*/
spin_lock_irqsave(&ctx->lock, flags);
trigger_value = ctx->commit_count + offset;
-
spin_unlock_irqrestore(&ctx->lock, flags);
- fd = _sde_fence_create_fd(ctx, trigger_value);
- *val = fd;
- SDE_DEBUG("fence_create::fd:%d trigger:%d commit:%d offset:%d\n",
+ spin_lock(&ctx->list_lock);
+ list_for_each_entry(fc, &ctx->fence_list_head, fence_list) {
+ if (trigger_value == fc->base.seqno) {
+ fd = fc->fd;
+ break;
+ }
+ }
+ spin_unlock(&ctx->list_lock);
+
+ if (fd < 0) {
+ fd = _sde_fence_create_fd(ctx, trigger_value);
+ *val = fd;
+ SDE_DEBUG("fd:%d trigger:%d commit:%d offset:%d\n",
fd, trigger_value, ctx->commit_count, offset);
+ }
SDE_EVT32(ctx->drm_id, trigger_value, fd);
-
- if (fd >= 0) {
- rc = 0;
- _sde_fence_trigger(ctx, ktime_get(), false);
- } else {
- rc = fd;
- }
+ rc = (fd >= 0) ? 0 : fd;
return rc;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 9ea9c09..47bfa93 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1636,8 +1636,6 @@ static int sde_plane_rot_atomic_check(struct drm_plane *plane,
struct sde_rect src;
bool q16_data = true;
- msm_fmt = msm_framebuffer_format(state->fb);
- fmt = to_sde_format(msm_fmt);
POPULATE_RECT(&src, state->src_x, state->src_y,
state->src_w, state->src_h, q16_data);
/*
@@ -1672,8 +1670,13 @@ static int sde_plane_rot_atomic_check(struct drm_plane *plane,
goto exit;
}
+ if (!sde_plane_enabled(state))
+ goto exit;
+
/* check for valid formats supported by inline rot */
sde_kms = to_sde_kms(priv->kms);
+ msm_fmt = msm_framebuffer_format(state->fb);
+ fmt = to_sde_format(msm_fmt);
ret = sde_format_validate_fmt(&sde_kms->base, fmt,
psde->pipe_sblk->in_rot_format_list);
}
@@ -3153,11 +3156,9 @@ static int sde_plane_sspp_atomic_update(struct drm_plane *plane,
&pstate->property_state)) >= 0) {
dirty_prop_flag = plane_prop_array[idx];
pstate->dirty |= dirty_prop_flag;
- if (dirty_prop_flag == SDE_PLANE_DIRTY_ALL &&
- idx != PLANE_PROP_MULTIRECT_MODE &&
- idx != PLANE_PROP_COLOR_FILL)
- SDE_ERROR("executing full mode set, prp_idx %d\n", idx);
- break;
+
+ if (dirty_prop_flag == SDE_PLANE_DIRTY_ALL)
+ break;
}
/**
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index b21f877..725d34f 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -30,7 +30,14 @@
#define RSC_MODE_INSTRUCTION_TIME 100
#define RSC_MODE_THRESHOLD_OVERHEAD 2700
-#define MIN_THRESHOLD_TIME 0
+
+/**
+ * rsc_min_threshold will be set to MIN_THRESHOLD_OVERHEAD_TIME which
+ * takes into account back off time + overhead from RSC/RSC_WRAPPER. The
+ * overhead buffer time is required to be greater than 14. For measure,
+ * this value assumes 18.
+ */
+#define MIN_THRESHOLD_OVERHEAD_TIME 18
#define DEFAULT_PANEL_FPS 60
#define DEFAULT_PANEL_JITTER_NUMERATOR 2
@@ -329,7 +336,7 @@ static int sde_rsc_clk_enable(struct sde_power_handle *phandle,
}
static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
- struct sde_rsc_cmd_config *cmd_config)
+ struct sde_rsc_cmd_config *cmd_config, enum sde_rsc_state state)
{
const u32 cxo_period_ns = 52;
u64 rsc_backoff_time_ns = rsc->backoff_time_ns;
@@ -380,7 +387,12 @@ static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
line_time_ns = div_u64(line_time_ns, rsc->cmd_config.vtotal);
prefill_time_ns = line_time_ns * rsc->cmd_config.prefill_lines;
- total = frame_time_ns - frame_jitter - prefill_time_ns;
+ /* only take jitter into account for CMD mode */
+ if (state == SDE_RSC_CMD_STATE)
+ total = frame_time_ns - frame_jitter - prefill_time_ns;
+ else
+ total = frame_time_ns - prefill_time_ns;
+
if (total < 0) {
pr_err("invalid total time period time:%llu jiter_time:%llu blanking time:%llu\n",
frame_time_ns, frame_jitter, prefill_time_ns);
@@ -421,7 +433,7 @@ static u32 sde_rsc_timer_calculate(struct sde_rsc_priv *rsc,
/* mode 2 is infinite */
rsc->timer_config.rsc_time_slot_2_ns = 0xFFFFFFFF;
- rsc->timer_config.min_threshold_time_ns = 0;
+ rsc->timer_config.min_threshold_time_ns = MIN_THRESHOLD_OVERHEAD_TIME;
rsc->timer_config.bwi_threshold_time_ns =
rsc->timer_config.rsc_time_slot_0_ns;
@@ -461,7 +473,7 @@ static int sde_rsc_switch_to_cmd(struct sde_rsc_priv *rsc,
/* update timers - might not be available at next switch */
if (config)
- sde_rsc_timer_calculate(rsc, config);
+ sde_rsc_timer_calculate(rsc, config, SDE_RSC_CMD_STATE);
/**
* rsc clients can still send config at any time. If a config is
@@ -601,7 +613,7 @@ static int sde_rsc_switch_to_vid(struct sde_rsc_priv *rsc,
/* update timers - might not be available at next switch */
if (config)
- sde_rsc_timer_calculate(rsc, config);
+ sde_rsc_timer_calculate(rsc, config, SDE_RSC_VID_STATE);
/**
* rsc clients can still send config at any time. If a config is
@@ -1541,7 +1553,7 @@ static int sde_rsc_probe(struct platform_device *pdev)
goto sde_rsc_fail;
}
- if (sde_rsc_timer_calculate(rsc, NULL))
+ if (sde_rsc_timer_calculate(rsc, NULL, SDE_RSC_IDLE_STATE))
goto sde_rsc_fail;
sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
index 816ccae..8675613 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/falcon.c
@@ -22,6 +22,7 @@
#include <engine/falcon.h>
#include <core/gpuobj.h>
+#include <subdev/mc.h>
#include <subdev/timer.h>
#include <engine/fifo.h>
@@ -107,8 +108,10 @@ nvkm_falcon_fini(struct nvkm_engine *engine, bool suspend)
}
}
- nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
- nvkm_wr32(device, base + 0x014, 0xffffffff);
+ if (nvkm_mc_enabled(device, engine->subdev.index)) {
+ nvkm_mask(device, base + 0x048, 0x00000003, 0x00000000);
+ nvkm_wr32(device, base + 0x014, 0xffffffff);
+ }
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 3695cde..07914e36 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -132,11 +132,12 @@ nvkm_therm_update(struct nvkm_therm *therm, int mode)
duty = nvkm_therm_update_linear(therm);
break;
case NVBIOS_THERM_FAN_OTHER:
- if (therm->cstate)
+ if (therm->cstate) {
duty = therm->cstate;
- else
+ poll = false;
+ } else {
duty = nvkm_therm_update_linear_fallback(therm);
- poll = false;
+ }
break;
}
immd = false;
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index dec1e08..6a8fb6f 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (radeon_is_px(dev)) {
+ dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP);
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
index 3105965..5a48548 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c
@@ -147,7 +147,7 @@ static int cdn_dp_mailbox_validate_receive(struct cdn_dp_device *dp,
}
static int cdn_dp_mailbox_read_receive(struct cdn_dp_device *dp,
- u8 *buff, u8 buff_size)
+ u8 *buff, u16 buff_size)
{
u32 i;
int ret;
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index d7950b5..e30b1f5 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -717,17 +717,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
remote = of_graph_get_remote_port_parent(ep);
if (!remote)
continue;
+ of_node_put(remote);
/* does this node match any registered engines? */
list_for_each_entry(frontend, &drv->frontend_list, list) {
if (remote == frontend->node) {
- of_node_put(remote);
of_node_put(port);
+ of_node_put(ep);
return frontend;
}
}
}
-
+ of_node_put(port);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index d5240b7..adcdf94 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -169,6 +169,13 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
}
/*
+ * At least on H6, some registers have some bits set by default
+ * which may cause issues. Clear them here.
+ */
+ writel(0, regs + TCON_TOP_PORT_SEL_REG);
+ writel(0, regs + TCON_TOP_GATE_SRC_REG);
+
+ /*
* TCON TOP has two muxes, which select parent clock for each TCON TV
* channel clock. Parent could be either TCON TV or TVE clock. For now
* we leave this fixed to TCON TV, since TVE driver for R40 is not yet
diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c
index 54d9651..a08766d 100644
--- a/drivers/gpu/drm/v3d/v3d_bo.c
+++ b/drivers/gpu/drm/v3d/v3d_bo.c
@@ -293,6 +293,7 @@ v3d_prime_import_sg_table(struct drm_device *dev,
bo->resv = attach->dmabuf->resv;
bo->sgt = sgt;
+ obj->import_attach = attach;
v3d_bo_get_pages(bo);
v3d_mmu_insert_ptes(bo);
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 629f404..ab39315 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -315,12 +315,14 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
vc4_get_scaling_mode(vc4_state->src_h[1],
vc4_state->crtc_h);
- /* YUV conversion requires that horizontal scaling be enabled,
- * even on a plane that's otherwise 1:1. Looks like only PPF
- * works in that case, so let's pick that one.
+ /* YUV conversion requires that horizontal scaling be enabled
+ * on the UV plane even if vc4_get_scaling_mode() returned
+ * VC4_SCALING_NONE (which can happen when the down-scaling
+ * ratio is 0.5). Let's force it to VC4_SCALING_PPF in this
+ * case.
*/
- if (vc4_state->is_unity)
- vc4_state->x_scaling[0] = VC4_SCALING_PPF;
+ if (vc4_state->x_scaling[1] == VC4_SCALING_NONE)
+ vc4_state->x_scaling[1] = VC4_SCALING_PPF;
} else {
vc4_state->is_yuv = false;
vc4_state->x_scaling[1] = VC4_SCALING_NONE;
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 0e5620f..6887db8 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -471,31 +471,31 @@ static int __init vgem_init(void)
if (!vgem_device)
return -ENOMEM;
- ret = drm_dev_init(&vgem_device->drm, &vgem_driver, NULL);
- if (ret)
- goto out_free;
-
vgem_device->platform =
platform_device_register_simple("vgem", -1, NULL, 0);
if (IS_ERR(vgem_device->platform)) {
ret = PTR_ERR(vgem_device->platform);
- goto out_fini;
+ goto out_free;
}
dma_coerce_mask_and_coherent(&vgem_device->platform->dev,
DMA_BIT_MASK(64));
+ ret = drm_dev_init(&vgem_device->drm, &vgem_driver,
+ &vgem_device->platform->dev);
+ if (ret)
+ goto out_unregister;
/* Final step: expose the device/driver to userspace */
ret = drm_dev_register(&vgem_device->drm, 0);
if (ret)
- goto out_unregister;
+ goto out_fini;
return 0;
-out_unregister:
- platform_device_unregister(vgem_device->platform);
out_fini:
drm_dev_fini(&vgem_device->drm);
+out_unregister:
+ platform_device_unregister(vgem_device->platform);
out_free:
kfree(vgem_device);
return ret;
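Because the vgem hunk interleaves added and removed lines, the resulting control flow is easier to see in a condensed sketch (hypothetical "demo" names, DMA-mask setup omitted): the platform device must exist before drm_dev_init() can take it as parent, and teardown runs in the reverse order of setup.

    #include <linux/platform_device.h>
    #include <linux/slab.h>
    #include <drm/drm_drv.h>

    struct demo_device {
            struct drm_device drm;
            struct platform_device *platform;
    };

    static struct demo_device *demo;
    static struct drm_driver demo_driver;   /* placeholder ops */

    static int __init demo_init(void)
    {
            int ret;

            demo = kzalloc(sizeof(*demo), GFP_KERNEL);
            if (!demo)
                    return -ENOMEM;

            demo->platform =
                    platform_device_register_simple("demo", -1, NULL, 0);
            if (IS_ERR(demo->platform)) {
                    ret = PTR_ERR(demo->platform);
                    goto out_free;
            }

            ret = drm_dev_init(&demo->drm, &demo_driver,
                               &demo->platform->dev);
            if (ret)
                    goto out_unregister;

            ret = drm_dev_register(&demo->drm, 0);
            if (ret)
                    goto out_fini;

            return 0;

    out_fini:
            drm_dev_fini(&demo->drm);
    out_unregister:
            platform_device_unregister(demo->platform);
    out_free:
            kfree(demo);
            return ret;
    }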
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 875fca6..1ea2dd3 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include "vkms_drv.h"
#include <drm/drm_atomic_helper.h>
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 6e728b8..b1201c1 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -1,9 +1,4 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include <linux/module.h>
#include <drm/drm_gem.h>
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 07be29f..e018752 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
#ifndef _VKMS_DRV_H_
#define _VKMS_DRV_H_
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index c7e3836..ca4a74e 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include <linux/shmem_fs.h>
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index 901012c..5697148 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include "vkms_drv.h"
#include <drm/drm_crtc_helper.h>
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 9f75b1e..ce043b7 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -1,10 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
+// SPDX-License-Identifier: GPL-2.0+
#include "vkms_drv.h"
#include <drm/drm_plane_helper.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index bb6dbbe..c72b942 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -627,13 +627,16 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
+ int ret = 0;
- if (intel_iommu_enabled &&
+ ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64));
+ if (dev_priv->map_mode != vmw_dma_phys &&
(sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
DRM_INFO("Restricting DMA addresses to 44 bits.\n");
- return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
+ return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44));
}
- return 0;
+
+ return ret;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index f0ab6b2..c3e2022 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3843,7 +3843,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv,
*p_fence = NULL;
}
- return 0;
+ return ret;
}
/**
diff --git a/drivers/gpu/ipu-v3/ipu-image-convert.c b/drivers/gpu/ipu-v3/ipu-image-convert.c
index f408196..91653ad 100644
--- a/drivers/gpu/ipu-v3/ipu-image-convert.c
+++ b/drivers/gpu/ipu-v3/ipu-image-convert.c
@@ -1524,7 +1524,7 @@ int ipu_image_convert_queue(struct ipu_image_convert_run *run)
EXPORT_SYMBOL_GPL(ipu_image_convert_queue);
/* Abort any active or pending conversions for this context */
-void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+static void __ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
{
struct ipu_image_convert_chan *chan = ctx->chan;
struct ipu_image_convert_priv *priv = chan->priv;
@@ -1551,7 +1551,7 @@ void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
need_abort = (run_count || active_run);
- ctx->aborting = need_abort;
+ ctx->aborting = true;
spin_unlock_irqrestore(&chan->irqlock, flags);
@@ -1572,7 +1572,11 @@ void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
dev_warn(priv->ipu->dev, "%s: timeout\n", __func__);
force_abort(ctx);
}
+}
+void ipu_image_convert_abort(struct ipu_image_convert_ctx *ctx)
+{
+ __ipu_image_convert_abort(ctx);
ctx->aborting = false;
}
EXPORT_SYMBOL_GPL(ipu_image_convert_abort);
@@ -1586,7 +1590,7 @@ void ipu_image_convert_unprepare(struct ipu_image_convert_ctx *ctx)
bool put_res;
/* make sure no runs are hanging around */
- ipu_image_convert_abort(ctx);
+ __ipu_image_convert_abort(ctx);
dev_dbg(priv->ipu->dev, "%s: task %u: removing ctx %p\n", __func__,
chan->ic_task, ctx);
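The ipu-image-convert change splits the abort path into an internal helper that leaves ctx->aborting set, so that unprepare can tear the context down without the flag being cleared underneath it; only the exported entry point resets the flag. A minimal sketch of that wrapper pattern, with hypothetical names:

    #include <linux/types.h>

    struct frob_ctx {
            bool aborting;
    };

    /* Internal helper: does the abort and leaves the flag set. */
    static void __frob_abort(struct frob_ctx *ctx)
    {
            ctx->aborting = true;
            /* ... force-abort / wait for in-flight runs to drain ... */
    }

    void frob_abort(struct frob_ctx *ctx)
    {
            __frob_abort(ctx);
            ctx->aborting = false;  /* context stays usable after an abort */
    }

    void frob_unprepare(struct frob_ctx *ctx)
    {
            __frob_abort(ctx);      /* flag stays set: context is going away */
            /* ... remove the context and free its resources ... */
    }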
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 7334c29..9aafd04 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1229,6 +1229,24 @@ static void adreno_rscc_probe(struct kgsl_device *device)
dev_warn(device->dev, "rscc ioremap failed\n");
}
+static void adreno_isense_probe(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct resource *res;
+
+ res = platform_get_resource_byname(device->pdev, IORESOURCE_MEM,
+ "isense_cntl");
+ if (res == NULL)
+ return;
+
+ adreno_dev->isense_base = res->start - device->reg_phys;
+ adreno_dev->isense_len = resource_size(res);
+ adreno_dev->isense_virt = devm_ioremap(device->dev, res->start,
+ adreno_dev->isense_len);
+ if (adreno_dev->isense_virt == NULL)
+ dev_warn(device->dev, "isense ioremap failed\n");
+}
+
static void adreno_efuse_read_soc_hw_rev(struct adreno_device *adreno_dev)
{
unsigned int val;
@@ -1355,6 +1373,8 @@ static int adreno_probe(struct platform_device *pdev)
adreno_cx_misc_probe(device);
adreno_rscc_probe(device);
+
+ adreno_isense_probe(device);
/*
* qcom,iommu-secure-id is used to identify MMUs that can handle secure
* content but that is only part of the story - the GPU also has to be
@@ -3247,6 +3267,25 @@ void adreno_rscc_regread(struct adreno_device *adreno_dev,
rmb();
}
+void adreno_isense_regread(struct adreno_device *adreno_dev,
+ unsigned int offsetwords, unsigned int *value)
+{
+ unsigned int isense_offset;
+
+ isense_offset = (offsetwords << 2);
+ if (!adreno_dev->isense_virt ||
+ (isense_offset >= adreno_dev->isense_len))
+ return;
+
+ *value = __raw_readl(adreno_dev->isense_virt + isense_offset);
+
+ /*
+ * ensure this read finishes before the next one.
+ * i.e. act like normal readl()
+ */
+ rmb();
+}
+
void adreno_cx_misc_regwrite(struct adreno_device *adreno_dev,
unsigned int offsetwords, unsigned int value)
{
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 45fba57..b276b3df 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -445,6 +445,9 @@ enum gpu_coresight_sources {
* @rscc_base: Base physical address of the RSCC
* @rscc_len: Length of the RSCC register block
* @rscc_virt: Pointer where RSCC block is mapped
+ * @isense_base: Base physical address of isense block
+ * @isense_len: Length of the isense register block
+ * @isense_virt: Pointer where isense block is mapped
* @gpucore: Pointer to the adreno_gpu_core structure
* @pfp_fw: Buffer which holds the pfp ucode
* @pfp_fw_size: Size of pfp ucode buffer
@@ -530,6 +533,9 @@ struct adreno_device {
unsigned long rscc_base;
unsigned int rscc_len;
void __iomem *rscc_virt;
+ unsigned long isense_base;
+ unsigned int isense_len;
+ void __iomem *isense_virt;
const struct adreno_gpu_core *gpucore;
struct adreno_firmware fw[2];
size_t gpmu_cmds_size;
@@ -1191,6 +1197,8 @@ void adreno_cx_misc_regrmw(struct adreno_device *adreno_dev,
unsigned int mask, unsigned int bits);
void adreno_rscc_regread(struct adreno_device *adreno_dev,
unsigned int offsetwords, unsigned int *value);
+void adreno_isense_regread(struct adreno_device *adreno_dev,
+ unsigned int offsetwords, unsigned int *value);
#define ADRENO_TARGET(_name, _id) \
diff --git a/drivers/gpu/msm/adreno_a6xx.h b/drivers/gpu/msm/adreno_a6xx.h
index cc2b7aa..8e0d04e 100644
--- a/drivers/gpu/msm/adreno_a6xx.h
+++ b/drivers/gpu/msm/adreno_a6xx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _ADRENO_A6XX_H_
@@ -14,6 +14,7 @@
#define CP_CLUSTER_GRAS 0x3
#define CP_CLUSTER_SP_PS 0x4
#define CP_CLUSTER_PS 0x5
+#define CP_CLUSTER_VPC_PS 0x6
/**
* struct a6xx_cp_preemption_record - CP context record for
diff --git a/drivers/gpu/msm/adreno_a6xx_snapshot.c b/drivers/gpu/msm/adreno_a6xx_snapshot.c
index 0b53b51..bbe6577 100644
--- a/drivers/gpu/msm/adreno_a6xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a6xx_snapshot.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/io.h>
@@ -35,7 +35,7 @@ static const unsigned int a6xx_ps_cluster_rbp[] = {
0x8C02, 0x8C07, 0x8C11, 0x8C16, 0x8C20, 0x8C25,
};
-static const unsigned int a6xx_ps_cluster[] = {
+static const unsigned int a6xx_vpc_ps_cluster[] = {
0x9200, 0x9216, 0x9218, 0x9236, 0x9300, 0x9306,
};
@@ -66,6 +66,67 @@ static const unsigned int a6xx_rscc_snapshot_registers[] = {
0x2389C, 0x238D7, 0x2393C, 0x2393F, 0x23944, 0x2397F,
};
+static const unsigned int a650_rscc_registers[] = {
+ 0x38000, 0x38034, 0x38036, 0x38036, 0x38040, 0x38042, 0x38080, 0x38084,
+ 0x38089, 0x3808C, 0x38091, 0x38094, 0x38099, 0x3809C, 0x380A1, 0x380A4,
+ 0x380A9, 0x380AC, 0x38100, 0x38102, 0x38104, 0x38107, 0x38114, 0x38119,
+ 0x38124, 0x3812E, 0x38180, 0x38197, 0x38340, 0x38341, 0x38344, 0x38347,
+ 0x3834C, 0x3834F, 0x38351, 0x38354, 0x38356, 0x38359, 0x3835B, 0x3835E,
+ 0x38360, 0x38363, 0x38365, 0x38368, 0x3836A, 0x3836D, 0x3836F, 0x38372,
+ 0x383EC, 0x383EF, 0x383F4, 0x383F7, 0x383F9, 0x383FC, 0x383FE, 0x38401,
+ 0x38403, 0x38406, 0x38408, 0x3840B, 0x3840D, 0x38410, 0x38412, 0x38415,
+ 0x38417, 0x3841A, 0x38494, 0x38497, 0x3849C, 0x3849F, 0x384A1, 0x384A4,
+ 0x384A6, 0x384A9, 0x384AB, 0x384AE, 0x384B0, 0x384B3, 0x384B5, 0x384B8,
+ 0x384BA, 0x384BD, 0x384BF, 0x384C2, 0x3853C, 0x3853F, 0x38544, 0x38547,
+ 0x38549, 0x3854C, 0x3854E, 0x38551, 0x38553, 0x38556, 0x38558, 0x3855B,
+ 0x3855D, 0x38560, 0x38562, 0x38565, 0x38567, 0x3856A, 0x385E4, 0x385E7,
+ 0x385EC, 0x385EF, 0x385F1, 0x385F4, 0x385F6, 0x385F9, 0x385FB, 0x385FE,
+ 0x38600, 0x38603, 0x38605, 0x38608, 0x3860A, 0x3860D, 0x3860F, 0x38612,
+ 0x3868C, 0x3868F, 0x38694, 0x38697, 0x38699, 0x3869C, 0x3869E, 0x386A1,
+ 0x386A3, 0x386A6, 0x386A8, 0x386AB, 0x386AD, 0x386B0, 0x386B2, 0x386B5,
+ 0x386B7, 0x386BA, 0x38734, 0x38737, 0x3873C, 0x3873F, 0x38741, 0x38744,
+ 0x38746, 0x38749, 0x3874B, 0x3874E, 0x38750, 0x38753, 0x38755, 0x38758,
+ 0x3875A, 0x3875D, 0x3875F, 0x38762, 0x387DC, 0x387DF, 0x387E4, 0x387E7,
+ 0x387E9, 0x387EC, 0x387EE, 0x387F1, 0x387F3, 0x387F6, 0x387F8, 0x387FB,
+ 0x387FD, 0x38800, 0x38802, 0x38805, 0x38807, 0x3880A, 0x38884, 0x38887,
+ 0x3888C, 0x3888F, 0x38891, 0x38894, 0x38896, 0x38899, 0x3889B, 0x3889E,
+ 0x388A0, 0x388A3, 0x388A5, 0x388A8, 0x388AA, 0x388AD, 0x388AF, 0x388B2,
+ 0x3892C, 0x3892F, 0x38934, 0x38937, 0x38939, 0x3893C, 0x3893E, 0x38941,
+ 0x38943, 0x38946, 0x38948, 0x3894B, 0x3894D, 0x38950, 0x38952, 0x38955,
+ 0x38957, 0x3895A, 0x38B50, 0x38B51, 0x38B53, 0x38B55, 0x38B5A, 0x38B5A,
+ 0x38B5F, 0x38B5F, 0x38B64, 0x38B64, 0x38B69, 0x38B69, 0x38B6E, 0x38B6E,
+ 0x38B73, 0x38B73, 0x38BF8, 0x38BF8, 0x38BFD, 0x38BFD, 0x38C02, 0x38C02,
+ 0x38C07, 0x38C07, 0x38C0C, 0x38C0C, 0x38C11, 0x38C11, 0x38C16, 0x38C16,
+ 0x38C1B, 0x38C1B, 0x38CA0, 0x38CA0, 0x38CA5, 0x38CA5, 0x38CAA, 0x38CAA,
+ 0x38CAF, 0x38CAF, 0x38CB4, 0x38CB4, 0x38CB9, 0x38CB9, 0x38CBE, 0x38CBE,
+ 0x38CC3, 0x38CC3, 0x38D48, 0x38D48, 0x38D4D, 0x38D4D, 0x38D52, 0x38D52,
+ 0x38D57, 0x38D57, 0x38D5C, 0x38D5C, 0x38D61, 0x38D61, 0x38D66, 0x38D66,
+ 0x38D6B, 0x38D6B, 0x38DF0, 0x38DF0, 0x38DF5, 0x38DF5, 0x38DFA, 0x38DFA,
+ 0x38DFF, 0x38DFF, 0x38E04, 0x38E04, 0x38E09, 0x38E09, 0x38E0E, 0x38E0E,
+ 0x38E13, 0x38E13, 0x38E98, 0x38E98, 0x38E9D, 0x38E9D, 0x38EA2, 0x38EA2,
+ 0x38EA7, 0x38EA7, 0x38EAC, 0x38EAC, 0x38EB1, 0x38EB1, 0x38EB6, 0x38EB6,
+ 0x38EBB, 0x38EBB, 0x38F40, 0x38F40, 0x38F45, 0x38F45, 0x38F4A, 0x38F4A,
+ 0x38F4F, 0x38F4F, 0x38F54, 0x38F54, 0x38F59, 0x38F59, 0x38F5E, 0x38F5E,
+ 0x38F63, 0x38F63, 0x38FE8, 0x38FE8, 0x38FED, 0x38FED, 0x38FF2, 0x38FF2,
+ 0x38FF7, 0x38FF7, 0x38FFC, 0x38FFC, 0x39001, 0x39001, 0x39006, 0x39006,
+ 0x3900B, 0x3900B, 0x39090, 0x39090, 0x39095, 0x39095, 0x3909A, 0x3909A,
+ 0x3909F, 0x3909F, 0x390A4, 0x390A4, 0x390A9, 0x390A9, 0x390AE, 0x390AE,
+ 0x390B3, 0x390B3, 0x39138, 0x39138, 0x3913D, 0x3913D, 0x39142, 0x39142,
+ 0x39147, 0x39147, 0x3914C, 0x3914C, 0x39151, 0x39151, 0x39156, 0x39156,
+ 0x3915B, 0x3915B,
+};
+
+static const unsigned int a650_isense_registers[] = {
+ 0x22C00, 0x22C19, 0x22C26, 0x22C2D, 0x22C2F, 0x22C36, 0x22C40, 0x22C44,
+ 0x22C50, 0x22C57, 0x22C60, 0x22C67, 0x22C80, 0x22C87, 0x22D25, 0x22D2A,
+ 0x22D2C, 0x22D32, 0x22D3E, 0x22D3F, 0x22D4E, 0x22D55, 0x22D58, 0x22D60,
+ 0x22D64, 0x22D64, 0x22D66, 0x22D66, 0x22D68, 0x22D6B, 0x22D6E, 0x22D76,
+ 0x22D78, 0x22D78, 0x22D80, 0x22D87, 0x22D90, 0x22D97, 0x22DA0, 0x22DA0,
+ 0x22DB0, 0x22DB7, 0x22DC0, 0x22DC2, 0x22DC4, 0x22DE3, 0x2301A, 0x2301A,
+ 0x2301D, 0x2302A, 0x23120, 0x23121, 0x23133, 0x23133, 0x23156, 0x23157,
+ 0x23165, 0x23165, 0x2316D, 0x2316D, 0x23180, 0x23191,
+};
+
static const struct sel_reg {
unsigned int host_reg;
unsigned int cd_reg;
@@ -95,7 +156,7 @@ static struct a6xx_cluster_registers {
&_a6xx_rb_rac_aperture },
{ CP_CLUSTER_PS, a6xx_ps_cluster_rbp, ARRAY_SIZE(a6xx_ps_cluster_rbp)/2,
&_a6xx_rb_rbp_aperture },
- { CP_CLUSTER_PS, a6xx_ps_cluster, ARRAY_SIZE(a6xx_ps_cluster)/2,
+ { CP_CLUSTER_PS, a6xx_vpc_ps_cluster, ARRAY_SIZE(a6xx_vpc_ps_cluster)/2,
NULL },
{ CP_CLUSTER_FE, a6xx_fe_cluster, ARRAY_SIZE(a6xx_fe_cluster)/2,
NULL },
@@ -144,7 +205,7 @@ static const unsigned int a6xx_sp_ps_hlsq_2d_cluster[] = {
static const unsigned int a6xx_sp_ps_sp_cluster[] = {
0xA980, 0xA9A8, 0xA9B0, 0xA9BC, 0xA9D0, 0xA9D3, 0xA9E0, 0xA9F3,
- 0xAA00, 0xAA00, 0xAA30, 0xAA31,
+ 0xAA00, 0xAA00, 0xAA30, 0xAA31, 0xAAF2, 0xAAF2,
};
static const unsigned int a6xx_sp_ps_sp_2d_cluster[] = {
@@ -354,19 +415,32 @@ enum a6xx_debugbus_id {
A6XX_DBGBUS_HLSQ_SPTP = 0x1f,
A6XX_DBGBUS_RB_0 = 0x20,
A6XX_DBGBUS_RB_1 = 0x21,
+ A6XX_DBGBUS_RB_2 = 0x22,
A6XX_DBGBUS_UCHE_WRAPPER = 0x24,
A6XX_DBGBUS_CCU_0 = 0x28,
A6XX_DBGBUS_CCU_1 = 0x29,
+ A6XX_DBGBUS_CCU_2 = 0x2a,
A6XX_DBGBUS_VFD_0 = 0x38,
A6XX_DBGBUS_VFD_1 = 0x39,
A6XX_DBGBUS_VFD_2 = 0x3a,
A6XX_DBGBUS_VFD_3 = 0x3b,
+ A6XX_DBGBUS_VFD_4 = 0x3c,
+ A6XX_DBGBUS_VFD_5 = 0x3d,
A6XX_DBGBUS_SP_0 = 0x40,
A6XX_DBGBUS_SP_1 = 0x41,
+ A6XX_DBGBUS_SP_2 = 0x42,
A6XX_DBGBUS_TPL1_0 = 0x48,
A6XX_DBGBUS_TPL1_1 = 0x49,
A6XX_DBGBUS_TPL1_2 = 0x4a,
A6XX_DBGBUS_TPL1_3 = 0x4b,
+ A6XX_DBGBUS_TPL1_4 = 0x4c,
+ A6XX_DBGBUS_TPL1_5 = 0x4d,
+ A6XX_DBGBUS_SPTP_0 = 0x58,
+ A6XX_DBGBUS_SPTP_1 = 0x59,
+ A6XX_DBGBUS_SPTP_2 = 0x5a,
+ A6XX_DBGBUS_SPTP_3 = 0x5b,
+ A6XX_DBGBUS_SPTP_4 = 0x5c,
+ A6XX_DBGBUS_SPTP_5 = 0x5d,
};
static const struct adreno_debugbus_block a6xx_dbgc_debugbus_blocks[] = {
@@ -420,6 +494,22 @@ static const struct adreno_debugbus_block a6xx_cx_dbgc_debugbus_blocks[] = {
{ A6XX_DBGBUS_CX, 0x100, },
};
+static const struct adreno_debugbus_block a650_dbgc_debugbus_blocks[] = {
+ { A6XX_DBGBUS_RB_2, 0x100, },
+ { A6XX_DBGBUS_CCU_2, 0x100, },
+ { A6XX_DBGBUS_VFD_4, 0x100, },
+ { A6XX_DBGBUS_VFD_5, 0x100, },
+ { A6XX_DBGBUS_SP_2, 0x100, },
+ { A6XX_DBGBUS_TPL1_4, 0x100, },
+ { A6XX_DBGBUS_TPL1_5, 0x100, },
+ { A6XX_DBGBUS_SPTP_0, 0x100, },
+ { A6XX_DBGBUS_SPTP_1, 0x100, },
+ { A6XX_DBGBUS_SPTP_2, 0x100, },
+ { A6XX_DBGBUS_SPTP_3, 0x100, },
+ { A6XX_DBGBUS_SPTP_4, 0x100, },
+ { A6XX_DBGBUS_SPTP_5, 0x100, },
+};
+
#define A6XX_NUM_SHADER_BANKS 3
#define A6XX_SHADER_STATETYPE_SHIFT 8
@@ -1381,6 +1471,16 @@ static void a6xx_snapshot_debugbus(struct adreno_device *adreno_dev,
snapshot, a6xx_snapshot_dbgc_debugbus_block,
(void *) &a6xx_dbgc_debugbus_blocks[i]);
}
+
+ if (adreno_is_a650(adreno_dev)) {
+ for (i = 0; i < ARRAY_SIZE(a650_dbgc_debugbus_blocks); i++) {
+ kgsl_snapshot_add_section(device,
+ KGSL_SNAPSHOT_SECTION_DEBUGBUS,
+ snapshot, a6xx_snapshot_dbgc_debugbus_block,
+ (void *) &a650_dbgc_debugbus_blocks[i]);
+ }
+ }
+
/*
* GBIF has the same debugbus as the other GPU blocks, hence fall back to
* the default path if the GPU uses GBIF.
@@ -1489,6 +1589,91 @@ static void _a6xx_do_crashdump(struct kgsl_device *device)
crash_dump_valid = true;
}
+size_t a6xx_snapshot_rscc_registers(struct kgsl_device *device, u8 *buf,
+ size_t remain, void *priv)
+{
+ struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
+ struct kgsl_snapshot_registers *regs = priv;
+ unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+ int count = 0, j, k;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /* Figure out how many registers we are going to dump */
+ for (j = 0; j < regs->count; j++) {
+ int start = regs->regs[j * 2];
+ int end = regs->regs[j * 2 + 1];
+
+ count += (end - start + 1);
+ }
+
+ if (remain < (count * 8) + sizeof(*header)) {
+ SNAPSHOT_ERR_NOMEM(device, "RSCC REGISTERS");
+ return 0;
+ }
+
+ for (j = 0; j < regs->count; j++) {
+ unsigned int start = regs->regs[j * 2];
+ unsigned int end = regs->regs[j * 2 + 1];
+
+ for (k = start; k <= end; k++) {
+ unsigned int val;
+
+ adreno_rscc_regread(adreno_dev,
+ k - (adreno_dev->rscc_base >> 2), &val);
+ *data++ = k;
+ *data++ = val;
+ }
+ }
+
+ header->count = count;
+
+ /* Return the size of the section */
+ return (count * 8) + sizeof(*header);
+}
+
+size_t a6xx_snapshot_isense_registers(struct kgsl_device *device, u8 *buf,
+ size_t remain, void *priv)
+{
+ struct kgsl_snapshot_regs *header = (struct kgsl_snapshot_regs *)buf;
+ struct kgsl_snapshot_registers *regs = priv;
+ unsigned int *data = (unsigned int *)(buf + sizeof(*header));
+ int count = 0, j, k;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /* Figure out how many registers we are going to dump */
+
+ for (j = 0; j < regs->count; j++) {
+ int start = regs->regs[j * 2];
+ int end = regs->regs[j * 2 + 1];
+
+ count += (end - start + 1);
+ }
+
+ if (remain < (count * 8) + sizeof(*header)) {
+ SNAPSHOT_ERR_NOMEM(device, "ISENSE REGISTERS");
+ return 0;
+ }
+
+ for (j = 0; j < regs->count; j++) {
+ unsigned int start = regs->regs[j * 2];
+ unsigned int end = regs->regs[j * 2 + 1];
+
+ for (k = start; k <= end; k++) {
+ unsigned int val;
+
+ adreno_isense_regread(adreno_dev,
+ k - (adreno_dev->isense_base >> 2), &val);
+ *data++ = k;
+ *data++ = val;
+ }
+ }
+
+ header->count = count;
+
+ /* Return the size of the section */
+ return (count * 8) + sizeof(*header);
+}
+
/*
* a6xx_snapshot() - A6XX GPU snapshot function
* @adreno_dev: Device being snapshotted
@@ -1503,7 +1688,7 @@ void a6xx_snapshot(struct adreno_device *adreno_dev,
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
bool sptprac_on;
- unsigned int i, roq_size;
+ unsigned int i, roq_size, ucode_dbg_size;
/* GMU TCM data dumped through AHB */
gmu_core_dev_snapshot(device, snapshot);
@@ -1517,6 +1702,32 @@ void a6xx_snapshot(struct adreno_device *adreno_dev,
*/
a6xx_snapshot_debugbus(adreno_dev, snapshot);
+ /* RSCC registers are on cx */
+ if (adreno_is_a650(adreno_dev)) {
+ struct kgsl_snapshot_registers r;
+
+ r.regs = a650_rscc_registers;
+ r.count = ARRAY_SIZE(a650_rscc_registers) / 2;
+
+ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+ snapshot, a6xx_snapshot_rscc_registers, &r);
+
+ r.regs = a650_isense_registers;
+ r.count = ARRAY_SIZE(a650_isense_registers) / 2;
+
+ kgsl_snapshot_add_section(device, KGSL_SNAPSHOT_SECTION_REGS,
+ snapshot, a6xx_snapshot_isense_registers, &r);
+ } else if (adreno_is_a615_family(adreno_dev) ||
+ adreno_is_a630(adreno_dev)) {
+ adreno_snapshot_registers(device, snapshot,
+ a630_rscc_snapshot_registers,
+ ARRAY_SIZE(a630_rscc_snapshot_registers) / 2);
+ } else if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev)) {
+ adreno_snapshot_registers(device, snapshot,
+ a6xx_rscc_snapshot_registers,
+ ARRAY_SIZE(a6xx_rscc_snapshot_registers) / 2);
+ }
+
sptprac_on = gpudev->sptprac_is_on(adreno_dev);
if (!gmu_core_dev_gx_is_on(device))
@@ -1545,15 +1756,6 @@ void a6xx_snapshot(struct adreno_device *adreno_dev,
snapshot, a6xx_snapshot_registers, &a6xx_reg_list[i]);
}
- if (adreno_is_a615_family(adreno_dev) || adreno_is_a630(adreno_dev))
- adreno_snapshot_registers(device, snapshot,
- a630_rscc_snapshot_registers,
- ARRAY_SIZE(a630_rscc_snapshot_registers) / 2);
- else if (adreno_is_a640(adreno_dev) || adreno_is_a680(adreno_dev))
- adreno_snapshot_registers(device, snapshot,
- a6xx_rscc_snapshot_registers,
- ARRAY_SIZE(a6xx_rscc_snapshot_registers) / 2);
-
/* CP_SQE indexed registers */
kgsl_snapshot_indexed_registers(device, snapshot,
A6XX_CP_SQE_STAT_ADDR, A6XX_CP_SQE_STAT_DATA, 0, 0x33);
@@ -1563,10 +1765,12 @@ void a6xx_snapshot(struct adreno_device *adreno_dev,
A6XX_CP_DRAW_STATE_ADDR, A6XX_CP_DRAW_STATE_DATA,
0, 0x100);
+ ucode_dbg_size = adreno_is_a650(adreno_dev) ? 0x7000 : 0x6000;
+
/* SQE_UCODE Cache */
kgsl_snapshot_indexed_registers(device, snapshot,
A6XX_CP_SQE_UCODE_DBG_ADDR, A6XX_CP_SQE_UCODE_DBG_DATA,
- 0, 0x6000);
+ 0, ucode_dbg_size);
/*
* CP ROQ dump units is 4dwords. The number of units is stored
@@ -1598,7 +1802,8 @@ void a6xx_snapshot(struct adreno_device *adreno_dev,
}
-static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
+static int _a6xx_crashdump_init_mvc(struct adreno_device *adreno_dev,
+ uint64_t *ptr, uint64_t *offset)
{
int qwords = 0;
unsigned int i, j, k;
@@ -1607,6 +1812,11 @@ static int _a6xx_crashdump_init_mvc(uint64_t *ptr, uint64_t *offset)
for (i = 0; i < ARRAY_SIZE(a6xx_clusters); i++) {
struct a6xx_cluster_registers *cluster = &a6xx_clusters[i];
+ /* The VPC registers are driven by VPC_PS cluster on a650 */
+ if (adreno_is_a650(adreno_dev) &&
+ (cluster->regs == a6xx_vpc_ps_cluster))
+ cluster->id = CP_CLUSTER_VPC_PS;
+
if (cluster->sel) {
ptr[qwords++] = cluster->sel->val;
ptr[qwords++] = ((uint64_t)cluster->sel->cd_reg << 44) |
@@ -1902,7 +2112,7 @@ void a6xx_crashdump_init(struct adreno_device *adreno_dev)
}
/* Program the capture script for the MVC registers */
- ptr += _a6xx_crashdump_init_mvc(ptr, &offset);
+ ptr += _a6xx_crashdump_init_mvc(adreno_dev, ptr, &offset);
ptr += _a6xx_crashdump_init_ctx_dbgahb(ptr, &offset);
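Both new snapshot callbacks above walk a table of inclusive (start, end) register pairs and emit 8 bytes per register (32-bit address plus 32-bit value) on top of the section header. A small worked example of that size accounting, using a made-up two-range list:

    #include <linux/kernel.h>

    static const unsigned int example_ranges[] = {
            0x100, 0x103,   /* 4 registers */
            0x200, 0x200,   /* 1 register  */
    };

    static size_t example_section_size(size_t header_size)
    {
            size_t count = 0;
            int i;

            for (i = 0; i < ARRAY_SIZE(example_ranges) / 2; i++)
                    count += example_ranges[i * 2 + 1] -
                             example_ranges[i * 2] + 1;

            return count * 8 + header_size; /* here: 5 * 8 + header_size */
    }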
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index b481002..ebc9ffd 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -30,6 +30,7 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/kfifo.h>
#include <linux/sched/signal.h>
#include <linux/export.h>
#include <linux/slab.h>
@@ -661,17 +662,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device);
/* enqueue string to 'events' ring buffer */
void hid_debug_event(struct hid_device *hdev, char *buf)
{
- unsigned i;
struct hid_debug_list *list;
unsigned long flags;
spin_lock_irqsave(&hdev->debug_list_lock, flags);
- list_for_each_entry(list, &hdev->debug_list, node) {
- for (i = 0; buf[i]; i++)
- list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
- buf[i];
- list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
- }
+ list_for_each_entry(list, &hdev->debug_list, node)
+ kfifo_in(&list->hid_debug_fifo, buf, strlen(buf));
spin_unlock_irqrestore(&hdev->debug_list_lock, flags);
wake_up_interruptible(&hdev->debug_wait);
@@ -722,8 +718,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu
hid_debug_event(hdev, buf);
kfree(buf);
- wake_up_interruptible(&hdev->debug_wait);
-
+ wake_up_interruptible(&hdev->debug_wait);
}
EXPORT_SYMBOL_GPL(hid_dump_input);
@@ -1088,8 +1083,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file)
goto out;
}
- if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) {
- err = -ENOMEM;
+ err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
+ if (err) {
kfree(list);
goto out;
}
@@ -1109,77 +1104,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer,
size_t count, loff_t *ppos)
{
struct hid_debug_list *list = file->private_data;
- int ret = 0, len;
+ int ret = 0, copied;
DECLARE_WAITQUEUE(wait, current);
mutex_lock(&list->read_mutex);
- while (ret == 0) {
- if (list->head == list->tail) {
- add_wait_queue(&list->hdev->debug_wait, &wait);
- set_current_state(TASK_INTERRUPTIBLE);
+ if (kfifo_is_empty(&list->hid_debug_fifo)) {
+ add_wait_queue(&list->hdev->debug_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
- while (list->head == list->tail) {
- if (file->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
- if (signal_pending(current)) {
- ret = -ERESTARTSYS;
- break;
- }
-
- if (!list->hdev || !list->hdev->debug) {
- ret = -EIO;
- set_current_state(TASK_RUNNING);
- goto out;
- }
-
- /* allow O_NONBLOCK from other threads */
- mutex_unlock(&list->read_mutex);
- schedule();
- mutex_lock(&list->read_mutex);
- set_current_state(TASK_INTERRUPTIBLE);
+ while (kfifo_is_empty(&list->hid_debug_fifo)) {
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
}
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&list->hdev->debug_wait, &wait);
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ /* if list->hdev is NULL we cannot remove_wait_queue().
+ * if list->hdev->debug is 0 then hid_debug_unregister()
+ * was already called and list->hdev is being destroyed.
+ * if we add remove_wait_queue() here we can hit a race.
+ */
+ if (!list->hdev || !list->hdev->debug) {
+ ret = -EIO;
+ set_current_state(TASK_RUNNING);
+ goto out;
+ }
+
+ /* allow O_NONBLOCK from other threads */
+ mutex_unlock(&list->read_mutex);
+ schedule();
+ mutex_lock(&list->read_mutex);
+ set_current_state(TASK_INTERRUPTIBLE);
}
+ __set_current_state(TASK_RUNNING);
+ remove_wait_queue(&list->hdev->debug_wait, &wait);
+
if (ret)
goto out;
-
- /* pass the ringbuffer contents to userspace */
-copy_rest:
- if (list->tail == list->head)
- goto out;
- if (list->tail > list->head) {
- len = list->tail - list->head;
- if (len > count)
- len = count;
-
- if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
- ret = -EFAULT;
- goto out;
- }
- ret += len;
- list->head += len;
- } else {
- len = HID_DEBUG_BUFSIZE - list->head;
- if (len > count)
- len = count;
-
- if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
- ret = -EFAULT;
- goto out;
- }
- list->head = 0;
- ret += len;
- count -= len;
- if (count > 0)
- goto copy_rest;
- }
-
}
+
+ /* pass the fifo content to userspace, locking is not needed with only
+ * one concurrent reader and one concurrent writer
+ */
+ ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied);
+ if (ret)
+ goto out;
+ ret = copied;
out:
mutex_unlock(&list->read_mutex);
return ret;
@@ -1190,7 +1165,7 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait)
struct hid_debug_list *list = file->private_data;
poll_wait(file, &list->hdev->debug_wait, wait);
- if (list->head != list->tail)
+ if (!kfifo_is_empty(&list->hid_debug_fifo))
return EPOLLIN | EPOLLRDNORM;
if (!list->hdev->debug)
return EPOLLERR | EPOLLHUP;
@@ -1205,7 +1180,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file)
spin_lock_irqsave(&list->hdev->debug_list_lock, flags);
list_del(&list->node);
spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags);
- kfree(list->hid_debug_buf);
+ kfifo_free(&list->hid_debug_fifo);
kfree(list);
return 0;
@@ -1256,4 +1231,3 @@ void hid_debug_exit(void)
{
debugfs_remove_recursive(hid_debug_root);
}
-
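The hid-debug rework replaces the hand-rolled head/tail ring buffer with a kfifo, which already handles wrap-around for the single-producer/single-consumer case. A minimal sketch of that pattern using the same kfifo calls (names and sizes are illustrative):

    #include <linux/kfifo.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    #define DEMO_FIFO_SIZE 4096     /* rounded up to a power of two */

    struct demo_list {
            struct kfifo fifo;
            spinlock_t lock;
    };

    static int demo_open(struct demo_list *list)
    {
            spin_lock_init(&list->lock);
            /* byte fifo, allocated at open time, freed in demo_release() */
            return kfifo_alloc(&list->fifo, DEMO_FIFO_SIZE, GFP_KERNEL);
    }

    static void demo_event(struct demo_list *list, const char *buf)
    {
            unsigned long flags;

            spin_lock_irqsave(&list->lock, flags);
            kfifo_in(&list->fifo, buf, strlen(buf));        /* producer */
            spin_unlock_irqrestore(&list->lock, flags);
    }

    static ssize_t demo_read(struct demo_list *list, char __user *buffer,
                             size_t count)
    {
            unsigned int copied;
            int ret;

            if (kfifo_is_empty(&list->fifo))
                    return -EAGAIN;

            /* consumer: copies fifo contents straight to userspace */
            ret = kfifo_to_user(&list->fifo, buffer, count, &copied);
            return ret ? ret : copied;
    }

    static void demo_release(struct demo_list *list)
    {
            kfifo_free(&list->fifo);
    }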
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index 643b6eb..eacc76d 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -743,7 +743,9 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
data_pointer->led_mute.brightness_get = lenovo_led_brightness_get_tpkbd;
data_pointer->led_mute.brightness_set = lenovo_led_brightness_set_tpkbd;
data_pointer->led_mute.dev = dev;
- led_classdev_register(dev, &data_pointer->led_mute);
+ ret = led_classdev_register(dev, &data_pointer->led_mute);
+ if (ret < 0)
+ goto err;
data_pointer->led_micmute.name = name_micmute;
data_pointer->led_micmute.brightness_get =
@@ -751,7 +753,11 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev)
data_pointer->led_micmute.brightness_set =
lenovo_led_brightness_set_tpkbd;
data_pointer->led_micmute.dev = dev;
- led_classdev_register(dev, &data_pointer->led_micmute);
+ ret = led_classdev_register(dev, &data_pointer->led_micmute);
+ if (ret < 0) {
+ led_classdev_unregister(&data_pointer->led_mute);
+ goto err;
+ }
lenovo_features_set_tpkbd(hdev);
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 08e3945..f9b8e3e 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -360,9 +360,11 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = data->client;
unsigned long min, val;
u8 reg;
- int err = kstrtoul(buf, 10, &val);
- if (err < 0)
- return err;
+ int rv;
+
+ rv = kstrtoul(buf, 10, &val);
+ if (rv < 0)
+ return rv;
/* Save fan_min */
mutex_lock(&data->update_lock);
@@ -390,8 +392,13 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return -EINVAL;
}
- reg = (lm80_read_value(client, LM80_REG_FANDIV) &
- ~(3 << (2 * (nr + 1)))) | (data->fan_div[nr] << (2 * (nr + 1)));
+ rv = lm80_read_value(client, LM80_REG_FANDIV);
+ if (rv < 0) {
+ mutex_unlock(&data->update_lock);
+ return rv;
+ }
+ reg = (rv & ~(3 << (2 * (nr + 1))))
+ | (data->fan_div[nr] << (2 * (nr + 1)));
lm80_write_value(client, LM80_REG_FANDIV, reg);
/* Restore fan_min */
@@ -623,6 +630,7 @@ static int lm80_probe(struct i2c_client *client,
struct device *dev = &client->dev;
struct device *hwmon_dev;
struct lm80_data *data;
+ int rv;
data = devm_kzalloc(dev, sizeof(struct lm80_data), GFP_KERNEL);
if (!data)
@@ -635,8 +643,14 @@ static int lm80_probe(struct i2c_client *client,
lm80_init_client(client);
/* A few vars need to be filled upon startup */
- data->fan[f_min][0] = lm80_read_value(client, LM80_REG_FAN_MIN(1));
- data->fan[f_min][1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
+ rv = lm80_read_value(client, LM80_REG_FAN_MIN(1));
+ if (rv < 0)
+ return rv;
+ data->fan[f_min][0] = rv;
+ rv = lm80_read_value(client, LM80_REG_FAN_MIN(2));
+ if (rv < 0)
+ return rv;
+ data->fan[f_min][1] = rv;
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, lm80_groups);
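The lm80 hunks stop ignoring I2C read failures: a read can return a negative errno, which must not be folded into a register value. A tiny sketch of the checked read-modify-write pattern using the standard SMBus helpers (the helper name is hypothetical):

    #include <linux/i2c.h>

    /* Propagate a negative errno from the read instead of or'ing it
     * into the register value.
     */
    static int demo_update_bits(struct i2c_client *client, u8 reg,
                                u8 mask, u8 bits)
    {
            int rv = i2c_smbus_read_byte_data(client, reg);

            if (rv < 0)
                    return rv;

            return i2c_smbus_write_byte_data(client, reg,
                                             (rv & ~mask) | bits);
    }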
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index e363992..ceb3db6f3 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -88,7 +88,7 @@ static const struct of_device_id tmp421_of_match[] = {
.data = (void *)2
},
{
- .compatible = "ti,tmp422",
+ .compatible = "ti,tmp442",
.data = (void *)3
},
{ },
diff --git a/drivers/i2c/busses/i2c-axxia.c b/drivers/i2c/busses/i2c-axxia.c
index 51d3495..fb5bac0 100644
--- a/drivers/i2c/busses/i2c-axxia.c
+++ b/drivers/i2c/busses/i2c-axxia.c
@@ -296,22 +296,7 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
i2c_int_disable(idev, MST_STATUS_TFL);
}
- if (status & MST_STATUS_SCC) {
- /* Stop completed */
- i2c_int_disable(idev, ~MST_STATUS_TSS);
- complete(&idev->msg_complete);
- } else if (status & MST_STATUS_SNS) {
- /* Transfer done */
- i2c_int_disable(idev, ~MST_STATUS_TSS);
- if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
- axxia_i2c_empty_rx_fifo(idev);
- complete(&idev->msg_complete);
- } else if (status & MST_STATUS_TSS) {
- /* Transfer timeout */
- idev->msg_err = -ETIMEDOUT;
- i2c_int_disable(idev, ~MST_STATUS_TSS);
- complete(&idev->msg_complete);
- } else if (unlikely(status & MST_STATUS_ERR)) {
+ if (unlikely(status & MST_STATUS_ERR)) {
/* Transfer error */
i2c_int_disable(idev, ~0);
if (status & MST_STATUS_AL)
@@ -328,6 +313,21 @@ static irqreturn_t axxia_i2c_isr(int irq, void *_dev)
readl(idev->base + MST_TX_BYTES_XFRD),
readl(idev->base + MST_TX_XFER));
complete(&idev->msg_complete);
+ } else if (status & MST_STATUS_SCC) {
+ /* Stop completed */
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
+ complete(&idev->msg_complete);
+ } else if (status & MST_STATUS_SNS) {
+ /* Transfer done */
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
+ if (i2c_m_rd(idev->msg) && idev->msg_xfrd < idev->msg->len)
+ axxia_i2c_empty_rx_fifo(idev);
+ complete(&idev->msg_complete);
+ } else if (status & MST_STATUS_TSS) {
+ /* Transfer timeout */
+ idev->msg_err = -ETIMEDOUT;
+ i2c_int_disable(idev, ~MST_STATUS_TSS);
+ complete(&idev->msg_complete);
}
out:
diff --git a/drivers/i2c/busses/i2c-qcom-geni.c b/drivers/i2c/busses/i2c-qcom-geni.c
index fb65502..703c634 100644
--- a/drivers/i2c/busses/i2c-qcom-geni.c
+++ b/drivers/i2c/busses/i2c-qcom-geni.c
@@ -321,11 +321,12 @@ static irqreturn_t geni_i2c_irq(int irq, void *dev)
SE_DMA_RX_IRQ_CLR);
/* Ensure all writes are done before returning from ISR. */
wmb();
+ if ((dm_tx_st & TX_DMA_DONE) || (dm_rx_st & RX_DMA_DONE))
+ complete(&gi2c->xfer);
+
}
/* if this is an error with the done bit not set, handle it through a timeout. */
- if (m_stat & M_CMD_DONE_EN)
- complete(&gi2c->xfer);
- else if ((dm_tx_st & TX_DMA_DONE) || (dm_rx_st & RX_DMA_DONE))
+ else if (m_stat & M_CMD_DONE_EN)
complete(&gi2c->xfer);
return IRQ_HANDLED;
@@ -665,6 +666,7 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
dma_addr_t tx_dma = 0;
dma_addr_t rx_dma = 0;
enum se_xfer_mode mode = FIFO_MODE;
+ reinit_completion(&gi2c->xfer);
m_param |= (stretch ? STOP_STRETCH : 0);
m_param |= ((msgs[i].addr & 0x7F) << SLV_ADDR_SHFT);
@@ -724,6 +726,7 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
gi2c->xfer_timeout);
if (!timeout) {
geni_i2c_err(gi2c, GENI_TIMEOUT);
+ reinit_completion(&gi2c->xfer);
gi2c->cur = NULL;
geni_abort_m_cmd(gi2c->base);
timeout = wait_for_completion_timeout(&gi2c->xfer, HZ);
@@ -732,6 +735,7 @@ static int geni_i2c_xfer(struct i2c_adapter *adap,
gi2c->cur_rd = 0;
if (mode == SE_DMA) {
if (gi2c->err) {
+ reinit_completion(&gi2c->xfer);
if (msgs[i].flags != I2C_M_RD)
writel_relaxed(1, gi2c->base +
SE_DMA_TX_FSM_RST);
@@ -814,6 +818,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
return ret;
}
+ gi2c->i2c_rsc.ctrl_dev = gi2c->dev;
gi2c->i2c_rsc.se_clk = devm_clk_get(&pdev->dev, "se-clk");
if (IS_ERR(gi2c->i2c_rsc.se_clk)) {
ret = PTR_ERR(gi2c->i2c_rsc.se_clk);
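Several geni-i2c hunks add reinit_completion() calls because the same struct completion is reused for every transfer; a stale complete() from an aborted or timed-out transfer would otherwise satisfy the next wait immediately. A minimal sketch of that reuse pattern, with hypothetical names:

    #include <linux/completion.h>
    #include <linux/errno.h>

    struct demo_xfer {
            struct completion done;
    };

    /* One completion object reused across transfers. */
    static int demo_do_xfer(struct demo_xfer *x, unsigned long timeout)
    {
            /* Drop any stale completion left over from a previous transfer
             * that was aborted or timed out after its IRQ already fired. */
            reinit_completion(&x->done);

            /* ... start the hardware transfer; the IRQ handler calls
             * complete(&x->done) when it finishes ... */

            if (!wait_for_completion_timeout(&x->done, timeout))
                    return -ETIMEDOUT;

            return 0;
    }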
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 818cab1..ddcfb6d 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -800,6 +800,7 @@ static const struct sh_mobile_dt_config r8a7740_dt_config = {
static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
{ .compatible = "renesas,iic-r8a73a4", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7740", .data = &r8a7740_dt_config },
+ { .compatible = "renesas,iic-r8a774c0", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7790", .data = &v2_freq_calc_dt_config },
{ .compatible = "renesas,iic-r8a7791", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7792", .data = &fast_clock_dt_config },
@@ -808,6 +809,7 @@ static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
{ .compatible = "renesas,rcar-gen2-iic", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-r8a7795", .data = &fast_clock_dt_config },
{ .compatible = "renesas,rcar-gen3-iic", .data = &fast_clock_dt_config },
+ { .compatible = "renesas,iic-r8a77990", .data = &fast_clock_dt_config },
{ .compatible = "renesas,iic-sh73a0", .data = &fast_clock_dt_config },
{ .compatible = "renesas,rmobile-iic", .data = &default_dt_config },
{},
diff --git a/drivers/i3c/Kconfig b/drivers/i3c/Kconfig
new file mode 100644
index 0000000..30a4415
--- /dev/null
+++ b/drivers/i3c/Kconfig
@@ -0,0 +1,24 @@
+# SPDX-License-Identifier: GPL-2.0
+
+menuconfig I3C
+ tristate "I3C support"
+ select I2C
+ help
+ I3C is a serial protocol standardized by the MIPI alliance.
+
+ It's supposed to be backward compatible with I2C while providing
+ high-speed transfers and native interrupt support
+ without the need for extra pins.
+
+ The I3C protocol also standardizes the slave device types and is
+ mainly designed to communicate with sensors.
+
+ If you want I3C support, you should say Y here and also to the
+ specific driver for your bus adapter(s) below.
+
+ This I3C support can also be built as a module. If so, the module
+ will be called i3c.
+
+if I3C
+source "drivers/i3c/master/Kconfig"
+endif # I3C
diff --git a/drivers/i3c/Makefile b/drivers/i3c/Makefile
new file mode 100644
index 0000000..11982ef
--- /dev/null
+++ b/drivers/i3c/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+i3c-y := device.o master.o
+obj-$(CONFIG_I3C) += i3c.o
+obj-$(CONFIG_I3C) += master/
diff --git a/drivers/i3c/device.c b/drivers/i3c/device.c
new file mode 100644
index 0000000..69cc040
--- /dev/null
+++ b/drivers/i3c/device.c
@@ -0,0 +1,233 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "internals.h"
+
+/**
+ * i3c_device_do_priv_xfers() - do I3C SDR private transfers directed to a
+ * specific device
+ *
+ * @dev: device with which the transfers should be done
+ * @xfers: array of transfers
+ * @nxfers: number of transfers
+ *
+ * Initiate one or several private SDR transfers with @dev.
+ *
+ * This function can sleep and thus cannot be called in atomic context.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_do_priv_xfers(struct i3c_device *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ int ret, i;
+
+ if (nxfers < 1)
+ return 0;
+
+ for (i = 0; i < nxfers; i++) {
+ if (!xfers[i].len || !xfers[i].data.in)
+ return -EINVAL;
+ }
+
+ i3c_bus_normaluse_lock(dev->bus);
+ ret = i3c_dev_do_priv_xfers_locked(dev->desc, xfers, nxfers);
+ i3c_bus_normaluse_unlock(dev->bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_do_priv_xfers);
+
+/**
+ * i3c_device_get_info() - get I3C device information
+ *
+ * @dev: device we want information on
+ * @info: the information object to fill in
+ *
+ * Retrieve I3C dev info.
+ */
+void i3c_device_get_info(struct i3c_device *dev,
+ struct i3c_device_info *info)
+{
+ if (!info)
+ return;
+
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc)
+ *info = dev->desc->info;
+ i3c_bus_normaluse_unlock(dev->bus);
+}
+EXPORT_SYMBOL_GPL(i3c_device_get_info);
+
+/**
+ * i3c_device_disable_ibi() - Disable IBIs coming from a specific device
+ * @dev: device on which IBIs should be disabled
+ *
+ * This function disables IBIs coming from a specific device and waits for
+ * all pending IBIs to be processed.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_disable_ibi(struct i3c_device *dev)
+{
+ int ret = -ENOENT;
+
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc) {
+ mutex_lock(&dev->desc->ibi_lock);
+ ret = i3c_dev_disable_ibi_locked(dev->desc);
+ mutex_unlock(&dev->desc->ibi_lock);
+ }
+ i3c_bus_normaluse_unlock(dev->bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_disable_ibi);
+
+/**
+ * i3c_device_enable_ibi() - Enable IBIs coming from a specific device
+ * @dev: device on which IBIs should be enabled
+ *
+ * This function enables IBIs coming from a specific device and waits for
+ * all pending IBIs to be processed. This should be called on a device
+ * where i3c_device_request_ibi() has succeeded.
+ *
+ * Note that IBIs from this device might be received before this function
+ * returns to its caller.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_enable_ibi(struct i3c_device *dev)
+{
+ int ret = -ENOENT;
+
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc) {
+ mutex_lock(&dev->desc->ibi_lock);
+ ret = i3c_dev_enable_ibi_locked(dev->desc);
+ mutex_unlock(&dev->desc->ibi_lock);
+ }
+ i3c_bus_normaluse_unlock(dev->bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_enable_ibi);
+
+/**
+ * i3c_device_request_ibi() - Request an IBI
+ * @dev: device for which we should enable IBIs
+ * @req: setup requested for this IBI
+ *
+ * This function is responsible for pre-allocating all resources needed to
+ * process IBIs coming from @dev. When this function returns, IBIs are still
+ * disabled and will only be delivered once i3c_device_enable_ibi() is called.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_device_request_ibi(struct i3c_device *dev,
+ const struct i3c_ibi_setup *req)
+{
+ int ret = -ENOENT;
+
+ if (!req->handler || !req->num_slots)
+ return -EINVAL;
+
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc) {
+ mutex_lock(&dev->desc->ibi_lock);
+ ret = i3c_dev_request_ibi_locked(dev->desc, req);
+ mutex_unlock(&dev->desc->ibi_lock);
+ }
+ i3c_bus_normaluse_unlock(dev->bus);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_device_request_ibi);
+
+/**
+ * i3c_device_free_ibi() - Free all resources needed for IBI handling
+ * @dev: device on which you want to release IBI resources
+ *
+ * This function is responsible for de-allocating resources previously
+ * allocated by i3c_device_request_ibi(). It should be called after disabling
+ * IBIs with i3c_device_disable_ibi().
+ */
+void i3c_device_free_ibi(struct i3c_device *dev)
+{
+ i3c_bus_normaluse_lock(dev->bus);
+ if (dev->desc) {
+ mutex_lock(&dev->desc->ibi_lock);
+ i3c_dev_free_ibi_locked(dev->desc);
+ mutex_unlock(&dev->desc->ibi_lock);
+ }
+ i3c_bus_normaluse_unlock(dev->bus);
+}
+EXPORT_SYMBOL_GPL(i3c_device_free_ibi);
+
+/**
+ * i3cdev_to_dev() - Returns the device embedded in @i3cdev
+ * @i3cdev: I3C device
+ *
+ * Return: a pointer to a device object.
+ */
+struct device *i3cdev_to_dev(struct i3c_device *i3cdev)
+{
+ return &i3cdev->dev;
+}
+EXPORT_SYMBOL_GPL(i3cdev_to_dev);
+
+/**
+ * dev_to_i3cdev() - Returns the I3C device containing @dev
+ * @dev: device object
+ *
+ * Return: a pointer to an I3C device object.
+ */
+struct i3c_device *dev_to_i3cdev(struct device *dev)
+{
+ return container_of(dev, struct i3c_device, dev);
+}
+EXPORT_SYMBOL_GPL(dev_to_i3cdev);
+
+/**
+ * i3c_driver_register_with_owner() - register an I3C device driver
+ *
+ * @drv: driver to register
+ * @owner: module that owns this driver
+ *
+ * Register @drv to the core.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_driver_register_with_owner(struct i3c_driver *drv, struct module *owner)
+{
+ drv->driver.owner = owner;
+ drv->driver.bus = &i3c_bus_type;
+
+ return driver_register(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(i3c_driver_register_with_owner);
+
+/**
+ * i3c_driver_unregister() - unregister an I3C device driver
+ *
+ * @drv: driver to unregister
+ *
+ * Unregister @drv.
+ */
+void i3c_driver_unregister(struct i3c_driver *drv)
+{
+ driver_unregister(&drv->driver);
+}
+EXPORT_SYMBOL_GPL(i3c_driver_unregister);
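For context, a sketch of how a client driver might use the API added above: a private SDR read via i3c_device_do_priv_xfers() and an IBI request/enable pair. Field names not visible in this file (such as `rnw`, `data.out` and the IBI payload layout) are taken from the corresponding I3C device header and should be treated as assumptions here.

    #include <linux/i3c/device.h>

    /* Hypothetical helper: read @len bytes from an I3C slave into @buf
     * using a single private SDR read transfer.
     */
    static int demo_i3c_read(struct i3c_device *dev, void *buf, u16 len)
    {
            struct i3c_priv_xfer xfer = {
                    .rnw = true,            /* read transfer (assumed field) */
                    .len = len,
                    .data.in = buf,
            };

            return i3c_device_do_priv_xfers(dev, &xfer, 1);
    }

    /* Hypothetical IBI handler + setup: resources are pre-allocated by
     * i3c_device_request_ibi(), delivery starts with
     * i3c_device_enable_ibi().
     */
    static void demo_ibi_handler(struct i3c_device *dev,
                                 const struct i3c_ibi_payload *payload)
    {
            /* process payload->len bytes at payload->data (assumed layout) */
    }

    static int demo_setup_ibi(struct i3c_device *dev)
    {
            struct i3c_ibi_setup req = {
                    .max_payload_len = 2,   /* illustrative value */
                    .num_slots = 10,        /* illustrative value */
                    .handler = demo_ibi_handler,
            };
            int ret;

            ret = i3c_device_request_ibi(dev, &req);
            if (ret)
                    return ret;

            return i3c_device_enable_ibi(dev);
    }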
diff --git a/drivers/i3c/internals.h b/drivers/i3c/internals.h
new file mode 100644
index 0000000..86b7b44
--- /dev/null
+++ b/drivers/i3c/internals.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_INTERNALS_H
+#define I3C_INTERNALS_H
+
+#include <linux/i3c/master.h>
+
+extern struct bus_type i3c_bus_type;
+
+void i3c_bus_normaluse_lock(struct i3c_bus *bus);
+void i3c_bus_normaluse_unlock(struct i3c_bus *bus);
+
+int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev);
+int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev);
+int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev);
+#endif /* I3C_INTERNALS_H */
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c
new file mode 100644
index 0000000..2dc628d
--- /dev/null
+++ b/drivers/i3c/master.c
@@ -0,0 +1,2659 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#include <linux/atomic.h>
+#include <linux/bug.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include "internals.h"
+
+static DEFINE_IDR(i3c_bus_idr);
+static DEFINE_MUTEX(i3c_core_lock);
+
+/**
+ * i3c_bus_maintenance_lock - Lock the bus for a maintenance operation
+ * @bus: I3C bus to take the lock on
+ *
+ * This function takes the bus lock so that no other operations can occur on
+ * the bus. This is needed for all kinds of bus maintenance operations, like
+ * - enabling/disabling slave events
+ * - re-triggering DAA
+ * - changing the dynamic address of a device
+ * - relinquishing mastership
+ * - ...
+ *
+ * The reason for this kind of locking is that we don't want drivers and core
+ * logic to rely on I3C device information that could be changed behind their
+ * back.
+ */
+static void i3c_bus_maintenance_lock(struct i3c_bus *bus)
+{
+ down_write(&bus->lock);
+}
+
+/**
+ * i3c_bus_maintenance_unlock - Release the bus lock after a maintenance
+ * operation
+ * @bus: I3C bus to release the lock on
+ *
+ * Should be called when the bus maintenance operation is done. See
+ * i3c_bus_maintenance_lock() for more details on what these maintenance
+ * operations are.
+ */
+static void i3c_bus_maintenance_unlock(struct i3c_bus *bus)
+{
+ up_write(&bus->lock);
+}
+
+/**
+ * i3c_bus_normaluse_lock - Lock the bus for a normal operation
+ * @bus: I3C bus to take the lock on
+ *
+ * This function takes the bus lock for any operation that is not a maintenance
+ * operation (see i3c_bus_maintenance_lock() for a non-exhaustive list of
+ * maintenance operations). Basically all communications with I3C devices are
+ * normal operations (HDR, SDR transfers or CCC commands that do not change bus
+ * state or I3C dynamic address).
+ *
+ * Note that this lock does not guarantee serialization of normal operations.
+ * In other words, transfer requests passed to the I3C master can be submitted
+ * in parallel, and I3C master drivers have to use their own locking to make
+ * sure two different communications are not intermixed, and that access to
+ * the output/input queue is not done while the engine is busy.
+ */
+void i3c_bus_normaluse_lock(struct i3c_bus *bus)
+{
+ down_read(&bus->lock);
+}
+
+/**
+ * i3c_bus_normaluse_unlock - Release the bus lock after a normal operation
+ * @bus: I3C bus to release the lock on
+ *
+ * Should be called when a normal operation is done. See
+ * i3c_bus_normaluse_lock() for more details on what these normal operations
+ * are.
+ */
+void i3c_bus_normaluse_unlock(struct i3c_bus *bus)
+{
+ up_read(&bus->lock);
+}
+
+static struct i3c_master_controller *dev_to_i3cmaster(struct device *dev)
+{
+ return container_of(dev, struct i3c_master_controller, dev);
+}
+
+static const struct device_type i3c_device_type;
+
+static struct i3c_bus *dev_to_i3cbus(struct device *dev)
+{
+ struct i3c_master_controller *master;
+
+ if (dev->type == &i3c_device_type)
+ return dev_to_i3cdev(dev)->bus;
+
+ master = dev_to_i3cmaster(dev);
+
+ return &master->bus;
+}
+
+static struct i3c_dev_desc *dev_to_i3cdesc(struct device *dev)
+{
+ struct i3c_master_controller *master;
+
+ if (dev->type == &i3c_device_type)
+ return dev_to_i3cdev(dev)->desc;
+
+ master = container_of(dev, struct i3c_master_controller, dev);
+
+ return master->this;
+}
+
+static ssize_t bcr_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%x\n", desc->info.bcr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(bcr);
+
+static ssize_t dcr_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%x\n", desc->info.dcr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(dcr);
+
+static ssize_t pid_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%llx\n", desc->info.pid);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(pid);
+
+static ssize_t dynamic_address_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ ret = sprintf(buf, "%02x\n", desc->info.dyn_addr);
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(dynamic_address);
+
+static const char * const hdrcap_strings[] = {
+ "hdr-ddr", "hdr-tsp", "hdr-tsl",
+};
+
+static ssize_t hdrcap_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+ struct i3c_dev_desc *desc;
+ ssize_t offset = 0, ret;
+ unsigned long caps;
+ int mode;
+
+ i3c_bus_normaluse_lock(bus);
+ desc = dev_to_i3cdesc(dev);
+ caps = desc->info.hdr_cap;
+ for_each_set_bit(mode, &caps, 8) {
+ if (mode >= ARRAY_SIZE(hdrcap_strings))
+ break;
+
+ if (!hdrcap_strings[mode])
+ continue;
+
+ ret = sprintf(buf + offset, offset ? " %s" : "%s",
+ hdrcap_strings[mode]);
+ if (ret < 0)
+ goto out;
+
+ offset += ret;
+ }
+
+ ret = sprintf(buf + offset, "\n");
+ if (ret < 0)
+ goto out;
+
+ ret = offset + ret;
+
+out:
+ i3c_bus_normaluse_unlock(bus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(hdrcap);
+
+static struct attribute *i3c_device_attrs[] = {
+ &dev_attr_bcr.attr,
+ &dev_attr_dcr.attr,
+ &dev_attr_pid.attr,
+ &dev_attr_dynamic_address.attr,
+ &dev_attr_hdrcap.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(i3c_device);
+
+static int i3c_device_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ struct i3c_device_info devinfo;
+ u16 manuf, part, ext;
+
+ i3c_device_get_info(i3cdev, &devinfo);
+ manuf = I3C_PID_MANUF_ID(devinfo.pid);
+ part = I3C_PID_PART_ID(devinfo.pid);
+ ext = I3C_PID_EXTRA_INFO(devinfo.pid);
+
+ if (I3C_PID_RND_LOWER_32BITS(devinfo.pid))
+ return add_uevent_var(env, "MODALIAS=i3c:dcr%02Xmanuf%04X",
+ devinfo.dcr, manuf);
+
+ return add_uevent_var(env,
+ "MODALIAS=i3c:dcr%02Xmanuf%04Xpart%04xext%04x",
+ devinfo.dcr, manuf, part, ext);
+}
+
+static const struct device_type i3c_device_type = {
+ .groups = i3c_device_groups,
+ .uevent = i3c_device_uevent,
+};
+
+static const struct i3c_device_id *
+i3c_device_match_id(struct i3c_device *i3cdev,
+ const struct i3c_device_id *id_table)
+{
+ struct i3c_device_info devinfo;
+ const struct i3c_device_id *id;
+
+ i3c_device_get_info(i3cdev, &devinfo);
+
+ /*
+ * The lower 32 bits of the provisional ID may just be filled with a random
+ * value; in that case we can only match using DCR info.
+ */
+ if (!I3C_PID_RND_LOWER_32BITS(devinfo.pid)) {
+ u16 manuf = I3C_PID_MANUF_ID(devinfo.pid);
+ u16 part = I3C_PID_PART_ID(devinfo.pid);
+ u16 ext_info = I3C_PID_EXTRA_INFO(devinfo.pid);
+
+ /* First try to match by manufacturer/part ID. */
+ for (id = id_table; id->match_flags != 0; id++) {
+ if ((id->match_flags & I3C_MATCH_MANUF_AND_PART) !=
+ I3C_MATCH_MANUF_AND_PART)
+ continue;
+
+ if (manuf != id->manuf_id || part != id->part_id)
+ continue;
+
+ if ((id->match_flags & I3C_MATCH_EXTRA_INFO) &&
+ ext_info != id->extra_info)
+ continue;
+
+ return id;
+ }
+ }
+
+ /* Fallback to DCR match. */
+ for (id = id_table; id->match_flags != 0; id++) {
+ if ((id->match_flags & I3C_MATCH_DCR) &&
+ id->dcr == devinfo.dcr)
+ return id;
+ }
+
+ return NULL;
+}
+
+static int i3c_device_match(struct device *dev, struct device_driver *drv)
+{
+ struct i3c_device *i3cdev;
+ struct i3c_driver *i3cdrv;
+
+ if (dev->type != &i3c_device_type)
+ return 0;
+
+ i3cdev = dev_to_i3cdev(dev);
+ i3cdrv = drv_to_i3cdrv(drv);
+ if (i3c_device_match_id(i3cdev, i3cdrv->id_table))
+ return 1;
+
+ return 0;
+}
+
+static int i3c_device_probe(struct device *dev)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
+
+ return driver->probe(i3cdev);
+}
+
+static int i3c_device_remove(struct device *dev)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+ struct i3c_driver *driver = drv_to_i3cdrv(dev->driver);
+ int ret;
+
+ ret = driver->remove(i3cdev);
+ if (ret)
+ return ret;
+
+ i3c_device_free_ibi(i3cdev);
+
+ return ret;
+}
+
+struct bus_type i3c_bus_type = {
+ .name = "i3c",
+ .match = i3c_device_match,
+ .probe = i3c_device_probe,
+ .remove = i3c_device_remove,
+};
+
+static enum i3c_addr_slot_status
+i3c_bus_get_addr_slot_status(struct i3c_bus *bus, u16 addr)
+{
+ int status, bitpos = addr * 2;
+
+ if (addr > I2C_MAX_ADDR)
+ return I3C_ADDR_SLOT_RSVD;
+
+ status = bus->addrslots[bitpos / BITS_PER_LONG];
+ status >>= bitpos % BITS_PER_LONG;
+
+ return status & I3C_ADDR_SLOT_STATUS_MASK;
+}
+
+static void i3c_bus_set_addr_slot_status(struct i3c_bus *bus, u16 addr,
+ enum i3c_addr_slot_status status)
+{
+ int bitpos = addr * 2;
+ unsigned long *ptr;
+
+ if (addr > I2C_MAX_ADDR)
+ return;
+
+ ptr = bus->addrslots + (bitpos / BITS_PER_LONG);
+ *ptr &= ~(I3C_ADDR_SLOT_STATUS_MASK << (bitpos % BITS_PER_LONG));
+ *ptr |= status << (bitpos % BITS_PER_LONG);
+}
+
+static bool i3c_bus_dev_addr_is_avail(struct i3c_bus *bus, u8 addr)
+{
+ enum i3c_addr_slot_status status;
+
+ status = i3c_bus_get_addr_slot_status(bus, addr);
+
+ return status == I3C_ADDR_SLOT_FREE;
+}
+
+static int i3c_bus_get_free_addr(struct i3c_bus *bus, u8 start_addr)
+{
+ enum i3c_addr_slot_status status;
+ u8 addr;
+
+ for (addr = start_addr; addr < I3C_MAX_ADDR; addr++) {
+ status = i3c_bus_get_addr_slot_status(bus, addr);
+ if (status == I3C_ADDR_SLOT_FREE)
+ return addr;
+ }
+
+ return -ENOMEM;
+}
+
+static void i3c_bus_init_addrslots(struct i3c_bus *bus)
+{
+ int i;
+
+ /* Addresses 0 to 7 are reserved. */
+ for (i = 0; i < 8; i++)
+ i3c_bus_set_addr_slot_status(bus, i, I3C_ADDR_SLOT_RSVD);
+
+ /*
+ * Reserve broadcast address and all addresses that might collide
+ * with the broadcast address when facing a single bit error.
+ */
+ i3c_bus_set_addr_slot_status(bus, I3C_BROADCAST_ADDR,
+ I3C_ADDR_SLOT_RSVD);
+ for (i = 0; i < 7; i++)
+ i3c_bus_set_addr_slot_status(bus, I3C_BROADCAST_ADDR ^ BIT(i),
+ I3C_ADDR_SLOT_RSVD);
+}
+
+static void i3c_bus_cleanup(struct i3c_bus *i3cbus)
+{
+ mutex_lock(&i3c_core_lock);
+ idr_remove(&i3c_bus_idr, i3cbus->id);
+ mutex_unlock(&i3c_core_lock);
+}
+
+static int i3c_bus_init(struct i3c_bus *i3cbus)
+{
+ int ret;
+
+ init_rwsem(&i3cbus->lock);
+ INIT_LIST_HEAD(&i3cbus->devs.i2c);
+ INIT_LIST_HEAD(&i3cbus->devs.i3c);
+ i3c_bus_init_addrslots(i3cbus);
+ i3cbus->mode = I3C_BUS_MODE_PURE;
+
+ mutex_lock(&i3c_core_lock);
+ ret = idr_alloc(&i3c_bus_idr, i3cbus, 0, 0, GFP_KERNEL);
+ mutex_unlock(&i3c_core_lock);
+
+ if (ret < 0)
+ return ret;
+
+ i3cbus->id = ret;
+
+ return 0;
+}
+
+static const char * const i3c_bus_mode_strings[] = {
+ [I3C_BUS_MODE_PURE] = "pure",
+ [I3C_BUS_MODE_MIXED_FAST] = "mixed-fast",
+ [I3C_BUS_MODE_MIXED_SLOW] = "mixed-slow",
+};
+
+static ssize_t mode_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ if (i3cbus->mode < 0 ||
+ i3cbus->mode >= ARRAY_SIZE(i3c_bus_mode_strings) ||
+ !i3c_bus_mode_strings[i3cbus->mode])
+ ret = sprintf(buf, "unknown\n");
+ else
+ ret = sprintf(buf, "%s\n", i3c_bus_mode_strings[i3cbus->mode]);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(mode);
+
+static ssize_t current_master_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ ret = sprintf(buf, "%d-%llx\n", i3cbus->id,
+ i3cbus->cur_master->info.pid);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(current_master);
+
+static ssize_t i3c_scl_frequency_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ ret = sprintf(buf, "%ld\n", i3cbus->scl_rate.i3c);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(i3c_scl_frequency);
+
+static ssize_t i2c_scl_frequency_show(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct i3c_bus *i3cbus = dev_to_i3cbus(dev);
+ ssize_t ret;
+
+ i3c_bus_normaluse_lock(i3cbus);
+ ret = sprintf(buf, "%ld\n", i3cbus->scl_rate.i2c);
+ i3c_bus_normaluse_unlock(i3cbus);
+
+ return ret;
+}
+static DEVICE_ATTR_RO(i2c_scl_frequency);
+
+static struct attribute *i3c_masterdev_attrs[] = {
+ &dev_attr_mode.attr,
+ &dev_attr_current_master.attr,
+ &dev_attr_i3c_scl_frequency.attr,
+ &dev_attr_i2c_scl_frequency.attr,
+ &dev_attr_bcr.attr,
+ &dev_attr_dcr.attr,
+ &dev_attr_pid.attr,
+ &dev_attr_dynamic_address.attr,
+ &dev_attr_hdrcap.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(i3c_masterdev);
+
+static void i3c_masterdev_release(struct device *dev)
+{
+ struct i3c_master_controller *master = dev_to_i3cmaster(dev);
+ struct i3c_bus *bus = dev_to_i3cbus(dev);
+
+ if (master->wq)
+ destroy_workqueue(master->wq);
+
+ WARN_ON(!list_empty(&bus->devs.i2c) || !list_empty(&bus->devs.i3c));
+ i3c_bus_cleanup(bus);
+
+ of_node_put(dev->of_node);
+}
+
+static const struct device_type i3c_masterdev_type = {
+ .groups = i3c_masterdev_groups,
+};
+
+int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode)
+{
+ i3cbus->mode = mode;
+
+ if (!i3cbus->scl_rate.i3c)
+ i3cbus->scl_rate.i3c = I3C_BUS_TYP_I3C_SCL_RATE;
+
+ if (!i3cbus->scl_rate.i2c) {
+ if (i3cbus->mode == I3C_BUS_MODE_MIXED_SLOW)
+ i3cbus->scl_rate.i2c = I3C_BUS_I2C_FM_SCL_RATE;
+ else
+ i3cbus->scl_rate.i2c = I3C_BUS_I2C_FM_PLUS_SCL_RATE;
+ }
+
+ /*
+	 * The I3C/I2C frequency may have been overridden; check that the
+	 * user-provided values do not exceed the maximum possible frequency.
+ */
+ if (i3cbus->scl_rate.i3c > I3C_BUS_MAX_I3C_SCL_RATE ||
+ i3cbus->scl_rate.i2c > I3C_BUS_I2C_FM_PLUS_SCL_RATE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct i3c_master_controller *
+i2c_adapter_to_i3c_master(struct i2c_adapter *adap)
+{
+ return container_of(adap, struct i3c_master_controller, i2c);
+}
+
+static struct i2c_adapter *
+i3c_master_to_i2c_adapter(struct i3c_master_controller *master)
+{
+ return &master->i2c;
+}
+
+static void i3c_master_free_i2c_dev(struct i2c_dev_desc *dev)
+{
+ kfree(dev);
+}
+
+static struct i2c_dev_desc *
+i3c_master_alloc_i2c_dev(struct i3c_master_controller *master,
+ const struct i2c_dev_boardinfo *boardinfo)
+{
+ struct i2c_dev_desc *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->common.master = master;
+ dev->boardinfo = boardinfo;
+
+ return dev;
+}
+
+static void *i3c_ccc_cmd_dest_init(struct i3c_ccc_cmd_dest *dest, u8 addr,
+ u16 payloadlen)
+{
+ dest->addr = addr;
+ dest->payload.len = payloadlen;
+ if (payloadlen)
+ dest->payload.data = kzalloc(payloadlen, GFP_KERNEL);
+ else
+ dest->payload.data = NULL;
+
+ return dest->payload.data;
+}
+
+static void i3c_ccc_cmd_dest_cleanup(struct i3c_ccc_cmd_dest *dest)
+{
+ kfree(dest->payload.data);
+}
+
+static void i3c_ccc_cmd_init(struct i3c_ccc_cmd *cmd, bool rnw, u8 id,
+ struct i3c_ccc_cmd_dest *dests,
+ unsigned int ndests)
+{
+ cmd->rnw = rnw ? 1 : 0;
+ cmd->id = id;
+ cmd->dests = dests;
+ cmd->ndests = ndests;
+ cmd->err = I3C_ERROR_UNKNOWN;
+}
+
+static int i3c_master_send_ccc_cmd_locked(struct i3c_master_controller *master,
+ struct i3c_ccc_cmd *cmd)
+{
+ int ret;
+
+ if (!cmd || !master)
+ return -EINVAL;
+
+ if (WARN_ON(master->init_done &&
+ !rwsem_is_locked(&master->bus.lock)))
+ return -EINVAL;
+
+ if (!master->ops->send_ccc_cmd)
+ return -ENOTSUPP;
+
+ if ((cmd->id & I3C_CCC_DIRECT) && (!cmd->dests || !cmd->ndests))
+ return -EINVAL;
+
+ if (master->ops->supports_ccc_cmd &&
+ !master->ops->supports_ccc_cmd(master, cmd))
+ return -ENOTSUPP;
+
+ ret = master->ops->send_ccc_cmd(master, cmd);
+ if (ret) {
+ if (cmd->err != I3C_ERROR_UNKNOWN)
+ return cmd->err;
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct i2c_dev_desc *
+i3c_master_find_i2c_dev_by_addr(const struct i3c_master_controller *master,
+ u16 addr)
+{
+ struct i2c_dev_desc *dev;
+
+ i3c_bus_for_each_i2cdev(&master->bus, dev) {
+ if (dev->boardinfo->base.addr == addr)
+ return dev;
+ }
+
+ return NULL;
+}
+
+/**
+ * i3c_master_get_free_addr() - get a free address on the bus
+ * @master: I3C master object
+ * @start_addr: where to start searching
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: the first free address starting at @start_addr (included), or
+ * -ENOMEM if there are no free addresses left.
+ */
+int i3c_master_get_free_addr(struct i3c_master_controller *master,
+ u8 start_addr)
+{
+ return i3c_bus_get_free_addr(&master->bus, start_addr);
+}
+EXPORT_SYMBOL_GPL(i3c_master_get_free_addr);
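+
+/*
+ * Illustrative sketch (not part of this patch): a controller driver that
+ * assigns dynamic addresses itself would typically query the core for a
+ * free address first. foo_hw_assign_addr() is a hypothetical helper:
+ *
+ *	int ret;
+ *
+ *	ret = i3c_master_get_free_addr(master, 0);
+ *	if (ret < 0)
+ *		return ret;
+ *
+ *	return foo_hw_assign_addr(master, ret);
+ */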
+
+static void i3c_device_release(struct device *dev)
+{
+ struct i3c_device *i3cdev = dev_to_i3cdev(dev);
+
+ WARN_ON(i3cdev->desc);
+
+ of_node_put(i3cdev->dev.of_node);
+ kfree(i3cdev);
+}
+
+static void i3c_master_free_i3c_dev(struct i3c_dev_desc *dev)
+{
+ kfree(dev);
+}
+
+static struct i3c_dev_desc *
+i3c_master_alloc_i3c_dev(struct i3c_master_controller *master,
+ const struct i3c_device_info *info)
+{
+ struct i3c_dev_desc *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev->common.master = master;
+ dev->info = *info;
+ mutex_init(&dev->ibi_lock);
+
+ return dev;
+}
+
+static int i3c_master_rstdaa_locked(struct i3c_master_controller *master,
+ u8 addr)
+{
+ enum i3c_addr_slot_status addrstat;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ if (!master)
+ return -EINVAL;
+
+ addrstat = i3c_bus_get_addr_slot_status(&master->bus, addr);
+ if (addr != I3C_BROADCAST_ADDR && addrstat != I3C_ADDR_SLOT_I3C_DEV)
+ return -EINVAL;
+
+ i3c_ccc_cmd_dest_init(&dest, addr, 0);
+ i3c_ccc_cmd_init(&cmd, false,
+ I3C_CCC_RSTDAA(addr == I3C_BROADCAST_ADDR),
+ &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+/**
+ * i3c_master_entdaa_locked() - start a DAA (Dynamic Address Assignment)
+ * procedure
+ * @master: master used to send frames on the bus
+ *
+ * Send a ENTDAA CCC command to start a DAA procedure.
+ *
+ * Note that this function only sends the ENTDAA CCC command, all the logic
+ * behind dynamic address assignment has to be handled in the I3C master
+ * driver.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_entdaa_locked(struct i3c_master_controller *master)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR, 0);
+ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_ENTDAA, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_entdaa_locked);
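+
+/*
+ * Illustrative sketch (not part of this patch): a minimal
+ * &i3c_master_controller_ops->do_daa() implementation for a controller that
+ * only provides raw CCC transfers could be built around
+ * i3c_master_entdaa_locked(). foo_hw_get_daa_addr() is hypothetical and is
+ * assumed to return the next address assigned by the hardware, or 0 when
+ * DAA is over:
+ *
+ *	static int foo_do_daa(struct i3c_master_controller *m)
+ *	{
+ *		int ret, addr;
+ *
+ *		ret = i3c_master_entdaa_locked(m);
+ *		if (ret && ret != I3C_ERROR_M2)
+ *			return ret;
+ *
+ *		while ((addr = foo_hw_get_daa_addr(m)) > 0) {
+ *			ret = i3c_master_add_i3c_dev_locked(m, addr);
+ *			if (ret)
+ *				return ret;
+ *		}
+ *
+ *		return 0;
+ *	}
+ */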
+
+static int i3c_master_enec_disec_locked(struct i3c_master_controller *master,
+ u8 addr, bool enable, u8 evts)
+{
+ struct i3c_ccc_events *events;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ events = i3c_ccc_cmd_dest_init(&dest, addr, sizeof(*events));
+ if (!events)
+ return -ENOMEM;
+
+ events->events = evts;
+ i3c_ccc_cmd_init(&cmd, false,
+ enable ?
+ I3C_CCC_ENEC(addr == I3C_BROADCAST_ADDR) :
+ I3C_CCC_DISEC(addr == I3C_BROADCAST_ADDR),
+ &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+/**
+ * i3c_master_disec_locked() - send a DISEC CCC command
+ * @master: master used to send frames on the bus
+ * @addr: a valid I3C slave address or %I3C_BROADCAST_ADDR
+ * @evts: events to disable
+ *
+ * Send a DISEC CCC command to disable some or all events coming from a
+ * specific slave, or all devices if @addr is %I3C_BROADCAST_ADDR.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts)
+{
+ return i3c_master_enec_disec_locked(master, addr, false, evts);
+}
+EXPORT_SYMBOL_GPL(i3c_master_disec_locked);
+
+/**
+ * i3c_master_enec_locked() - send an ENEC CCC command
+ * @master: master used to send frames on the bus
+ * @addr: a valid I3C slave address or %I3C_BROADCAST_ADDR
+ * @evts: events to enable
+ *
+ * Sends an ENEC CCC command to enable some or all events coming from a
+ * specific slave, or all devices if @addr is %I3C_BROADCAST_ADDR.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts)
+{
+ return i3c_master_enec_disec_locked(master, addr, true, evts);
+}
+EXPORT_SYMBOL_GPL(i3c_master_enec_locked);
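+
+/*
+ * Illustrative sketch (not part of this patch): a controller driver will
+ * typically pair these helpers with its IBI hooks, e.g. send an ENEC for
+ * the SIR event when &i3c_master_controller_ops->enable_ibi() is called.
+ * foo_hw_arm_ibi() is hypothetical:
+ *
+ *	static int foo_enable_ibi(struct i3c_dev_desc *dev)
+ *	{
+ *		struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ *		int ret;
+ *
+ *		ret = foo_hw_arm_ibi(m, dev);
+ *		if (ret)
+ *			return ret;
+ *
+ *		return i3c_master_enec_locked(m, dev->info.dyn_addr,
+ *					      I3C_CCC_EVENT_SIR);
+ *	}
+ */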
+
+/**
+ * i3c_master_defslvs_locked() - send a DEFSLVS CCC command
+ * @master: master used to send frames on the bus
+ *
+ * Send a DEFSLVS CCC command containing all the devices known to the @master.
+ * This is useful when you have secondary masters on the bus to propagate
+ * device information.
+ *
+ * This should be called after all I3C devices have been discovered (in other
+ * words, after the DAA procedure has finished) and instantiated in
+ * &i3c_master_controller_ops->bus_init().
+ * It should also be called if a master ACKed a Hot-Join request and assigned
+ * a dynamic address to the device joining the bus.
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a positive I3C error code if the error is
+ * one of the official Mx error codes, and a negative error code otherwise.
+ */
+int i3c_master_defslvs_locked(struct i3c_master_controller *master)
+{
+ struct i3c_ccc_defslvs *defslvs;
+ struct i3c_ccc_dev_desc *desc;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_dev_desc *i3cdev;
+ struct i2c_dev_desc *i2cdev;
+ struct i3c_ccc_cmd cmd;
+ struct i3c_bus *bus;
+ bool send = false;
+ int ndevs = 0, ret;
+
+ if (!master)
+ return -EINVAL;
+
+ bus = i3c_master_get_bus(master);
+ i3c_bus_for_each_i3cdev(bus, i3cdev) {
+ ndevs++;
+
+ if (i3cdev == master->this)
+ continue;
+
+ if (I3C_BCR_DEVICE_ROLE(i3cdev->info.bcr) ==
+ I3C_BCR_I3C_MASTER)
+ send = true;
+ }
+
+ /* No other master on the bus, skip DEFSLVS. */
+ if (!send)
+ return 0;
+
+ i3c_bus_for_each_i2cdev(bus, i2cdev)
+ ndevs++;
+
+ defslvs = i3c_ccc_cmd_dest_init(&dest, I3C_BROADCAST_ADDR,
+ sizeof(*defslvs) +
+ ((ndevs - 1) *
+ sizeof(struct i3c_ccc_dev_desc)));
+ if (!defslvs)
+ return -ENOMEM;
+
+ defslvs->count = ndevs;
+ defslvs->master.bcr = master->this->info.bcr;
+ defslvs->master.dcr = master->this->info.dcr;
+ defslvs->master.dyn_addr = master->this->info.dyn_addr << 1;
+ defslvs->master.static_addr = I3C_BROADCAST_ADDR << 1;
+
+ desc = defslvs->slaves;
+ i3c_bus_for_each_i2cdev(bus, i2cdev) {
+ desc->lvr = i2cdev->boardinfo->lvr;
+ desc->static_addr = i2cdev->boardinfo->base.addr << 1;
+ desc++;
+ }
+
+ i3c_bus_for_each_i3cdev(bus, i3cdev) {
+ /* Skip the I3C dev representing this master. */
+ if (i3cdev == master->this)
+ continue;
+
+ desc->bcr = i3cdev->info.bcr;
+ desc->dcr = i3cdev->info.dcr;
+ desc->dyn_addr = i3cdev->info.dyn_addr << 1;
+ desc->static_addr = i3cdev->info.static_addr << 1;
+ desc++;
+ }
+
+ i3c_ccc_cmd_init(&cmd, false, I3C_CCC_DEFSLVS, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_defslvs_locked);
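+
+/*
+ * Illustrative sketch (not part of this patch): a controller driver that
+ * cares about secondary masters would typically broadcast its device table
+ * once all devices have been discovered, for instance at the end of its
+ * ->do_daa() hook or after ACKing a Hot-Join request:
+ *
+ *	ret = i3c_master_defslvs_locked(master);
+ *	if (ret && ret != I3C_ERROR_M2)
+ *		return ret;
+ */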
+
+static int i3c_master_setda_locked(struct i3c_master_controller *master,
+ u8 oldaddr, u8 newaddr, bool setdasa)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_setda *setda;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ if (!oldaddr || !newaddr)
+ return -EINVAL;
+
+ setda = i3c_ccc_cmd_dest_init(&dest, oldaddr, sizeof(*setda));
+ if (!setda)
+ return -ENOMEM;
+
+ setda->addr = newaddr << 1;
+ i3c_ccc_cmd_init(&cmd, false,
+ setdasa ? I3C_CCC_SETDASA : I3C_CCC_SETNEWDA,
+ &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_setdasa_locked(struct i3c_master_controller *master,
+ u8 static_addr, u8 dyn_addr)
+{
+ return i3c_master_setda_locked(master, static_addr, dyn_addr, true);
+}
+
+static int i3c_master_setnewda_locked(struct i3c_master_controller *master,
+ u8 oldaddr, u8 newaddr)
+{
+ return i3c_master_setda_locked(master, oldaddr, newaddr, false);
+}
+
+static int i3c_master_getmrl_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_cmd_dest dest;
+ unsigned int expected_len;
+ struct i3c_ccc_mrl *mrl;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ mrl = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*mrl));
+ if (!mrl)
+ return -ENOMEM;
+
+ /*
+	 * When the device does not have an IBI payload, GETMRL only returns
+	 * 2 bytes of data.
+ */
+ if (!(info->bcr & I3C_BCR_IBI_PAYLOAD))
+ dest.payload.len -= 1;
+
+ expected_len = dest.payload.len;
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMRL, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ if (dest.payload.len != expected_len) {
+ ret = -EIO;
+ goto out;
+ }
+
+ info->max_read_len = be16_to_cpu(mrl->read_len);
+
+ if (info->bcr & I3C_BCR_IBI_PAYLOAD)
+ info->max_ibi_len = mrl->ibi_len;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getmwl_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_mwl *mwl;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ mwl = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*mwl));
+ if (!mwl)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMWL, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+	if (dest.payload.len != sizeof(*mwl)) {
+		ret = -EIO;
+		goto out;
+	}
+
+ info->max_write_len = be16_to_cpu(mwl->len);
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getmxds_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getmxds *getmaxds;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ getmaxds = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr,
+ sizeof(*getmaxds));
+ if (!getmaxds)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETMXDS, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ if (dest.payload.len != 2 && dest.payload.len != 5) {
+ ret = -EIO;
+ goto out;
+ }
+
+ info->max_read_ds = getmaxds->maxrd;
+ info->max_write_ds = getmaxds->maxwr;
+ if (dest.payload.len == 5)
+ info->max_read_turnaround = getmaxds->maxrdturn[0] |
+ ((u32)getmaxds->maxrdturn[1] << 8) |
+ ((u32)getmaxds->maxrdturn[2] << 16);
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_gethdrcap_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_gethdrcap *gethdrcap;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ gethdrcap = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr,
+ sizeof(*gethdrcap));
+ if (!gethdrcap)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETHDRCAP, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ if (dest.payload.len != 1) {
+ ret = -EIO;
+ goto out;
+ }
+
+ info->hdr_cap = gethdrcap->modes;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getpid_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getpid *getpid;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret, i;
+
+ getpid = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getpid));
+ if (!getpid)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETPID, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ info->pid = 0;
+ for (i = 0; i < sizeof(getpid->pid); i++) {
+ int sft = (sizeof(getpid->pid) - i - 1) * 8;
+
+ info->pid |= (u64)getpid->pid[i] << sft;
+ }
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getbcr_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getbcr *getbcr;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ getbcr = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getbcr));
+ if (!getbcr)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETBCR, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ info->bcr = getbcr->bcr;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_getdcr_locked(struct i3c_master_controller *master,
+ struct i3c_device_info *info)
+{
+ struct i3c_ccc_getdcr *getdcr;
+ struct i3c_ccc_cmd_dest dest;
+ struct i3c_ccc_cmd cmd;
+ int ret;
+
+ getdcr = i3c_ccc_cmd_dest_init(&dest, info->dyn_addr, sizeof(*getdcr));
+ if (!getdcr)
+ return -ENOMEM;
+
+ i3c_ccc_cmd_init(&cmd, true, I3C_CCC_GETDCR, &dest, 1);
+ ret = i3c_master_send_ccc_cmd_locked(master, &cmd);
+ if (ret)
+ goto out;
+
+ info->dcr = getdcr->dcr;
+
+out:
+ i3c_ccc_cmd_dest_cleanup(&dest);
+
+ return ret;
+}
+
+static int i3c_master_retrieve_dev_info(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ enum i3c_addr_slot_status slot_status;
+ int ret;
+
+ if (!dev->info.dyn_addr)
+ return -EINVAL;
+
+ slot_status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.dyn_addr);
+ if (slot_status == I3C_ADDR_SLOT_RSVD ||
+ slot_status == I3C_ADDR_SLOT_I2C_DEV)
+ return -EINVAL;
+
+ ret = i3c_master_getpid_locked(master, &dev->info);
+ if (ret)
+ return ret;
+
+ ret = i3c_master_getbcr_locked(master, &dev->info);
+ if (ret)
+ return ret;
+
+ ret = i3c_master_getdcr_locked(master, &dev->info);
+ if (ret)
+ return ret;
+
+ if (dev->info.bcr & I3C_BCR_MAX_DATA_SPEED_LIM) {
+ ret = i3c_master_getmxds_locked(master, &dev->info);
+ if (ret)
+ return ret;
+ }
+
+ if (dev->info.bcr & I3C_BCR_IBI_PAYLOAD)
+ dev->info.max_ibi_len = 1;
+
+ i3c_master_getmrl_locked(master, &dev->info);
+ i3c_master_getmwl_locked(master, &dev->info);
+
+ if (dev->info.bcr & I3C_BCR_HDR_CAP) {
+ ret = i3c_master_gethdrcap_locked(master, &dev->info);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void i3c_master_put_i3c_addrs(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+ if (dev->info.static_addr)
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.static_addr,
+ I3C_ADDR_SLOT_FREE);
+
+ if (dev->info.dyn_addr)
+ i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
+ I3C_ADDR_SLOT_FREE);
+
+	if (dev->boardinfo && dev->boardinfo->init_dyn_addr)
+		i3c_bus_set_addr_slot_status(&master->bus,
+					     dev->boardinfo->init_dyn_addr,
+					     I3C_ADDR_SLOT_FREE);
+}
+
+static int i3c_master_get_i3c_addrs(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ enum i3c_addr_slot_status status;
+
+ if (!dev->info.static_addr && !dev->info.dyn_addr)
+ return 0;
+
+ if (dev->info.static_addr) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.static_addr);
+ if (status != I3C_ADDR_SLOT_FREE)
+ return -EBUSY;
+
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.static_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+ }
+
+ /*
+ * ->init_dyn_addr should have been reserved before that, so, if we're
+ * trying to apply a pre-reserved dynamic address, we should not try
+ * to reserve the address slot a second time.
+ */
+ if (dev->info.dyn_addr &&
+ (!dev->boardinfo ||
+ dev->boardinfo->init_dyn_addr != dev->info.dyn_addr)) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.dyn_addr);
+ if (status != I3C_ADDR_SLOT_FREE)
+ goto err_release_static_addr;
+
+ i3c_bus_set_addr_slot_status(&master->bus, dev->info.dyn_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+ }
+
+ return 0;
+
+err_release_static_addr:
+ if (dev->info.static_addr)
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.static_addr,
+ I3C_ADDR_SLOT_FREE);
+
+ return -EBUSY;
+}
+
+static int i3c_master_attach_i3c_dev(struct i3c_master_controller *master,
+ struct i3c_dev_desc *dev)
+{
+ int ret;
+
+ /*
+ * We don't attach devices to the controller until they are
+ * addressable on the bus.
+ */
+ if (!dev->info.static_addr && !dev->info.dyn_addr)
+ return 0;
+
+ ret = i3c_master_get_i3c_addrs(dev);
+ if (ret)
+ return ret;
+
+ /* Do not attach the master device itself. */
+ if (master->this != dev && master->ops->attach_i3c_dev) {
+ ret = master->ops->attach_i3c_dev(dev);
+ if (ret) {
+ i3c_master_put_i3c_addrs(dev);
+ return ret;
+ }
+ }
+
+ list_add_tail(&dev->common.node, &master->bus.devs.i3c);
+
+ return 0;
+}
+
+static int i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
+ u8 old_dyn_addr)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ enum i3c_addr_slot_status status;
+ int ret;
+
+ if (dev->info.dyn_addr != old_dyn_addr) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ dev->info.dyn_addr);
+ if (status != I3C_ADDR_SLOT_FREE)
+ return -EBUSY;
+ i3c_bus_set_addr_slot_status(&master->bus,
+ dev->info.dyn_addr,
+ I3C_ADDR_SLOT_I3C_DEV);
+ }
+
+ if (master->ops->reattach_i3c_dev) {
+ ret = master->ops->reattach_i3c_dev(dev, old_dyn_addr);
+ if (ret) {
+ i3c_master_put_i3c_addrs(dev);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+ /* Do not detach the master device itself. */
+ if (master->this != dev && master->ops->detach_i3c_dev)
+ master->ops->detach_i3c_dev(dev);
+
+ i3c_master_put_i3c_addrs(dev);
+ list_del(&dev->common.node);
+}
+
+static int i3c_master_attach_i2c_dev(struct i3c_master_controller *master,
+ struct i2c_dev_desc *dev)
+{
+ int ret;
+
+ if (master->ops->attach_i2c_dev) {
+ ret = master->ops->attach_i2c_dev(dev);
+ if (ret)
+ return ret;
+ }
+
+ list_add_tail(&dev->common.node, &master->bus.devs.i2c);
+
+ return 0;
+}
+
+static void i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i2c_dev_get_master(dev);
+
+ list_del(&dev->common.node);
+
+ if (master->ops->detach_i2c_dev)
+ master->ops->detach_i2c_dev(dev);
+}
+
+static void i3c_master_pre_assign_dyn_addr(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ int ret;
+
+ if (!dev->boardinfo || !dev->boardinfo->init_dyn_addr ||
+ !dev->boardinfo->static_addr)
+ return;
+
+ ret = i3c_master_setdasa_locked(master, dev->info.static_addr,
+ dev->boardinfo->init_dyn_addr);
+ if (ret)
+ return;
+
+ dev->info.dyn_addr = dev->boardinfo->init_dyn_addr;
+ ret = i3c_master_reattach_i3c_dev(dev, 0);
+ if (ret)
+ goto err_rstdaa;
+
+ ret = i3c_master_retrieve_dev_info(dev);
+ if (ret)
+ goto err_rstdaa;
+
+ return;
+
+err_rstdaa:
+ i3c_master_rstdaa_locked(master, dev->boardinfo->init_dyn_addr);
+}
+
+static void
+i3c_master_register_new_i3c_devs(struct i3c_master_controller *master)
+{
+ struct i3c_dev_desc *desc;
+ int ret;
+
+ if (!master->init_done)
+ return;
+
+ i3c_bus_for_each_i3cdev(&master->bus, desc) {
+ if (desc->dev || !desc->info.dyn_addr || desc == master->this)
+ continue;
+
+ desc->dev = kzalloc(sizeof(*desc->dev), GFP_KERNEL);
+ if (!desc->dev)
+ continue;
+
+ desc->dev->bus = &master->bus;
+ desc->dev->desc = desc;
+ desc->dev->dev.parent = &master->dev;
+ desc->dev->dev.type = &i3c_device_type;
+ desc->dev->dev.bus = &i3c_bus_type;
+ desc->dev->dev.release = i3c_device_release;
+ dev_set_name(&desc->dev->dev, "%d-%llx", master->bus.id,
+ desc->info.pid);
+
+ if (desc->boardinfo)
+ desc->dev->dev.of_node = desc->boardinfo->of_node;
+
+ ret = device_register(&desc->dev->dev);
+ if (ret)
+ dev_err(&master->dev,
+ "Failed to add I3C device (err = %d)\n", ret);
+ }
+}
+
+/**
+ * i3c_master_do_daa() - do a DAA (Dynamic Address Assignment)
+ * @master: master doing the DAA
+ *
+ * This function calls the controller specific ->do_daa() hook with the bus
+ * lock held in maintenance mode, then registers all I3C devices that were
+ * discovered during the DAA procedure (i.e. the ones added with
+ * i3c_master_add_i3c_dev_locked()).
+ *
+ * The bus lock is taken internally, so this function must be called without
+ * the bus lock held.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_do_daa(struct i3c_master_controller *master)
+{
+ int ret;
+
+ i3c_bus_maintenance_lock(&master->bus);
+ ret = master->ops->do_daa(master);
+ i3c_bus_maintenance_unlock(&master->bus);
+
+ if (ret)
+ return ret;
+
+ i3c_bus_normaluse_lock(&master->bus);
+ i3c_master_register_new_i3c_devs(master);
+ i3c_bus_normaluse_unlock(&master->bus);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i3c_master_do_daa);
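+
+/*
+ * Illustrative sketch (not part of this patch): since this function takes
+ * the bus lock itself, controller drivers usually call it from a workqueue
+ * when they detect a Hot-Join request. foo_master and its hj_work/base
+ * members are hypothetical:
+ *
+ *	static void foo_hj_work(struct work_struct *work)
+ *	{
+ *		struct foo_master *m = container_of(work, struct foo_master,
+ *						    hj_work);
+ *
+ *		i3c_master_do_daa(&m->base);
+ *	}
+ */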
+
+/**
+ * i3c_master_set_info() - set master device information
+ * @master: master used to send frames on the bus
+ * @info: I3C device information
+ *
+ * Set master device info. This should be called from
+ * &i3c_master_controller_ops->bus_init().
+ *
+ * Not all &i3c_device_info fields are meaningful for a master device.
+ * Here is a list of fields that should be properly filled:
+ *
+ * - &i3c_device_info->dyn_addr
+ * - &i3c_device_info->bcr
+ * - &i3c_device_info->dcr
+ * - &i3c_device_info->pid
+ * - &i3c_device_info->hdr_cap if %I3C_BCR_HDR_CAP bit is set in
+ * &i3c_device_info->bcr
+ *
+ * This function must be called with the bus lock held in maintenance mode.
+ *
+ * Return: 0 if @info contains valid information (not every piece of
+ * information can be checked, but we can at least make sure @info->dyn_addr
+ * and @info->bcr are correct), -EINVAL otherwise.
+ */
+int i3c_master_set_info(struct i3c_master_controller *master,
+ const struct i3c_device_info *info)
+{
+ struct i3c_dev_desc *i3cdev;
+ int ret;
+
+ if (!i3c_bus_dev_addr_is_avail(&master->bus, info->dyn_addr))
+ return -EINVAL;
+
+ if (I3C_BCR_DEVICE_ROLE(info->bcr) == I3C_BCR_I3C_MASTER &&
+ master->secondary)
+ return -EINVAL;
+
+ if (master->this)
+ return -EINVAL;
+
+ i3cdev = i3c_master_alloc_i3c_dev(master, info);
+ if (IS_ERR(i3cdev))
+ return PTR_ERR(i3cdev);
+
+ master->this = i3cdev;
+ master->bus.cur_master = master->this;
+
+ ret = i3c_master_attach_i3c_dev(master, i3cdev);
+ if (ret)
+ goto err_free_dev;
+
+ return 0;
+
+err_free_dev:
+ i3c_master_free_i3c_dev(i3cdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_set_info);
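+
+/*
+ * Illustrative sketch (not part of this patch): a typical ->bus_init()
+ * implementation reserves a dynamic address for the master, fills the
+ * &i3c_device_info fields listed above and registers them with
+ * i3c_master_set_info(). The foo_hw_*() helpers are hypothetical:
+ *
+ *	static int foo_bus_init(struct i3c_master_controller *m)
+ *	{
+ *		struct i3c_device_info info = { };
+ *		int ret;
+ *
+ *		ret = i3c_master_get_free_addr(m, 0);
+ *		if (ret < 0)
+ *			return ret;
+ *
+ *		info.dyn_addr = ret;
+ *		info.dcr = 0;
+ *		info.bcr = foo_hw_get_bcr(m);
+ *		info.pid = foo_hw_get_pid(m);
+ *
+ *		ret = foo_hw_set_own_addr(m, info.dyn_addr);
+ *		if (ret)
+ *			return ret;
+ *
+ *		return i3c_master_set_info(m, &info);
+ *	}
+ */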
+
+static void i3c_master_detach_free_devs(struct i3c_master_controller *master)
+{
+ struct i3c_dev_desc *i3cdev, *i3ctmp;
+ struct i2c_dev_desc *i2cdev, *i2ctmp;
+
+ list_for_each_entry_safe(i3cdev, i3ctmp, &master->bus.devs.i3c,
+ common.node) {
+ i3c_master_detach_i3c_dev(i3cdev);
+
+ if (i3cdev->boardinfo && i3cdev->boardinfo->init_dyn_addr)
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i3cdev->boardinfo->init_dyn_addr,
+ I3C_ADDR_SLOT_FREE);
+
+ i3c_master_free_i3c_dev(i3cdev);
+ }
+
+ list_for_each_entry_safe(i2cdev, i2ctmp, &master->bus.devs.i2c,
+ common.node) {
+ i3c_master_detach_i2c_dev(i2cdev);
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i2cdev->boardinfo->base.addr,
+ I3C_ADDR_SLOT_FREE);
+ i3c_master_free_i2c_dev(i2cdev);
+ }
+}
+
+/**
+ * i3c_master_bus_init() - initialize an I3C bus
+ * @master: main master initializing the bus
+ *
+ * This function follows all the initialization steps described in the I3C
+ * specification:
+ *
+ * 1. Attach I2C and statically defined I3C devs to the master so that the
+ * master can fill its internal device table appropriately
+ *
+ * 2. Call &i3c_master_controller_ops->bus_init() method to initialize
+ * the master controller. That's usually where the bus mode is selected
+ * (pure bus or mixed fast/slow bus)
+ *
+ * 3. Instruct all devices on the bus to drop their dynamic address. This is
+ * particularly important when the bus was previously configured by someone
+ * else (for example the bootloader)
+ *
+ * 4. Disable all slave events.
+ *
+ * 5. Pre-assign dynamic addresses requested by the FW with SETDASA for I3C
+ * devices that have a static address
+ *
+ * 6. Do a DAA (Dynamic Address Assignment) to assign dynamic addresses to all
+ * remaining I3C devices
+ *
+ * Once this is done, all I3C and I2C devices should be usable.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+static int i3c_master_bus_init(struct i3c_master_controller *master)
+{
+ enum i3c_addr_slot_status status;
+ struct i2c_dev_boardinfo *i2cboardinfo;
+ struct i3c_dev_boardinfo *i3cboardinfo;
+ struct i3c_dev_desc *i3cdev;
+ struct i2c_dev_desc *i2cdev;
+ int ret;
+
+ /*
+ * First attach all devices with static definitions provided by the
+ * FW.
+ */
+ list_for_each_entry(i2cboardinfo, &master->boardinfo.i2c, node) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ i2cboardinfo->base.addr);
+ if (status != I3C_ADDR_SLOT_FREE) {
+ ret = -EBUSY;
+ goto err_detach_devs;
+ }
+
+ i3c_bus_set_addr_slot_status(&master->bus,
+ i2cboardinfo->base.addr,
+ I3C_ADDR_SLOT_I2C_DEV);
+
+ i2cdev = i3c_master_alloc_i2c_dev(master, i2cboardinfo);
+ if (IS_ERR(i2cdev)) {
+ ret = PTR_ERR(i2cdev);
+ goto err_detach_devs;
+ }
+
+ ret = i3c_master_attach_i2c_dev(master, i2cdev);
+ if (ret) {
+ i3c_master_free_i2c_dev(i2cdev);
+ goto err_detach_devs;
+ }
+ }
+ list_for_each_entry(i3cboardinfo, &master->boardinfo.i3c, node) {
+ struct i3c_device_info info = {
+ .static_addr = i3cboardinfo->static_addr,
+ };
+
+ if (i3cboardinfo->init_dyn_addr) {
+ status = i3c_bus_get_addr_slot_status(&master->bus,
+ i3cboardinfo->init_dyn_addr);
+ if (status != I3C_ADDR_SLOT_FREE) {
+ ret = -EBUSY;
+ goto err_detach_devs;
+ }
+ }
+
+ i3cdev = i3c_master_alloc_i3c_dev(master, &info);
+ if (IS_ERR(i3cdev)) {
+ ret = PTR_ERR(i3cdev);
+ goto err_detach_devs;
+ }
+
+ i3cdev->boardinfo = i3cboardinfo;
+
+ ret = i3c_master_attach_i3c_dev(master, i3cdev);
+ if (ret) {
+ i3c_master_free_i3c_dev(i3cdev);
+ goto err_detach_devs;
+ }
+ }
+
+ /*
+ * Now execute the controller specific ->bus_init() routine, which
+ * might configure its internal logic to match the bus limitations.
+ */
+ ret = master->ops->bus_init(master);
+ if (ret)
+ goto err_detach_devs;
+
+ /*
+ * The master device should have been instantiated in ->bus_init(),
+ * complain if this was not the case.
+ */
+ if (!master->this) {
+ dev_err(&master->dev,
+ "master_set_info() was not called in ->bus_init()\n");
+ ret = -EINVAL;
+ goto err_bus_cleanup;
+ }
+
+ /*
+	 * Reset all dynamic addresses that may have been assigned before
+	 * (by the bootloader for example).
+ */
+ ret = i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
+ if (ret && ret != I3C_ERROR_M2)
+ goto err_bus_cleanup;
+
+ /* Disable all slave events before starting DAA. */
+ ret = i3c_master_disec_locked(master, I3C_BROADCAST_ADDR,
+ I3C_CCC_EVENT_SIR | I3C_CCC_EVENT_MR |
+ I3C_CCC_EVENT_HJ);
+ if (ret && ret != I3C_ERROR_M2)
+ goto err_bus_cleanup;
+
+ /*
+	 * Pre-assign dynamic addresses and retrieve device information if
+ * needed.
+ */
+ i3c_bus_for_each_i3cdev(&master->bus, i3cdev)
+ i3c_master_pre_assign_dyn_addr(i3cdev);
+
+ ret = i3c_master_do_daa(master);
+ if (ret)
+ goto err_rstdaa;
+
+ return 0;
+
+err_rstdaa:
+ i3c_master_rstdaa_locked(master, I3C_BROADCAST_ADDR);
+
+err_bus_cleanup:
+ if (master->ops->bus_cleanup)
+ master->ops->bus_cleanup(master);
+
+err_detach_devs:
+ i3c_master_detach_free_devs(master);
+
+ return ret;
+}
+
+static void i3c_master_bus_cleanup(struct i3c_master_controller *master)
+{
+ if (master->ops->bus_cleanup)
+ master->ops->bus_cleanup(master);
+
+ i3c_master_detach_free_devs(master);
+}
+
+static struct i3c_dev_desc *
+i3c_master_search_i3c_dev_duplicate(struct i3c_dev_desc *refdev)
+{
+ struct i3c_master_controller *master = refdev->common.master;
+ struct i3c_dev_desc *i3cdev;
+
+ i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
+ if (i3cdev != refdev && i3cdev->info.pid == refdev->info.pid)
+ return i3cdev;
+ }
+
+ return NULL;
+}
+
+/**
+ * i3c_master_add_i3c_dev_locked() - add an I3C slave to the bus
+ * @master: master used to send frames on the bus
+ * @addr: I3C slave dynamic address assigned to the device
+ *
+ * This function instantiates an I3C device descriptor and adds it to the
+ * I3C device list. All device information is automatically retrieved using
+ * standard CCC commands.
+ *
+ * The master driver can later attach private data to the corresponding
+ * device descriptor using i3c_dev_set_master_data().
+ *
+ * This function must be called with the bus lock held in write mode.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ u8 addr)
+{
+ struct i3c_device_info info = { .dyn_addr = addr };
+ struct i3c_dev_desc *newdev, *olddev;
+ u8 old_dyn_addr = addr, expected_dyn_addr;
+ struct i3c_ibi_setup ibireq = { };
+ bool enable_ibi = false;
+ int ret;
+
+ if (!master)
+ return -EINVAL;
+
+ newdev = i3c_master_alloc_i3c_dev(master, &info);
+ if (IS_ERR(newdev))
+ return PTR_ERR(newdev);
+
+ ret = i3c_master_attach_i3c_dev(master, newdev);
+ if (ret)
+ goto err_free_dev;
+
+ ret = i3c_master_retrieve_dev_info(newdev);
+ if (ret)
+ goto err_detach_dev;
+
+ olddev = i3c_master_search_i3c_dev_duplicate(newdev);
+ if (olddev) {
+ newdev->boardinfo = olddev->boardinfo;
+ newdev->info.static_addr = olddev->info.static_addr;
+ newdev->dev = olddev->dev;
+ if (newdev->dev)
+ newdev->dev->desc = newdev;
+
+ /*
+ * We need to restore the IBI state too, so let's save the
+ * IBI information and try to restore them after olddev has
+ * been detached+released and its IBI has been stopped and
+ * the associated resources have been freed.
+ */
+ mutex_lock(&olddev->ibi_lock);
+ if (olddev->ibi) {
+ ibireq.handler = olddev->ibi->handler;
+ ibireq.max_payload_len = olddev->ibi->max_payload_len;
+ ibireq.num_slots = olddev->ibi->num_slots;
+
+ if (olddev->ibi->enabled) {
+ enable_ibi = true;
+ i3c_dev_disable_ibi_locked(olddev);
+ }
+
+ i3c_dev_free_ibi_locked(olddev);
+ }
+ mutex_unlock(&olddev->ibi_lock);
+
+ old_dyn_addr = olddev->info.dyn_addr;
+
+ i3c_master_detach_i3c_dev(olddev);
+ i3c_master_free_i3c_dev(olddev);
+ }
+
+ ret = i3c_master_reattach_i3c_dev(newdev, old_dyn_addr);
+ if (ret)
+ goto err_detach_dev;
+
+ /*
+ * Depending on our previous state, the expected dynamic address might
+ * differ:
+ * - if the device already had a dynamic address assigned, let's try to
+ * re-apply this one
+ * - if the device did not have a dynamic address and the firmware
+ * requested a specific address, pick this one
+ * - in any other case, keep the address automatically assigned by the
+ * master
+ */
+ if (old_dyn_addr && old_dyn_addr != newdev->info.dyn_addr)
+ expected_dyn_addr = old_dyn_addr;
+ else if (newdev->boardinfo && newdev->boardinfo->init_dyn_addr)
+ expected_dyn_addr = newdev->boardinfo->init_dyn_addr;
+ else
+ expected_dyn_addr = newdev->info.dyn_addr;
+
+ if (newdev->info.dyn_addr != expected_dyn_addr) {
+ /*
+ * Try to apply the expected dynamic address. If it fails, keep
+ * the address assigned by the master.
+ */
+ ret = i3c_master_setnewda_locked(master,
+ newdev->info.dyn_addr,
+ expected_dyn_addr);
+ if (!ret) {
+ old_dyn_addr = newdev->info.dyn_addr;
+ newdev->info.dyn_addr = expected_dyn_addr;
+ i3c_master_reattach_i3c_dev(newdev, old_dyn_addr);
+ } else {
+ dev_err(&master->dev,
+				"Failed to assign reserved/old address to device %d-%llx",
+ master->bus.id, newdev->info.pid);
+ }
+ }
+
+ /*
+	 * Now is the time to try to restore the IBI setup. If we're lucky,
+	 * everything works as before, otherwise all we can do is complain.
+	 * FIXME: maybe we should add a callback to inform the driver that it
+	 * should request the IBI again instead of trying to hide that from
+	 * it.
+ */
+ if (ibireq.handler) {
+ mutex_lock(&newdev->ibi_lock);
+ ret = i3c_dev_request_ibi_locked(newdev, &ibireq);
+ if (ret) {
+ dev_err(&master->dev,
+ "Failed to request IBI on device %d-%llx",
+ master->bus.id, newdev->info.pid);
+ } else if (enable_ibi) {
+ ret = i3c_dev_enable_ibi_locked(newdev);
+ if (ret)
+ dev_err(&master->dev,
+ "Failed to re-enable IBI on device %d-%llx",
+ master->bus.id, newdev->info.pid);
+ }
+ mutex_unlock(&newdev->ibi_lock);
+ }
+
+ return 0;
+
+err_detach_dev:
+ if (newdev->dev && newdev->dev->desc)
+ newdev->dev->desc = NULL;
+
+ i3c_master_detach_i3c_dev(newdev);
+
+err_free_dev:
+ i3c_master_free_i3c_dev(newdev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_add_i3c_dev_locked);
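+
+/*
+ * Illustrative sketch (not part of this patch): a controller that performs
+ * DAA fully in hardware only has to tell the core which dynamic addresses
+ * ended up being assigned, typically at the end of its ->do_daa() hook.
+ * foo_hw_daa() and foo_hw_assigned_addr() are hypothetical:
+ *
+ *	static int foo_do_daa(struct i3c_master_controller *m)
+ *	{
+ *		int i, ret, naddrs;
+ *
+ *		naddrs = foo_hw_daa(m);
+ *		if (naddrs < 0)
+ *			return naddrs;
+ *
+ *		for (i = 0; i < naddrs; i++) {
+ *			ret = i3c_master_add_i3c_dev_locked(m,
+ *						foo_hw_assigned_addr(m, i));
+ *			if (ret)
+ *				return ret;
+ *		}
+ *
+ *		return 0;
+ *	}
+ */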
+
+#define OF_I3C_REG1_IS_I2C_DEV BIT(31)
+
+static int
+of_i3c_master_add_i2c_boardinfo(struct i3c_master_controller *master,
+ struct device_node *node, u32 *reg)
+{
+ struct i2c_dev_boardinfo *boardinfo;
+ struct device *dev = &master->dev;
+ int ret;
+
+ boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL);
+ if (!boardinfo)
+ return -ENOMEM;
+
+ ret = of_i2c_get_board_info(dev, node, &boardinfo->base);
+ if (ret)
+ return ret;
+
+ /* LVR is encoded in reg[2]. */
+ boardinfo->lvr = reg[2];
+
+ if (boardinfo->lvr & I3C_LVR_I2C_FM_MODE)
+ master->bus.scl_rate.i2c = I3C_BUS_I2C_FM_SCL_RATE;
+
+ list_add_tail(&boardinfo->node, &master->boardinfo.i2c);
+ of_node_get(node);
+
+ return 0;
+}
+
+static int
+of_i3c_master_add_i3c_boardinfo(struct i3c_master_controller *master,
+ struct device_node *node, u32 *reg)
+{
+ struct i3c_dev_boardinfo *boardinfo;
+ struct device *dev = &master->dev;
+ enum i3c_addr_slot_status addrstatus;
+ u32 init_dyn_addr = 0;
+
+ boardinfo = devm_kzalloc(dev, sizeof(*boardinfo), GFP_KERNEL);
+ if (!boardinfo)
+ return -ENOMEM;
+
+ if (reg[0]) {
+ if (reg[0] > I3C_MAX_ADDR)
+ return -EINVAL;
+
+ addrstatus = i3c_bus_get_addr_slot_status(&master->bus,
+ reg[0]);
+ if (addrstatus != I3C_ADDR_SLOT_FREE)
+ return -EINVAL;
+ }
+
+ boardinfo->static_addr = reg[0];
+
+ if (!of_property_read_u32(node, "assigned-address", &init_dyn_addr)) {
+ if (init_dyn_addr > I3C_MAX_ADDR)
+ return -EINVAL;
+
+ addrstatus = i3c_bus_get_addr_slot_status(&master->bus,
+ init_dyn_addr);
+ if (addrstatus != I3C_ADDR_SLOT_FREE)
+ return -EINVAL;
+ }
+
+ boardinfo->pid = ((u64)reg[1] << 32) | reg[2];
+
+	if ((boardinfo->pid & GENMASK_ULL(63, 48)) ||
+	    I3C_PID_RND_LOWER_32BITS(boardinfo->pid))
+ return -EINVAL;
+
+ boardinfo->init_dyn_addr = init_dyn_addr;
+ boardinfo->of_node = of_node_get(node);
+ list_add_tail(&boardinfo->node, &master->boardinfo.i3c);
+
+ return 0;
+}
+
+static int of_i3c_master_add_dev(struct i3c_master_controller *master,
+ struct device_node *node)
+{
+ u32 reg[3];
+ int ret;
+
+ if (!master || !node)
+ return -EINVAL;
+
+ ret = of_property_read_u32_array(node, "reg", reg, ARRAY_SIZE(reg));
+ if (ret)
+ return ret;
+
+ /*
+ * The manufacturer ID can't be 0. If reg[1] == 0 that means we're
+ * dealing with an I2C device.
+ */
+ if (!reg[1])
+ ret = of_i3c_master_add_i2c_boardinfo(master, node, reg);
+ else
+ ret = of_i3c_master_add_i3c_boardinfo(master, node, reg);
+
+ return ret;
+}
+
+static int of_populate_i3c_bus(struct i3c_master_controller *master)
+{
+ struct device *dev = &master->dev;
+ struct device_node *i3cbus_np = dev->of_node;
+ struct device_node *node;
+ int ret;
+ u32 val;
+
+ if (!i3cbus_np)
+ return 0;
+
+ for_each_available_child_of_node(i3cbus_np, node) {
+ ret = of_i3c_master_add_dev(master, node);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * The user might want to limit I2C and I3C speed in case some devices
+	 * on the bus do not support typical rates, or if the bus topology
+	 * prevents it from using the maximum possible rate.
+ */
+ if (!of_property_read_u32(i3cbus_np, "i2c-scl-hz", &val))
+ master->bus.scl_rate.i2c = val;
+
+ if (!of_property_read_u32(i3cbus_np, "i3c-scl-hz", &val))
+ master->bus.scl_rate.i3c = val;
+
+ return 0;
+}
+
+static int i3c_master_i2c_adapter_xfer(struct i2c_adapter *adap,
+ struct i2c_msg *xfers, int nxfers)
+{
+ struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
+ struct i2c_dev_desc *dev;
+ int i, ret;
+ u16 addr;
+
+ if (!xfers || !master || nxfers <= 0)
+ return -EINVAL;
+
+ if (!master->ops->i2c_xfers)
+ return -ENOTSUPP;
+
+ /* Doing transfers to different devices is not supported. */
+ addr = xfers[0].addr;
+ for (i = 1; i < nxfers; i++) {
+ if (addr != xfers[i].addr)
+ return -ENOTSUPP;
+ }
+
+ i3c_bus_normaluse_lock(&master->bus);
+ dev = i3c_master_find_i2c_dev_by_addr(master, addr);
+ if (!dev)
+ ret = -ENOENT;
+ else
+ ret = master->ops->i2c_xfers(dev, xfers, nxfers);
+ i3c_bus_normaluse_unlock(&master->bus);
+
+ return ret ? ret : nxfers;
+}
+
+static u32 i3c_master_i2c_functionalities(struct i2c_adapter *adap)
+{
+ struct i3c_master_controller *master = i2c_adapter_to_i3c_master(adap);
+
+ return master->ops->i2c_funcs(master);
+}
+
+static const struct i2c_algorithm i3c_master_i2c_algo = {
+ .master_xfer = i3c_master_i2c_adapter_xfer,
+ .functionality = i3c_master_i2c_functionalities,
+};
+
+static int i3c_master_i2c_adapter_init(struct i3c_master_controller *master)
+{
+ struct i2c_adapter *adap = i3c_master_to_i2c_adapter(master);
+ struct i2c_dev_desc *i2cdev;
+ int ret;
+
+ adap->dev.parent = master->dev.parent;
+ adap->owner = master->dev.parent->driver->owner;
+ adap->algo = &i3c_master_i2c_algo;
+ strncpy(adap->name, dev_name(master->dev.parent), sizeof(adap->name));
+
+ /* FIXME: Should we allow i3c masters to override these values? */
+ adap->timeout = 1000;
+ adap->retries = 3;
+
+ ret = i2c_add_adapter(adap);
+ if (ret)
+ return ret;
+
+ /*
+ * We silently ignore failures here. The bus should keep working
+ * correctly even if one or more i2c devices are not registered.
+ */
+ i3c_bus_for_each_i2cdev(&master->bus, i2cdev)
+ i2cdev->dev = i2c_new_device(adap, &i2cdev->boardinfo->base);
+
+ return 0;
+}
+
+static void i3c_master_i2c_adapter_cleanup(struct i3c_master_controller *master)
+{
+ struct i2c_dev_desc *i2cdev;
+
+ i2c_del_adapter(&master->i2c);
+
+ i3c_bus_for_each_i2cdev(&master->bus, i2cdev)
+ i2cdev->dev = NULL;
+}
+
+static void i3c_master_unregister_i3c_devs(struct i3c_master_controller *master)
+{
+ struct i3c_dev_desc *i3cdev;
+
+ i3c_bus_for_each_i3cdev(&master->bus, i3cdev) {
+ if (!i3cdev->dev)
+ continue;
+
+ i3cdev->dev->desc = NULL;
+ if (device_is_registered(&i3cdev->dev->dev))
+ device_unregister(&i3cdev->dev->dev);
+ else
+ put_device(&i3cdev->dev->dev);
+ i3cdev->dev = NULL;
+ }
+}
+
+/**
+ * i3c_master_queue_ibi() - Queue an IBI
+ * @dev: the device this IBI is coming from
+ * @slot: the IBI slot used to store the payload
+ *
+ * Queue an IBI to the controller workqueue. The IBI handler attached to
+ * the dev will be called from a workqueue context.
+ */
+void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot)
+{
+ atomic_inc(&dev->ibi->pending_ibis);
+ queue_work(dev->common.master->wq, &slot->work);
+}
+EXPORT_SYMBOL_GPL(i3c_master_queue_ibi);
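+
+/*
+ * Illustrative sketch (not part of this patch): a controller driver usually
+ * calls this from its interrupt handler once the IBI payload has been copied
+ * into a free slot. In the fragment below, dev and pool are tracked by the
+ * driver and foo_hw_read_ibi_data() is hypothetical:
+ *
+ *	struct i3c_ibi_slot *slot;
+ *
+ *	slot = i3c_generic_ibi_get_free_slot(pool);
+ *	if (!slot)
+ *		return;
+ *
+ *	slot->len = foo_hw_read_ibi_data(master, slot->data,
+ *					 dev->ibi->max_payload_len);
+ *	i3c_master_queue_ibi(dev, slot);
+ */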
+
+static void i3c_master_handle_ibi(struct work_struct *work)
+{
+ struct i3c_ibi_slot *slot = container_of(work, struct i3c_ibi_slot,
+ work);
+ struct i3c_dev_desc *dev = slot->dev;
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ struct i3c_ibi_payload payload;
+
+ payload.data = slot->data;
+ payload.len = slot->len;
+
+ if (dev->dev)
+ dev->ibi->handler(dev->dev, &payload);
+
+ master->ops->recycle_ibi_slot(dev, slot);
+ if (atomic_dec_and_test(&dev->ibi->pending_ibis))
+ complete(&dev->ibi->all_ibis_handled);
+}
+
+static void i3c_master_init_ibi_slot(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot)
+{
+ slot->dev = dev;
+ INIT_WORK(&slot->work, i3c_master_handle_ibi);
+}
+
+struct i3c_generic_ibi_slot {
+ struct list_head node;
+ struct i3c_ibi_slot base;
+};
+
+struct i3c_generic_ibi_pool {
+ spinlock_t lock;
+ unsigned int num_slots;
+ struct i3c_generic_ibi_slot *slots;
+ void *payload_buf;
+ struct list_head free_slots;
+ struct list_head pending;
+};
+
+/**
+ * i3c_generic_ibi_free_pool() - Free a generic IBI pool
+ * @pool: the IBI pool to free
+ *
+ * Free all IBI slots allocated by a generic IBI pool.
+ */
+void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool)
+{
+ struct i3c_generic_ibi_slot *slot;
+ unsigned int nslots = 0;
+
+ while (!list_empty(&pool->free_slots)) {
+ slot = list_first_entry(&pool->free_slots,
+ struct i3c_generic_ibi_slot, node);
+ list_del(&slot->node);
+ nslots++;
+ }
+
+ /*
+ * If the number of freed slots is not equal to the number of allocated
+ * slots we have a leak somewhere.
+ */
+ WARN_ON(nslots != pool->num_slots);
+
+ kfree(pool->payload_buf);
+ kfree(pool->slots);
+ kfree(pool);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_free_pool);
+
+/**
+ * i3c_generic_ibi_alloc_pool() - Create a generic IBI pool
+ * @dev: the device this pool will be used for
+ * @req: IBI setup request describing what the device driver expects
+ *
+ * Create a generic IBI pool based on the information provided in @req.
+ *
+ * Return: a valid IBI pool in case of success, an ERR_PTR() otherwise.
+ */
+struct i3c_generic_ibi_pool *
+i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_generic_ibi_pool *pool;
+ struct i3c_generic_ibi_slot *slot;
+ unsigned int i;
+ int ret;
+
+ pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&pool->lock);
+ INIT_LIST_HEAD(&pool->free_slots);
+ INIT_LIST_HEAD(&pool->pending);
+
+ pool->slots = kcalloc(req->num_slots, sizeof(*slot), GFP_KERNEL);
+ if (!pool->slots) {
+ ret = -ENOMEM;
+ goto err_free_pool;
+ }
+
+ if (req->max_payload_len) {
+ pool->payload_buf = kcalloc(req->num_slots,
+ req->max_payload_len, GFP_KERNEL);
+ if (!pool->payload_buf) {
+ ret = -ENOMEM;
+ goto err_free_pool;
+ }
+ }
+
+ for (i = 0; i < req->num_slots; i++) {
+ slot = &pool->slots[i];
+ i3c_master_init_ibi_slot(dev, &slot->base);
+
+ if (req->max_payload_len)
+ slot->base.data = pool->payload_buf +
+ (i * req->max_payload_len);
+
+ list_add_tail(&slot->node, &pool->free_slots);
+ pool->num_slots++;
+ }
+
+ return pool;
+
+err_free_pool:
+ i3c_generic_ibi_free_pool(pool);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_alloc_pool);
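+
+/*
+ * Illustrative sketch (not part of this patch): a controller driver without
+ * dedicated IBI slot management in hardware can back its ->request_ibi()
+ * and ->free_ibi() hooks with a generic pool. The foo_* names (including
+ * to_foo_master() and the ibi_pool member) are hypothetical:
+ *
+ *	static int foo_request_ibi(struct i3c_dev_desc *dev,
+ *				   const struct i3c_ibi_setup *req)
+ *	{
+ *		struct foo_master *m = to_foo_master(i3c_dev_get_master(dev));
+ *
+ *		m->ibi_pool = i3c_generic_ibi_alloc_pool(dev, req);
+ *		if (IS_ERR(m->ibi_pool))
+ *			return PTR_ERR(m->ibi_pool);
+ *
+ *		return 0;
+ *	}
+ *
+ *	static void foo_free_ibi(struct i3c_dev_desc *dev)
+ *	{
+ *		struct foo_master *m = to_foo_master(i3c_dev_get_master(dev));
+ *
+ *		i3c_generic_ibi_free_pool(m->ibi_pool);
+ *		m->ibi_pool = NULL;
+ *	}
+ */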
+
+/**
+ * i3c_generic_ibi_get_free_slot() - Get a free slot from a generic IBI pool
+ * @pool: the pool to query an IBI slot on
+ *
+ * Search for a free slot in a generic IBI pool.
+ * The slot should be returned to the pool using i3c_generic_ibi_recycle_slot()
+ * when it's no longer needed.
+ *
+ * Return: a pointer to a free slot, or NULL if there's no free slot available.
+ */
+struct i3c_ibi_slot *
+i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool)
+{
+ struct i3c_generic_ibi_slot *slot;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ slot = list_first_entry_or_null(&pool->free_slots,
+ struct i3c_generic_ibi_slot, node);
+ if (slot)
+ list_del(&slot->node);
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+ return slot ? &slot->base : NULL;
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_get_free_slot);
+
+/**
+ * i3c_generic_ibi_recycle_slot() - Return a slot to a generic IBI pool
+ * @pool: the pool to return the IBI slot to
+ * @s: IBI slot to recycle
+ *
+ * Add an IBI slot back to its generic IBI pool. Should be called from the
+ * master driver &i3c_master_controller_ops->recycle_ibi_slot() method.
+ */
+void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
+ struct i3c_ibi_slot *s)
+{
+ struct i3c_generic_ibi_slot *slot;
+ unsigned long flags;
+
+ if (!s)
+ return;
+
+ slot = container_of(s, struct i3c_generic_ibi_slot, base);
+ spin_lock_irqsave(&pool->lock, flags);
+ list_add_tail(&slot->node, &pool->free_slots);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+EXPORT_SYMBOL_GPL(i3c_generic_ibi_recycle_slot);
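+
+/*
+ * Illustrative sketch (not part of this patch): when the generic pool is
+ * used, the matching ->recycle_ibi_slot() hook is usually a one-liner
+ * (foo_* names are hypothetical):
+ *
+ *	static void foo_recycle_ibi_slot(struct i3c_dev_desc *dev,
+ *					 struct i3c_ibi_slot *slot)
+ *	{
+ *		struct foo_master *m = to_foo_master(i3c_dev_get_master(dev));
+ *
+ *		i3c_generic_ibi_recycle_slot(m->ibi_pool, slot);
+ *	}
+ */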
+
+static int i3c_master_check_ops(const struct i3c_master_controller_ops *ops)
+{
+ if (!ops || !ops->bus_init || !ops->priv_xfers ||
+ !ops->send_ccc_cmd || !ops->do_daa || !ops->i2c_xfers ||
+ !ops->i2c_funcs)
+ return -EINVAL;
+
+ if (ops->request_ibi &&
+ (!ops->enable_ibi || !ops->disable_ibi || !ops->free_ibi ||
+ !ops->recycle_ibi_slot))
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * i3c_master_register() - register an I3C master
+ * @master: master used to send frames on the bus
+ * @parent: the parent device (the one that provides this I3C master
+ * controller)
+ * @ops: the master controller operations
+ * @secondary: true if you are registering a secondary master. Will return
+ * -ENOTSUPP if set to true since secondary masters are not yet
+ * supported
+ *
+ * This function takes care of everything for you:
+ *
+ * - creates and initializes the I3C bus
+ * - populates the bus with static I2C devs if @parent->of_node is not
+ * NULL
+ * - registers all I3C devices added by the controller during bus
+ * initialization
+ * - registers the I2C adapter and all I2C devices
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_register(struct i3c_master_controller *master,
+ struct device *parent,
+ const struct i3c_master_controller_ops *ops,
+ bool secondary)
+{
+ struct i3c_bus *i3cbus = i3c_master_get_bus(master);
+ enum i3c_bus_mode mode = I3C_BUS_MODE_PURE;
+ struct i2c_dev_boardinfo *i2cbi;
+ int ret;
+
+ /* We do not support secondary masters yet. */
+ if (secondary)
+ return -ENOTSUPP;
+
+ ret = i3c_master_check_ops(ops);
+ if (ret)
+ return ret;
+
+ master->dev.parent = parent;
+ master->dev.of_node = of_node_get(parent->of_node);
+ master->dev.bus = &i3c_bus_type;
+ master->dev.type = &i3c_masterdev_type;
+ master->dev.release = i3c_masterdev_release;
+ master->ops = ops;
+ master->secondary = secondary;
+ INIT_LIST_HEAD(&master->boardinfo.i2c);
+ INIT_LIST_HEAD(&master->boardinfo.i3c);
+
+ ret = i3c_bus_init(i3cbus);
+ if (ret)
+ return ret;
+
+ device_initialize(&master->dev);
+ dev_set_name(&master->dev, "i3c-%d", i3cbus->id);
+
+ ret = of_populate_i3c_bus(master);
+ if (ret)
+ goto err_put_dev;
+
+ list_for_each_entry(i2cbi, &master->boardinfo.i2c, node) {
+ switch (i2cbi->lvr & I3C_LVR_I2C_INDEX_MASK) {
+ case I3C_LVR_I2C_INDEX(0):
+ if (mode < I3C_BUS_MODE_MIXED_FAST)
+ mode = I3C_BUS_MODE_MIXED_FAST;
+ break;
+ case I3C_LVR_I2C_INDEX(1):
+ case I3C_LVR_I2C_INDEX(2):
+ if (mode < I3C_BUS_MODE_MIXED_SLOW)
+ mode = I3C_BUS_MODE_MIXED_SLOW;
+ break;
+ default:
+ ret = -EINVAL;
+ goto err_put_dev;
+ }
+ }
+
+ ret = i3c_bus_set_mode(i3cbus, mode);
+ if (ret)
+ goto err_put_dev;
+
+ master->wq = alloc_workqueue("%s", 0, 0, dev_name(parent));
+ if (!master->wq) {
+ ret = -ENOMEM;
+ goto err_put_dev;
+ }
+
+ ret = i3c_master_bus_init(master);
+ if (ret)
+ goto err_put_dev;
+
+ ret = device_add(&master->dev);
+ if (ret)
+ goto err_cleanup_bus;
+
+ /*
+ * Expose our I3C bus as an I2C adapter so that I2C devices are exposed
+ * through the I2C subsystem.
+ */
+ ret = i3c_master_i2c_adapter_init(master);
+ if (ret)
+ goto err_del_dev;
+
+ /*
+ * We're done initializing the bus and the controller, we can now
+	 * register I3C devices discovered during the initial DAA.
+ */
+ master->init_done = true;
+ i3c_bus_normaluse_lock(&master->bus);
+ i3c_master_register_new_i3c_devs(master);
+ i3c_bus_normaluse_unlock(&master->bus);
+
+ return 0;
+
+err_del_dev:
+ device_del(&master->dev);
+
+err_cleanup_bus:
+ i3c_master_bus_cleanup(master);
+
+err_put_dev:
+ put_device(&master->dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(i3c_master_register);
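+
+/*
+ * Illustrative sketch (not part of this patch): a controller driver embeds
+ * a &struct i3c_master_controller in its private structure and registers it
+ * from probe(). The foo_* names are hypothetical, and the ops table must at
+ * least provide the mandatory hooks checked by i3c_master_check_ops():
+ *
+ *	static const struct i3c_master_controller_ops foo_ops = {
+ *		.bus_init = foo_bus_init,
+ *		.do_daa = foo_do_daa,
+ *		.send_ccc_cmd = foo_send_ccc_cmd,
+ *		.priv_xfers = foo_priv_xfers,
+ *		.i2c_xfers = foo_i2c_xfers,
+ *		.i2c_funcs = foo_i2c_funcs,
+ *	};
+ *
+ *	static int foo_probe(struct platform_device *pdev)
+ *	{
+ *		struct foo_master *m;
+ *
+ *		m = devm_kzalloc(&pdev->dev, sizeof(*m), GFP_KERNEL);
+ *		if (!m)
+ *			return -ENOMEM;
+ *
+ *		platform_set_drvdata(pdev, m);
+ *
+ *		return i3c_master_register(&m->base, &pdev->dev, &foo_ops,
+ *					   false);
+ *	}
+ */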
+
+/**
+ * i3c_master_unregister() - unregister an I3C master
+ * @master: master used to send frames on the bus
+ *
+ * Basically undo everything done in i3c_master_register().
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int i3c_master_unregister(struct i3c_master_controller *master)
+{
+ i3c_master_i2c_adapter_cleanup(master);
+ i3c_master_unregister_i3c_devs(master);
+ i3c_master_bus_cleanup(master);
+ device_unregister(&master->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(i3c_master_unregister);
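+
+/*
+ * Illustrative sketch (not part of this patch): the matching remove() path
+ * simply hands the controller back to the core (foo_* names hypothetical):
+ *
+ *	static int foo_remove(struct platform_device *pdev)
+ *	{
+ *		struct foo_master *m = platform_get_drvdata(pdev);
+ *
+ *		return i3c_master_unregister(&m->base);
+ *	}
+ */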
+
+int i3c_dev_do_priv_xfers_locked(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers)
+{
+ struct i3c_master_controller *master;
+
+ if (!dev)
+ return -ENOENT;
+
+ master = i3c_dev_get_master(dev);
+ if (!master || !xfers)
+ return -EINVAL;
+
+ if (!master->ops->priv_xfers)
+ return -ENOTSUPP;
+
+ return master->ops->priv_xfers(dev, xfers, nxfers);
+}
+
+int i3c_dev_disable_ibi_locked(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master;
+ int ret;
+
+ if (!dev->ibi)
+ return -EINVAL;
+
+ master = i3c_dev_get_master(dev);
+ ret = master->ops->disable_ibi(dev);
+ if (ret)
+ return ret;
+
+ reinit_completion(&dev->ibi->all_ibis_handled);
+ if (atomic_read(&dev->ibi->pending_ibis))
+ wait_for_completion(&dev->ibi->all_ibis_handled);
+
+ dev->ibi->enabled = false;
+
+ return 0;
+}
+
+int i3c_dev_enable_ibi_locked(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ int ret;
+
+ if (!dev->ibi)
+ return -EINVAL;
+
+ ret = master->ops->enable_ibi(dev);
+ if (!ret)
+ dev->ibi->enabled = true;
+
+ return ret;
+}
+
+int i3c_dev_request_ibi_locked(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+ struct i3c_device_ibi_info *ibi;
+ int ret;
+
+ if (!master->ops->request_ibi)
+ return -ENOTSUPP;
+
+ if (dev->ibi)
+ return -EBUSY;
+
+ ibi = kzalloc(sizeof(*ibi), GFP_KERNEL);
+ if (!ibi)
+ return -ENOMEM;
+
+ atomic_set(&ibi->pending_ibis, 0);
+ init_completion(&ibi->all_ibis_handled);
+ ibi->handler = req->handler;
+ ibi->max_payload_len = req->max_payload_len;
+ ibi->num_slots = req->num_slots;
+
+ dev->ibi = ibi;
+ ret = master->ops->request_ibi(dev, req);
+ if (ret) {
+ kfree(ibi);
+ dev->ibi = NULL;
+ }
+
+ return ret;
+}
+
+void i3c_dev_free_ibi_locked(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *master = i3c_dev_get_master(dev);
+
+ if (!dev->ibi)
+ return;
+
+ if (WARN_ON(dev->ibi->enabled))
+ WARN_ON(i3c_dev_disable_ibi_locked(dev));
+
+ master->ops->free_ibi(dev);
+ kfree(dev->ibi);
+ dev->ibi = NULL;
+}
+
+static int __init i3c_init(void)
+{
+ return bus_register(&i3c_bus_type);
+}
+subsys_initcall(i3c_init);
+
+static void __exit i3c_exit(void)
+{
+ idr_destroy(&i3c_bus_idr);
+ bus_unregister(&i3c_bus_type);
+}
+module_exit(i3c_exit);
+
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon@bootlin.com>");
+MODULE_DESCRIPTION("I3C core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i3c/master/Kconfig b/drivers/i3c/master/Kconfig
new file mode 100644
index 0000000..83f8bc0
--- /dev/null
+++ b/drivers/i3c/master/Kconfig
@@ -0,0 +1,10 @@
+config I3C_MASTER_QCOM_GENI
+	tristate "Qualcomm Technologies Inc.'s GENI-based I3C controller"
+ depends on ARCH_QCOM
+ depends on I3C
+ help
+ If you say yes to this option, support will be included for the
+	  built-in I3C interface on Qualcomm Technologies, Inc. SoCs.
+
+ This driver can also be built as a module. If so, the module
+ will be called i3c-master-qcom-geni.
diff --git a/drivers/i3c/master/Makefile b/drivers/i3c/master/Makefile
new file mode 100644
index 0000000..cb605d0
--- /dev/null
+++ b/drivers/i3c/master/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_I3C_MASTER_QCOM_GENI) += i3c-master-qcom-geni.o
diff --git a/drivers/i3c/master/i3c-master-qcom-geni.c b/drivers/i3c/master/i3c-master-qcom-geni.c
new file mode 100644
index 0000000..ca3e046
--- /dev/null
+++ b/drivers/i3c/master/i3c-master-qcom-geni.c
@@ -0,0 +1,1348 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/i3c/master.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/qcom-geni-se.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/ipc_logging.h>
+
+#define SE_I3C_SCL_HIGH 0x268
+#define SE_I3C_TX_TRANS_LEN 0x26C
+#define SE_I3C_RX_TRANS_LEN 0x270
+#define SE_I3C_DELAY_COUNTER 0x274
+#define SE_I2C_SCL_COUNTERS 0x278
+#define SE_I3C_SCL_CYCLE 0x27C
+#define SE_GENI_HW_IRQ_EN 0x920
+#define SE_GENI_HW_IRQ_IGNORE_ON_ACTIVE 0x924
+#define SE_GENI_HW_IRQ_CMD_PARAM_0 0x930
+
+/* SE_GENI_M_CLK_CFG field shifts */
+#define CLK_DEV_VALUE_SHFT 4
+#define SER_CLK_EN_SHFT 0
+
+/* SE_GENI_HW_IRQ_CMD_PARAM_0 field shifts */
+#define M_IBI_IRQ_PARAM_7E_SHFT 0
+#define M_IBI_IRQ_PARAM_STOP_STALL_SHFT 1
+
+/* SE_I2C_SCL_COUNTERS field shifts */
+#define I2C_SCL_HIGH_COUNTER_SHFT 20
+#define I2C_SCL_LOW_COUNTER_SHFT 10
+
+#define SE_I3C_ERR (M_CMD_OVERRUN_EN | M_ILLEGAL_CMD_EN | M_CMD_FAILURE_EN |\
+ M_CMD_ABORT_EN | M_GP_IRQ_0_EN | M_GP_IRQ_1_EN | M_GP_IRQ_2_EN | \
+ M_GP_IRQ_3_EN | M_GP_IRQ_4_EN)
+
+/* M_CMD OP codes for I2C/I3C */
+#define I3C_READ_IBI_HW 0
+#define I2C_WRITE 1
+#define I2C_READ 2
+#define I2C_WRITE_READ 3
+#define I2C_ADDR_ONLY 4
+#define I3C_INBAND_RESET 5
+#define I2C_BUS_CLEAR 6
+#define I2C_STOP_ON_BUS 7
+#define I3C_HDR_DDR_EXIT 8
+#define I3C_PRIVATE_WRITE 9
+#define I3C_PRIVATE_READ 10
+#define I3C_HDR_DDR_WRITE 11
+#define I3C_HDR_DDR_READ 12
+#define I3C_DIRECT_CCC_ADDR_ONLY 13
+#define I3C_BCAST_CCC_ADDR_ONLY 14
+#define I3C_READ_IBI 15
+#define I3C_BCAST_CCC_WRITE 16
+#define I3C_DIRECT_CCC_WRITE 17
+#define I3C_DIRECT_CCC_READ 18
+/* M_CMD params for I3C */
+#define PRE_CMD_DELAY BIT(0)
+#define TIMESTAMP_BEFORE BIT(1)
+#define STOP_STRETCH BIT(2)
+#define TIMESTAMP_AFTER BIT(3)
+#define POST_COMMAND_DELAY BIT(4)
+#define IGNORE_ADD_NACK BIT(6)
+#define READ_FINISHED_WITH_ACK BIT(7)
+#define CONTINUOUS_MODE_DAA BIT(8)
+#define SLV_ADDR_MSK GENMASK(15, 9)
+#define SLV_ADDR_SHFT 9
+#define CCC_HDR_CMD_MSK GENMASK(23, 16)
+#define CCC_HDR_CMD_SHFT 16
+#define IBI_NACK_TBL_CTRL BIT(24)
+#define USE_7E BIT(25)
+#define BYPASS_ADDR_PHASE BIT(26)
+
+enum geni_i3c_err_code {
+ RD_TERM,
+ NACK,
+ CRC_ERR,
+ BUS_PROTO,
+ NACK_7E,
+ NACK_IBI,
+ GENI_OVERRUN,
+ GENI_ILLEGAL_CMD,
+ GENI_ABORT_DONE,
+ GENI_TIMEOUT,
+};
+
+#define DM_I3C_CB_ERR ((BIT(NACK) | BIT(BUS_PROTO) | BIT(NACK_7E)) << 5)
+
+#define I3C_AUTO_SUSPEND_DELAY 250
+#define KHZ(freq) (1000 * freq)
+#define PACKING_BYTES_PW 4
+#define XFER_TIMEOUT HZ
+#define DFS_INDEX_MAX 7
+#define I3C_CORE2X_VOTE (960)
+#define DEFAULT_BUS_WIDTH (4)
+#define DEFAULT_SE_CLK (19200000)
+
+#define I3C_DDR_READ_CMD BIT(7)
+#define I3C_ADDR_MASK 0x7f
+
+enum i3c_trans_dir {
+ WRITE_TRANSACTION = 0,
+ READ_TRANSACTION = 1
+};
+
+struct geni_se {
+ void __iomem *base;
+ struct device *dev;
+ struct se_geni_rsc i3c_rsc;
+};
+
+struct geni_i3c_dev {
+ struct geni_se se;
+ unsigned int tx_wm;
+ int irq;
+ int err;
+ struct i3c_master_controller ctrlr;
+ void *ipcl;
+ struct completion done;
+ struct mutex lock;
+ spinlock_t spinlock;
+ u32 clk_src_freq;
+ u32 dfs_idx;
+ u8 *cur_buf;
+ enum i3c_trans_dir cur_rnw;
+ int cur_len;
+ int cur_idx;
+ unsigned long newaddrslots[(I3C_ADDR_MASK + 1) / BITS_PER_LONG];
+ const struct geni_i3c_clk_fld *clk_fld;
+};
+
+struct geni_i3c_i2c_dev_data {
+	u32 dummy; /* placeholder for now; will later hold IBI information */
+};
+
+struct i3c_xfer_params {
+ enum se_xfer_mode mode;
+ u32 m_cmd;
+ u32 m_param;
+};
+
+struct geni_i3c_err_log {
+ int err;
+ const char *msg;
+};
+
+static struct geni_i3c_err_log gi3c_log[] = {
+ [RD_TERM] = { -EINVAL, "I3C slave early read termination" },
+ [NACK] = { -ENOTCONN, "NACK: slave unresponsive, check power/reset" },
+ [CRC_ERR] = { -EINVAL, "CRC or parity error" },
+ [BUS_PROTO] = { -EPROTO, "Bus proto err, noisy/unexpected start/stop" },
+ [NACK_7E] = { -EBUSY, "NACK on 7E, unexpected protocol error" },
+ [NACK_IBI] = { -EINVAL, "NACK on IBI" },
+ [GENI_OVERRUN] = { -EIO, "Cmd overrun, check GENI cmd-state machine" },
+ [GENI_ILLEGAL_CMD] = { -EILSEQ,
+ "Illegal cmd, check GENI cmd-state machine" },
+ [GENI_ABORT_DONE] = { -ETIMEDOUT, "Abort after timeout successful" },
+ [GENI_TIMEOUT] = { -ETIMEDOUT, "I3C transaction timed out" },
+};
+
+struct geni_i3c_clk_fld {
+ u32 clk_freq_out;
+ u32 clk_src_freq;
+ u8 clk_div;
+ u8 i2c_t_high_cnt;
+ u8 i2c_t_low_cnt;
+ u8 i2c_t_cycle_cnt;
+ u8 i3c_t_high_cnt;
+ u8 i3c_t_cycle_cnt;
+};
+
+static struct geni_i3c_dev*
+to_geni_i3c_master(struct i3c_master_controller *master)
+{
+ return container_of(master, struct geni_i3c_dev, ctrlr);
+}
+
+/*
+ * The hardware derives the SCL timing from the formulas below. The
+ * firmware adds a few extra cycles that these formulas do not capture;
+ * the resulting periods have been confirmed to stay within the
+ * specification limits.
+ *
+ * time of high period of I2C SCL:
+ * i2c_t_high = (i2c_t_high_cnt * clk_div) / source_clock
+ * time of low period of I2C SCL:
+ * i2c_t_low = (i2c_t_low_cnt * clk_div) / source_clock
+ * time of full period of I2C SCL:
+ * i2c_t_cycle = (i2c_t_cycle_cnt * clk_div) / source_clock
+ * time of high period of I3C SCL:
+ * i3c_t_high = (i3c_t_high_cnt * clk_div) / source_clock
+ * time of full period of I3C SCL:
+ * i3c_t_cycle = (i3c_t_cycle_cnt * clk_div) / source_clock
+ * output SCL frequency:
+ * clk_freq_out = 1 / t_cycle
+ */
+static const struct geni_i3c_clk_fld geni_i3c_clk_map[] = {
+ { KHZ(100), 19200, 7, 10, 11, 26, 0, 0 },
+ { KHZ(400), 19200, 2, 5, 12, 24, 0, 0 },
+ { KHZ(1000), 19200, 1, 3, 9, 18, 0, 0 },
+ { KHZ(12500), 100000, 1, 60, 140, 250, 8, 16 },
+};
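+/*
+ * For example (approximate, ignoring the extra firmware cycles): the
+ * 100 kHz entry above uses a 19.2 MHz source clock and clk_div = 7, so
+ * i2c_t_high  = (10 * 7) / 19.2 MHz ~= 3.6 us
+ * i2c_t_low   = (11 * 7) / 19.2 MHz ~= 4.0 us
+ * i2c_t_cycle = (26 * 7) / 19.2 MHz ~= 9.5 us (~105 kHz)
+ * and the extra firmware cycles stretch the period toward the 100 kHz
+ * target.
+ */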
+
+static int geni_i3c_clk_map_idx(struct geni_i3c_dev *gi3c)
+{
+ int i;
+ struct i3c_master_controller *m = &gi3c->ctrlr;
+ const struct geni_i3c_clk_fld *itr = geni_i3c_clk_map;
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+
+ for (i = 0; i < ARRAY_SIZE(geni_i3c_clk_map); i++, itr++) {
+ if ((!bus ||
+ itr->clk_freq_out == bus->scl_rate.i3c) &&
+ KHZ(itr->clk_src_freq) == gi3c->clk_src_freq) {
+ gi3c->clk_fld = itr;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
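+/*
+ * These helpers track the dynamic addresses assigned while a DAA is in
+ * progress. The bitmap is consumed by geni_i3c_master_entdaa_locked(),
+ * which registers each newly addressed device with the I3C core.
+ */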
+static void set_new_addr_slot(unsigned long *addrslot, u8 addr)
+{
+ unsigned long *ptr;
+
+ if (addr > I3C_ADDR_MASK)
+ return;
+
+ ptr = addrslot + (addr / BITS_PER_LONG);
+	*ptr |= 1UL << (addr % BITS_PER_LONG);
+}
+
+static void clear_new_addr_slot(unsigned long *addrslot, u8 addr)
+{
+ unsigned long *ptr;
+
+ if (addr > I3C_ADDR_MASK)
+ return;
+
+ ptr = addrslot + (addr / BITS_PER_LONG);
+	*ptr &= ~(1UL << (addr % BITS_PER_LONG));
+}
+
+static bool is_new_addr_slot_set(unsigned long *addrslot, u8 addr)
+{
+ unsigned long *ptr;
+
+ if (addr > I3C_ADDR_MASK)
+ return false;
+
+ ptr = addrslot + (addr / BITS_PER_LONG);
+	return ((*ptr & (1UL << (addr % BITS_PER_LONG))) != 0);
+}
+
+static void qcom_geni_i3c_conf(struct geni_i3c_dev *gi3c)
+{
+ const struct geni_i3c_clk_fld *itr = gi3c->clk_fld;
+ u32 val;
+ unsigned long freq;
+ int ret = 0;
+
+ if (gi3c->dfs_idx > DFS_INDEX_MAX)
+ ret = geni_se_clk_freq_match(&gi3c->se.i3c_rsc,
+ KHZ(itr->clk_src_freq),
+ &gi3c->dfs_idx, &freq, false);
+ if (ret)
+ gi3c->dfs_idx = 0;
+
+ writel_relaxed(gi3c->dfs_idx, gi3c->se.base + SE_GENI_CLK_SEL);
+
+ val = itr->clk_div << CLK_DEV_VALUE_SHFT;
+ val |= 1 << SER_CLK_EN_SHFT;
+ writel_relaxed(val, gi3c->se.base + GENI_SER_M_CLK_CFG);
+
+ val = itr->i2c_t_high_cnt << I2C_SCL_HIGH_COUNTER_SHFT;
+ val |= itr->i2c_t_low_cnt << I2C_SCL_LOW_COUNTER_SHFT;
+ val |= itr->i2c_t_cycle_cnt;
+ writel_relaxed(val, gi3c->se.base + SE_I2C_SCL_COUNTERS);
+
+ writel_relaxed(itr->i3c_t_cycle_cnt, gi3c->se.base + SE_I3C_SCL_CYCLE);
+ writel_relaxed(itr->i3c_t_high_cnt, gi3c->se.base + SE_I3C_SCL_HIGH);
+
+ writel_relaxed(1, gi3c->se.base + SE_GENI_HW_IRQ_IGNORE_ON_ACTIVE);
+
+ val = 1 << M_IBI_IRQ_PARAM_STOP_STALL_SHFT;
+ val |= 1 << M_IBI_IRQ_PARAM_7E_SHFT;
+ writel_relaxed(val, gi3c->se.base + SE_GENI_HW_IRQ_CMD_PARAM_0);
+
+ writel_relaxed(1, gi3c->se.base + SE_GENI_HW_IRQ_EN);
+}
+
+static void geni_i3c_err(struct geni_i3c_dev *gi3c, int err)
+{
+ if (gi3c->cur_rnw == WRITE_TRANSACTION)
+ dev_dbg(gi3c->se.dev, "len:%d, write\n", gi3c->cur_len);
+ else
+ dev_dbg(gi3c->se.dev, "len:%d, read\n", gi3c->cur_len);
+
+ dev_dbg(gi3c->se.dev, "%s\n", gi3c_log[err].msg);
+ gi3c->err = gi3c_log[err].err;
+}
+
+static irqreturn_t geni_i3c_irq(int irq, void *dev)
+{
+ struct geni_i3c_dev *gi3c = dev;
+ int j;
+ u32 m_stat, m_stat_mask, rx_st;
+ u32 dm_tx_st, dm_rx_st, dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gi3c->spinlock, flags);
+
+ m_stat = readl_relaxed(gi3c->se.base + SE_GENI_M_IRQ_STATUS);
+ m_stat_mask = readl_relaxed(gi3c->se.base + SE_GENI_M_IRQ_EN);
+ rx_st = readl_relaxed(gi3c->se.base + SE_GENI_RX_FIFO_STATUS);
+ dm_tx_st = readl_relaxed(gi3c->se.base + SE_DMA_TX_IRQ_STAT);
+ dm_rx_st = readl_relaxed(gi3c->se.base + SE_DMA_RX_IRQ_STAT);
+ dma = readl_relaxed(gi3c->se.base + SE_GENI_DMA_MODE_EN);
+
+ if ((m_stat & SE_I3C_ERR) ||
+ (dm_rx_st & DM_I3C_CB_ERR)) {
+ if (m_stat & M_GP_IRQ_0_EN)
+ geni_i3c_err(gi3c, RD_TERM);
+ if (m_stat & M_GP_IRQ_1_EN)
+ geni_i3c_err(gi3c, NACK);
+ if (m_stat & M_GP_IRQ_2_EN)
+ geni_i3c_err(gi3c, CRC_ERR);
+ if (m_stat & M_GP_IRQ_3_EN)
+ geni_i3c_err(gi3c, BUS_PROTO);
+ if (m_stat & M_GP_IRQ_4_EN)
+ geni_i3c_err(gi3c, NACK_7E);
+ if (m_stat & M_CMD_OVERRUN_EN)
+ geni_i3c_err(gi3c, GENI_OVERRUN);
+ if (m_stat & M_ILLEGAL_CMD_EN)
+ geni_i3c_err(gi3c, GENI_ILLEGAL_CMD);
+ if (m_stat & M_CMD_ABORT_EN)
+ geni_i3c_err(gi3c, GENI_ABORT_DONE);
+
+ /* Disable the TX Watermark interrupt to stop TX */
+ if (!dma)
+ writel_relaxed(0, gi3c->se.base +
+ SE_GENI_TX_WATERMARK_REG);
+ goto irqret;
+ }
+
+ if (dma) {
+ dev_dbg(gi3c->se.dev, "i3c dma tx:0x%x, dma rx:0x%x\n",
+ dm_tx_st, dm_rx_st);
+ goto irqret;
+ }
+
+ if ((m_stat & (M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN)) &&
+ (gi3c->cur_rnw == READ_TRANSACTION) &&
+ gi3c->cur_buf) {
+ u32 rxcnt = rx_st & RX_FIFO_WC_MSK;
+
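+		/* Unpack up to 4 bytes, LSB first, from each 32-bit RX FIFO word */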
+ for (j = 0; j < rxcnt; j++) {
+ u32 val;
+ int p = 0;
+
+ val = readl_relaxed(gi3c->se.base + SE_GENI_RX_FIFOn);
+ while (gi3c->cur_idx < gi3c->cur_len &&
+ p < sizeof(val)) {
+ gi3c->cur_buf[gi3c->cur_idx++] = val & 0xff;
+ val >>= 8;
+ p++;
+ }
+ if (gi3c->cur_idx == gi3c->cur_len)
+ break;
+ }
+ } else if ((m_stat & M_TX_FIFO_WATERMARK_EN) &&
+ (gi3c->cur_rnw == WRITE_TRANSACTION) &&
+ (gi3c->cur_buf)) {
+ for (j = 0; j < gi3c->tx_wm; j++) {
+ u32 temp;
+ u32 val = 0;
+ int p = 0;
+
+ while (gi3c->cur_idx < gi3c->cur_len &&
+ p < sizeof(val)) {
+ temp = gi3c->cur_buf[gi3c->cur_idx++];
+ val |= temp << (p * 8);
+ p++;
+ }
+ writel_relaxed(val, gi3c->se.base + SE_GENI_TX_FIFOn);
+ if (gi3c->cur_idx == gi3c->cur_len) {
+ writel_relaxed(0, gi3c->se.base +
+ SE_GENI_TX_WATERMARK_REG);
+ break;
+ }
+ }
+ }
+irqret:
+ if (m_stat)
+ writel_relaxed(m_stat, gi3c->se.base + SE_GENI_M_IRQ_CLEAR);
+
+ if (dma) {
+ if (dm_tx_st)
+ writel_relaxed(dm_tx_st,
+ gi3c->se.base + SE_DMA_TX_IRQ_CLR);
+ if (dm_rx_st)
+ writel_relaxed(dm_rx_st,
+ gi3c->se.base + SE_DMA_RX_IRQ_CLR);
+ }
+	/* An error without the done bit set is handled via the transfer timeout. */
+ if (m_stat & M_CMD_DONE_EN || m_stat & M_CMD_ABORT_EN) {
+ writel_relaxed(0, gi3c->se.base + SE_GENI_TX_WATERMARK_REG);
+ complete(&gi3c->done);
+ } else if ((dm_tx_st & TX_DMA_DONE) ||
+ (dm_rx_st & RX_DMA_DONE) ||
+ (dm_rx_st & RX_RESET_DONE))
+ complete(&gi3c->done);
+
+ spin_unlock_irqrestore(&gi3c->spinlock, flags);
+ return IRQ_HANDLED;
+}
+
+static int i3c_geni_runtime_get_mutex_lock(struct geni_i3c_dev *gi3c)
+{
+ int ret;
+
+ mutex_lock(&gi3c->lock);
+
+ reinit_completion(&gi3c->done);
+ ret = pm_runtime_get_sync(gi3c->se.dev);
+ if (ret < 0) {
+ dev_err(gi3c->se.dev,
+ "error turning on SE resources:%d\n", ret);
+ pm_runtime_put_noidle(gi3c->se.dev);
+ /* Set device in suspended since resume failed */
+ pm_runtime_set_suspended(gi3c->se.dev);
+
+ mutex_unlock(&gi3c->lock);
+ return ret;
+ }
+
+ qcom_geni_i3c_conf(gi3c);
+
+	return 0;
+}
+
+static void i3c_geni_runtime_put_mutex_unlock(struct geni_i3c_dev *gi3c)
+{
+ pm_runtime_mark_last_busy(gi3c->se.dev);
+ pm_runtime_put_autosuspend(gi3c->se.dev);
+ mutex_unlock(&gi3c->lock);
+}
+
+static int _i3c_geni_execute_command
+(
+ struct geni_i3c_dev *gi3c,
+ struct i3c_xfer_params *xfer
+)
+{
+ dma_addr_t tx_dma = 0;
+ dma_addr_t rx_dma = 0;
+ int ret, time_remaining = 0;
+ enum i3c_trans_dir rnw = gi3c->cur_rnw;
+ u32 len = gi3c->cur_len;
+
+ geni_se_select_mode(gi3c->se.base, xfer->mode);
+
+ gi3c->err = 0;
+ gi3c->cur_idx = 0;
+
+ if (rnw == READ_TRANSACTION) {
+ dev_dbg(gi3c->se.dev, "I3C cmd:0x%x param:0x%x READ len:%d\n",
+ xfer->m_cmd, xfer->m_param, len);
+ writel_relaxed(len, gi3c->se.base + SE_I3C_RX_TRANS_LEN);
+ geni_setup_m_cmd(gi3c->se.base, xfer->m_cmd, xfer->m_param);
+ if (xfer->mode == SE_DMA) {
+ ret = geni_se_rx_dma_prep(gi3c->se.i3c_rsc.wrapper_dev,
+ gi3c->se.base, gi3c->cur_buf,
+ len, &rx_dma);
+ if (ret) {
+ xfer->mode = FIFO_MODE;
+ geni_se_select_mode(gi3c->se.base, xfer->mode);
+ }
+ }
+ } else {
+ dev_dbg(gi3c->se.dev, "I3C cmd:0x%x param:0x%x WRITE len:%d\n",
+ xfer->m_cmd, xfer->m_param, len);
+ writel_relaxed(len, gi3c->se.base + SE_I3C_TX_TRANS_LEN);
+ geni_setup_m_cmd(gi3c->se.base, xfer->m_cmd, xfer->m_param);
+ if (xfer->mode == SE_DMA) {
+ ret = geni_se_tx_dma_prep(gi3c->se.i3c_rsc.wrapper_dev,
+ gi3c->se.base, gi3c->cur_buf,
+ len, &tx_dma);
+ if (ret) {
+ xfer->mode = FIFO_MODE;
+ geni_se_select_mode(gi3c->se.base, xfer->mode);
+ }
+ }
+ if (xfer->mode == FIFO_MODE && len > 0) /* Get FIFO IRQ */
+ writel_relaxed(1, gi3c->se.base +
+ SE_GENI_TX_WATERMARK_REG);
+ }
+ time_remaining = wait_for_completion_timeout(&gi3c->done,
+ XFER_TIMEOUT);
+ if (!time_remaining) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&gi3c->spinlock, flags);
+ geni_i3c_err(gi3c, GENI_TIMEOUT);
+ gi3c->cur_buf = NULL;
+ gi3c->cur_len = gi3c->cur_idx = 0;
+ gi3c->cur_rnw = 0;
+ geni_abort_m_cmd(gi3c->se.base);
+ spin_unlock_irqrestore(&gi3c->spinlock, flags);
+ time_remaining = wait_for_completion_timeout(&gi3c->done,
+ XFER_TIMEOUT);
+ }
+ if (xfer->mode == SE_DMA) {
+ if (gi3c->err) {
+			/* Reset the DMA engine that was used for this transfer */
+			if (rnw == READ_TRANSACTION)
+				writel_relaxed(1, gi3c->se.base +
+					SE_DMA_RX_FSM_RST);
+			else
+				writel_relaxed(1, gi3c->se.base +
+					SE_DMA_TX_FSM_RST);
+ wait_for_completion_timeout(&gi3c->done, XFER_TIMEOUT);
+ }
+ geni_se_rx_dma_unprep(gi3c->se.i3c_rsc.wrapper_dev,
+ rx_dma, len);
+ geni_se_tx_dma_unprep(gi3c->se.i3c_rsc.wrapper_dev,
+ tx_dma, len);
+ }
+ ret = gi3c->err;
+ if (gi3c->err)
+ dev_err(gi3c->se.dev, "I3C transaction error :%d\n", gi3c->err);
+
+ gi3c->cur_buf = NULL;
+ gi3c->cur_len = gi3c->cur_idx = 0;
+ gi3c->cur_rnw = 0;
+ gi3c->err = 0;
+
+ return ret;
+}
+
+static int i3c_geni_execute_read_command
+(
+ struct geni_i3c_dev *gi3c,
+ struct i3c_xfer_params *xfer,
+ u8 *buf,
+ u32 len
+)
+{
+ gi3c->cur_rnw = READ_TRANSACTION;
+ gi3c->cur_buf = buf;
+ gi3c->cur_len = len;
+ return _i3c_geni_execute_command(gi3c, xfer);
+}
+
+static int i3c_geni_execute_write_command
+(
+ struct geni_i3c_dev *gi3c,
+ struct i3c_xfer_params *xfer,
+ u8 *buf,
+ u32 len
+)
+{
+ gi3c->cur_rnw = WRITE_TRANSACTION;
+ gi3c->cur_buf = buf;
+ gi3c->cur_len = len;
+ return _i3c_geni_execute_command(gi3c, xfer);
+}
+
+static void geni_i3c_perform_daa(struct geni_i3c_dev *gi3c)
+{
+ struct i3c_master_controller *m = &gi3c->ctrlr;
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ u8 last_dyn_addr = 0;
+ int ret;
+
+ while (1) {
+ u8 rx_buf[8], tx_buf[8];
+ struct i3c_xfer_params xfer = { FIFO_MODE };
+ struct i3c_device_info info = { 0 };
+ struct i3c_dev_desc *i3cdev;
+ bool new_device = true;
+ u64 pid;
+ u8 bcr, dcr, addr;
+
+ dev_dbg(gi3c->se.dev, "i3c entdaa read\n");
+
+ xfer.m_cmd = I2C_READ;
+ xfer.m_param = STOP_STRETCH | CONTINUOUS_MODE_DAA | USE_7E;
+
+ ret = i3c_geni_execute_read_command(gi3c, &xfer, rx_buf, 8);
+ if (ret)
+ break;
+
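+		/*
+		 * Per the ENTDAA flow, the target returns 8 bytes: its 48-bit
+		 * Provisional ID (MSB first), followed by its BCR and DCR.
+		 */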
+ dcr = rx_buf[7];
+ bcr = rx_buf[6];
+ pid = ((u64)rx_buf[0] << 40) |
+ ((u64)rx_buf[1] << 32) |
+ ((u64)rx_buf[2] << 24) |
+ ((u64)rx_buf[3] << 16) |
+ ((u64)rx_buf[4] << 8) |
+ ((u64)rx_buf[5]);
+
+ i3c_bus_for_each_i3cdev(bus, i3cdev) {
+ i3c_device_get_info(i3cdev->dev, &info);
+ if (pid == info.pid &&
+ dcr == info.dcr &&
+ bcr == info.bcr) {
+ new_device = false;
+ addr = (info.dyn_addr) ? info.dyn_addr :
+ info.static_addr;
+ break;
+ }
+ }
+
+ if (new_device) {
+ ret = i3c_master_get_free_addr(m,
+ last_dyn_addr + 1);
+ if (ret < 0)
+ goto daa_err;
+ addr = last_dyn_addr = (u8)ret;
+ set_new_addr_slot(gi3c->newaddrslots, addr);
+ }
+
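+		/*
+		 * The assigned-address byte carries the 7-bit dynamic address
+		 * in bits 7:1 and an odd-parity bit in bit 0 (PAR is 1 when
+		 * the address has an even number of set bits).
+		 */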
+ tx_buf[0] = (addr & I3C_ADDR_MASK) << 1;
+		tx_buf[0] |= (~hweight8(addr & I3C_ADDR_MASK)) & 1;
+
+ dev_dbg(gi3c->se.dev, "i3c entdaa write\n");
+
+ xfer.m_cmd = I2C_WRITE;
+ xfer.m_param = STOP_STRETCH | BYPASS_ADDR_PHASE | USE_7E;
+
+ ret = i3c_geni_execute_write_command(gi3c, &xfer, tx_buf, 1);
+ if (ret)
+ break;
+ }
+daa_err:
+ return;
+}
+
+static int geni_i3c_master_send_ccc_cmd
+(
+ struct i3c_master_controller *m,
+ struct i3c_ccc_cmd *cmd
+)
+{
+ struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+ int i, ret;
+
+ if (!(cmd->id & I3C_CCC_DIRECT) && (cmd->ndests != 1))
+ return -EINVAL;
+
+ ret = i3c_geni_runtime_get_mutex_lock(gi3c);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < cmd->ndests; i++) {
+ int stall = (i < (cmd->ndests - 1)) ||
+ (cmd->id == I3C_CCC_ENTDAA);
+ struct i3c_xfer_params xfer = { FIFO_MODE };
+
+ xfer.m_param = (stall ? STOP_STRETCH : 0);
+ xfer.m_param |= (cmd->id << CCC_HDR_CMD_SHFT);
+ xfer.m_param |= IBI_NACK_TBL_CTRL;
+ if (cmd->id & I3C_CCC_DIRECT) {
+ xfer.m_param |= ((cmd->dests[i].addr & I3C_ADDR_MASK)
+ << SLV_ADDR_SHFT);
+ if (cmd->rnw) {
+ if (i == 0)
+ xfer.m_cmd = I3C_DIRECT_CCC_READ;
+ else
+ xfer.m_cmd = I3C_PRIVATE_READ;
+ } else {
+ if (i == 0)
+ xfer.m_cmd =
+ (cmd->dests[i].payload.len > 0) ?
+ I3C_DIRECT_CCC_WRITE :
+ I3C_DIRECT_CCC_ADDR_ONLY;
+ else
+ xfer.m_cmd = I3C_PRIVATE_WRITE;
+ }
+ } else {
+ if (cmd->dests[i].payload.len > 0)
+ xfer.m_cmd = I3C_BCAST_CCC_WRITE;
+ else
+ xfer.m_cmd = I3C_BCAST_CCC_ADDR_ONLY;
+ }
+
+ if (i == 0)
+ xfer.m_param |= USE_7E;
+
+ if (cmd->rnw)
+ ret = i3c_geni_execute_read_command(gi3c, &xfer,
+ cmd->dests[i].payload.data,
+ cmd->dests[i].payload.len);
+ else
+ ret = i3c_geni_execute_write_command(gi3c, &xfer,
+ cmd->dests[i].payload.data,
+ cmd->dests[i].payload.len);
+ if (ret)
+ break;
+
+ if (cmd->id == I3C_CCC_ENTDAA)
+ geni_i3c_perform_daa(gi3c);
+ }
+
+ dev_dbg(gi3c->se.dev, "i3c ccc: txn ret:%d\n", ret);
+
+ i3c_geni_runtime_put_mutex_unlock(gi3c);
+
+ return ret;
+}
+
+static int geni_i3c_master_priv_xfers
+(
+ struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers
+)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+ int i, ret;
+ bool use_7e = true;
+
+ if (nxfers <= 0)
+ return 0;
+
+ ret = i3c_geni_runtime_get_mutex_lock(gi3c);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nxfers; i++) {
+ bool stall = (i < (nxfers - 1));
+ struct i3c_xfer_params xfer = { FIFO_MODE };
+
+ xfer.m_param = (stall ? STOP_STRETCH : 0);
+ xfer.m_param |= ((dev->info.dyn_addr & I3C_ADDR_MASK)
+ << SLV_ADDR_SHFT);
+ xfer.m_param |= (use_7e) ? USE_7E : 0;
+
+ /* Update use_7e status for next loop iteration */
+ use_7e = !stall;
+
+ if (xfers[i].rnw) {
+ xfer.m_cmd = I3C_PRIVATE_READ;
+ ret = i3c_geni_execute_read_command(gi3c, &xfer,
+ (u8 *)xfers[i].data.in,
+ xfers[i].len);
+ } else {
+ xfer.m_cmd = I3C_PRIVATE_WRITE;
+ ret = i3c_geni_execute_write_command(gi3c, &xfer,
+ (u8 *)xfers[i].data.out,
+ xfers[i].len);
+ }
+
+ if (ret)
+ break;
+ }
+
+ dev_dbg(gi3c->se.dev, "i3c priv: txn ret:%d\n", ret);
+
+ i3c_geni_runtime_put_mutex_unlock(gi3c);
+
+ return ret;
+}
+
+static int geni_i3c_master_i2c_xfers
+(
+ struct i2c_dev_desc *dev,
+ const struct i2c_msg *msgs,
+ int num
+)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+ int i, ret;
+
+ ret = i3c_geni_runtime_get_mutex_lock(gi3c);
+ if (ret)
+ return ret;
+
+ dev_dbg(gi3c->se.dev, "i2c xfer:num:%d, msgs:len:%d,flg:%d\n",
+ num, msgs[0].len, msgs[0].flags);
+ for (i = 0; i < num; i++) {
+ struct i3c_xfer_params xfer;
+
+ xfer.m_cmd = (msgs[i].flags & I2C_M_RD) ? I2C_READ :
+ I2C_WRITE;
+ xfer.m_param = (i < (num - 1)) ? STOP_STRETCH : 0;
+ xfer.m_param |= ((msgs[i].addr & I3C_ADDR_MASK)
+ << SLV_ADDR_SHFT);
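+		/* Use SE DMA for messages longer than 32 bytes, FIFO mode otherwise */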
+ xfer.mode = msgs[i].len > 32 ? SE_DMA : FIFO_MODE;
+ if (msgs[i].flags & I2C_M_RD)
+ ret = i3c_geni_execute_read_command(gi3c, &xfer,
+ msgs[i].buf, msgs[i].len);
+ else
+ ret = i3c_geni_execute_write_command(gi3c, &xfer,
+ msgs[i].buf, msgs[i].len);
+ if (ret)
+ break;
+ }
+
+ dev_dbg(gi3c->se.dev, "i2c: txn ret:%d\n", ret);
+
+ i3c_geni_runtime_put_mutex_unlock(gi3c);
+
+ return ret;
+}
+
+static u32 geni_i3c_master_i2c_funcs(struct i3c_master_controller *m)
+{
+ return I2C_FUNC_I2C;
+}
+
+static int geni_i3c_master_attach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i2c_dev_get_master(dev);
+ struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+ struct geni_i3c_i2c_dev_data *data;
+
+ data = devm_kzalloc(gi3c->se.dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_dev_set_master_data(dev, data);
+
+ return 0;
+}
+
+static void geni_i3c_master_detach_i2c_dev(struct i2c_dev_desc *dev)
+{
+ struct geni_i3c_i2c_dev_data *data = i2c_dev_get_master_data(dev);
+
+ i2c_dev_set_master_data(dev, NULL);
+ kfree(data);
+}
+
+static int geni_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct i3c_master_controller *m = i3c_dev_get_master(dev);
+ struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+ struct geni_i3c_i2c_dev_data *data;
+
+ data = devm_kzalloc(gi3c->se.dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i3c_dev_set_master_data(dev, data);
+
+ return 0;
+}
+
+static int geni_i3c_master_reattach_i3c_dev
+(
+ struct i3c_dev_desc *dev,
+ u8 old_dyn_addr
+)
+{
+ return 0;
+}
+
+static void geni_i3c_master_detach_i3c_dev(struct i3c_dev_desc *dev)
+{
+ struct geni_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev);
+
+ i3c_dev_set_master_data(dev, NULL);
+ kfree(data);
+}
+
+static int geni_i3c_master_entdaa_locked(struct geni_i3c_dev *gi3c)
+{
+ struct i3c_master_controller *m = &gi3c->ctrlr;
+ u8 addr;
+ int ret;
+
+ ret = i3c_master_entdaa_locked(m);
+ if (ret && ret != I3C_ERROR_M2)
+ return ret;
+
+ for (addr = 0; addr <= I3C_ADDR_MASK; addr++) {
+ if (is_new_addr_slot_set(gi3c->newaddrslots, addr)) {
+ clear_new_addr_slot(gi3c->newaddrslots, addr);
+ i3c_master_add_i3c_dev_locked(m, addr);
+ }
+ }
+
+ return 0;
+}
+
+static int geni_i3c_master_do_daa(struct i3c_master_controller *m)
+{
+ struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+
+ return geni_i3c_master_entdaa_locked(gi3c);
+}
+
+static int geni_i3c_master_bus_init(struct i3c_master_controller *m)
+{
+ struct geni_i3c_dev *gi3c = to_geni_i3c_master(m);
+ struct i3c_bus *bus = i3c_master_get_bus(m);
+ struct i3c_device_info info = { };
+ int ret;
+
+ ret = pm_runtime_get_sync(gi3c->se.dev);
+ if (ret < 0) {
+		dev_err(gi3c->se.dev, "%s: error turning on SE resources: %d\n",
+ __func__, ret);
+ pm_runtime_put_noidle(gi3c->se.dev);
+ /* Set device in suspended since resume failed */
+ pm_runtime_set_suspended(gi3c->se.dev);
+ return ret;
+ }
+
+ ret = geni_i3c_clk_map_idx(gi3c);
+ if (ret) {
+ dev_err(gi3c->se.dev,
+			"Invalid src clk %d Hz or bus clk %ld Hz: %d\n",
+ gi3c->clk_src_freq, bus->scl_rate.i3c,
+ ret);
+ goto err_cleanup;
+ }
+
+ qcom_geni_i3c_conf(gi3c);
+
+ /* Get an address for the master. */
+ ret = i3c_master_get_free_addr(m, 0);
+ if (ret < 0)
+ goto err_cleanup;
+
+ info.dyn_addr = ret;
+ info.dcr = I3C_DCR_GENERIC_DEVICE;
+ info.bcr = I3C_BCR_I3C_MASTER | I3C_BCR_HDR_CAP;
+ info.pid = 0;
+
+ ret = i3c_master_set_info(&gi3c->ctrlr, &info);
+
+err_cleanup:
+ pm_runtime_mark_last_busy(gi3c->se.dev);
+ pm_runtime_put_autosuspend(gi3c->se.dev);
+
+ return ret;
+}
+
+static void geni_i3c_master_bus_cleanup(struct i3c_master_controller *m)
+{
+}
+
+static bool geni_i3c_master_supports_ccc_cmd
+(
+ struct i3c_master_controller *m,
+ const struct i3c_ccc_cmd *cmd
+)
+{
+ switch (cmd->id) {
+ case I3C_CCC_ENEC(true):
+ /* fallthrough */
+ case I3C_CCC_ENEC(false):
+ /* fallthrough */
+ case I3C_CCC_DISEC(true):
+ /* fallthrough */
+ case I3C_CCC_DISEC(false):
+ /* fallthrough */
+ case I3C_CCC_ENTAS(0, true):
+ /* fallthrough */
+ case I3C_CCC_ENTAS(0, false):
+ /* fallthrough */
+ case I3C_CCC_RSTDAA(true):
+ /* fallthrough */
+ case I3C_CCC_RSTDAA(false):
+ /* fallthrough */
+ case I3C_CCC_ENTDAA:
+ /* fallthrough */
+ case I3C_CCC_SETMWL(true):
+ /* fallthrough */
+ case I3C_CCC_SETMWL(false):
+ /* fallthrough */
+ case I3C_CCC_SETMRL(true):
+ /* fallthrough */
+ case I3C_CCC_SETMRL(false):
+ /* fallthrough */
+ case I3C_CCC_DEFSLVS:
+ /* fallthrough */
+ case I3C_CCC_ENTHDR(0):
+ /* fallthrough */
+ case I3C_CCC_SETDASA:
+ /* fallthrough */
+ case I3C_CCC_SETNEWDA:
+ /* fallthrough */
+ case I3C_CCC_GETMWL:
+ /* fallthrough */
+ case I3C_CCC_GETMRL:
+ /* fallthrough */
+ case I3C_CCC_GETPID:
+ /* fallthrough */
+ case I3C_CCC_GETBCR:
+ /* fallthrough */
+ case I3C_CCC_GETDCR:
+ /* fallthrough */
+ case I3C_CCC_GETSTATUS:
+ /* fallthrough */
+ case I3C_CCC_GETACCMST:
+ /* fallthrough */
+ case I3C_CCC_GETMXDS:
+ /* fallthrough */
+ case I3C_CCC_GETHDRCAP:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static int geni_i3c_master_enable_ibi(struct i3c_dev_desc *dev)
+{
+ return -ENOTSUPP;
+}
+
+static int geni_i3c_master_disable_ibi(struct i3c_dev_desc *dev)
+{
+ return -ENOTSUPP;
+}
+
+static int geni_i3c_master_request_ibi(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req)
+{
+ return -ENOTSUPP;
+}
+
+static void geni_i3c_master_free_ibi(struct i3c_dev_desc *dev)
+{
+}
+
+static void geni_i3c_master_recycle_ibi_slot
+(
+ struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot
+)
+{
+}
+
+static const struct i3c_master_controller_ops geni_i3c_master_ops = {
+ .bus_init = geni_i3c_master_bus_init,
+ .bus_cleanup = geni_i3c_master_bus_cleanup,
+ .do_daa = geni_i3c_master_do_daa,
+ .attach_i3c_dev = geni_i3c_master_attach_i3c_dev,
+ .reattach_i3c_dev = geni_i3c_master_reattach_i3c_dev,
+ .detach_i3c_dev = geni_i3c_master_detach_i3c_dev,
+ .attach_i2c_dev = geni_i3c_master_attach_i2c_dev,
+ .detach_i2c_dev = geni_i3c_master_detach_i2c_dev,
+ .supports_ccc_cmd = geni_i3c_master_supports_ccc_cmd,
+ .send_ccc_cmd = geni_i3c_master_send_ccc_cmd,
+ .priv_xfers = geni_i3c_master_priv_xfers,
+ .i2c_xfers = geni_i3c_master_i2c_xfers,
+ .i2c_funcs = geni_i3c_master_i2c_funcs,
+ .enable_ibi = geni_i3c_master_enable_ibi,
+ .disable_ibi = geni_i3c_master_disable_ibi,
+ .request_ibi = geni_i3c_master_request_ibi,
+ .free_ibi = geni_i3c_master_free_ibi,
+ .recycle_ibi_slot = geni_i3c_master_recycle_ibi_slot,
+};
+
+static int i3c_geni_rsrcs_clk_init(struct geni_i3c_dev *gi3c)
+{
+ int ret;
+
+ gi3c->se.i3c_rsc.se_clk = devm_clk_get(gi3c->se.dev, "se-clk");
+ if (IS_ERR(gi3c->se.i3c_rsc.se_clk)) {
+ ret = PTR_ERR(gi3c->se.i3c_rsc.se_clk);
+ dev_err(gi3c->se.dev, "Error getting SE Core clk %d\n", ret);
+ return ret;
+ }
+
+ gi3c->se.i3c_rsc.m_ahb_clk = devm_clk_get(gi3c->se.dev, "m-ahb");
+ if (IS_ERR(gi3c->se.i3c_rsc.m_ahb_clk)) {
+ ret = PTR_ERR(gi3c->se.i3c_rsc.m_ahb_clk);
+ dev_err(gi3c->se.dev, "Error getting M AHB clk %d\n", ret);
+ return ret;
+ }
+
+ gi3c->se.i3c_rsc.s_ahb_clk = devm_clk_get(gi3c->se.dev, "s-ahb");
+ if (IS_ERR(gi3c->se.i3c_rsc.s_ahb_clk)) {
+ ret = PTR_ERR(gi3c->se.i3c_rsc.s_ahb_clk);
+ dev_err(gi3c->se.dev, "Error getting S AHB clk %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int i3c_geni_rsrcs_init(struct geni_i3c_dev *gi3c,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ struct platform_device *wrapper_pdev;
+ struct device_node *wrapper_ph_node;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+
+ gi3c->se.base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(gi3c->se.base))
+ return PTR_ERR(gi3c->se.base);
+
+ wrapper_ph_node = of_parse_phandle(pdev->dev.of_node,
+ "qcom,wrapper-core", 0);
+ if (IS_ERR_OR_NULL(wrapper_ph_node)) {
+ ret = PTR_ERR(wrapper_ph_node);
+ dev_err(&pdev->dev, "No wrapper core defined\n");
+ return ret;
+ }
+
+ wrapper_pdev = of_find_device_by_node(wrapper_ph_node);
+ of_node_put(wrapper_ph_node);
+ if (IS_ERR_OR_NULL(wrapper_pdev)) {
+ ret = PTR_ERR(wrapper_pdev);
+ dev_err(&pdev->dev, "Cannot retrieve wrapper device\n");
+ return ret;
+ }
+
+ gi3c->se.i3c_rsc.wrapper_dev = &wrapper_pdev->dev;
+
+ ret = geni_se_resources_init(&gi3c->se.i3c_rsc, I3C_CORE2X_VOTE,
+ (DEFAULT_SE_CLK * DEFAULT_BUS_WIDTH));
+ if (ret) {
+		dev_err(gi3c->se.dev, "geni_se_resources_init failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = device_property_read_u32(&pdev->dev, "se-clock-frequency",
+ &gi3c->clk_src_freq);
+ if (ret) {
+ dev_info(&pdev->dev,
+			"SE clk freq not specified, defaulting to 100 MHz.\n");
+ gi3c->clk_src_freq = 100000000;
+ }
+
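+	/*
+	 * Without a "dfs-index" property, default to an out-of-range index
+	 * so that qcom_geni_i3c_conf() resolves it at runtime via
+	 * geni_se_clk_freq_match().
+	 */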
+ ret = device_property_read_u32(&pdev->dev, "dfs-index",
+ &gi3c->dfs_idx);
+ if (ret)
+ gi3c->dfs_idx = 0xf;
+
+ gi3c->se.i3c_rsc.geni_pinctrl = devm_pinctrl_get(&pdev->dev);
+ if (IS_ERR(gi3c->se.i3c_rsc.geni_pinctrl)) {
+ dev_err(&pdev->dev, "Error no pinctrl config specified\n");
+ ret = PTR_ERR(gi3c->se.i3c_rsc.geni_pinctrl);
+ return ret;
+ }
+ gi3c->se.i3c_rsc.geni_gpio_active =
+ pinctrl_lookup_state(gi3c->se.i3c_rsc.geni_pinctrl, "default");
+ if (IS_ERR(gi3c->se.i3c_rsc.geni_gpio_active)) {
+ dev_err(&pdev->dev, "No default config specified\n");
+ ret = PTR_ERR(gi3c->se.i3c_rsc.geni_gpio_active);
+ return ret;
+ }
+ gi3c->se.i3c_rsc.geni_gpio_sleep =
+ pinctrl_lookup_state(gi3c->se.i3c_rsc.geni_pinctrl, "sleep");
+ if (IS_ERR(gi3c->se.i3c_rsc.geni_gpio_sleep)) {
+ dev_err(&pdev->dev, "No sleep config specified\n");
+ ret = PTR_ERR(gi3c->se.i3c_rsc.geni_gpio_sleep);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int geni_i3c_probe(struct platform_device *pdev)
+{
+ struct geni_i3c_dev *gi3c;
+ u32 proto, tx_depth;
+ int ret;
+ u32 se_mode;
+
+ gi3c = devm_kzalloc(&pdev->dev, sizeof(*gi3c), GFP_KERNEL);
+ if (!gi3c)
+ return -ENOMEM;
+
+ gi3c->se.dev = &pdev->dev;
+
+ ret = i3c_geni_rsrcs_init(gi3c, pdev);
+ if (ret)
+ return ret;
+
+ ret = i3c_geni_rsrcs_clk_init(gi3c);
+ if (ret)
+ return ret;
+
+ gi3c->irq = platform_get_irq(pdev, 0);
+ if (gi3c->irq < 0) {
+ dev_err(&pdev->dev, "IRQ error for i3c-master-geni\n");
+ return gi3c->irq;
+ }
+
+ ret = geni_i3c_clk_map_idx(gi3c);
+ if (ret) {
+ dev_err(&pdev->dev, "Invalid source clk frequency %d Hz: %d\n",
+ gi3c->clk_src_freq, ret);
+ return ret;
+ }
+
+ init_completion(&gi3c->done);
+ mutex_init(&gi3c->lock);
+ spin_lock_init(&gi3c->spinlock);
+ platform_set_drvdata(pdev, gi3c);
+ ret = devm_request_irq(&pdev->dev, gi3c->irq, geni_i3c_irq,
+ IRQF_TRIGGER_HIGH, dev_name(&pdev->dev), gi3c);
+ if (ret) {
+		dev_err(&pdev->dev, "request_irq failed for IRQ %d: err:%d\n",
+ gi3c->irq, ret);
+ return ret;
+ }
+ /* Disable the interrupt so that the system can enter low-power mode */
+ disable_irq(gi3c->irq);
+
+ ret = se_geni_resources_on(&gi3c->se.i3c_rsc);
+ if (ret) {
+ dev_err(&pdev->dev, "Error turning on resources %d\n", ret);
+ return ret;
+ }
+
+ if (!gi3c->ipcl) {
+ char ipc_name[I2C_NAME_SIZE];
+
+ snprintf(ipc_name, I2C_NAME_SIZE, "i3c-%d", gi3c->ctrlr.bus.id);
+ gi3c->ipcl = ipc_log_context_create(2, ipc_name, 0);
+ }
+
+ proto = get_se_proto(gi3c->se.base);
+ if (proto != I3C) {
+ dev_err(&pdev->dev, "Invalid proto %d\n", proto);
+ se_geni_resources_off(&gi3c->se.i3c_rsc);
+ return -ENXIO;
+ }
+
+ se_mode = readl_relaxed(gi3c->se.base + GENI_IF_FIFO_DISABLE_RO);
+ if (se_mode) {
+		dev_err(&pdev->dev, "Unsupported mode %d\n", se_mode);
+ se_geni_resources_off(&gi3c->se.i3c_rsc);
+ return -ENXIO;
+ }
+
+ tx_depth = get_tx_fifo_depth(gi3c->se.base);
+ gi3c->tx_wm = tx_depth - 1;
+ geni_se_init(gi3c->se.base, gi3c->tx_wm, tx_depth);
+ se_config_packing(gi3c->se.base, BITS_PER_BYTE, PACKING_BYTES_PW, true);
+ se_geni_resources_off(&gi3c->se.i3c_rsc);
+ GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev,
+ "i3c fifo/se-dma mode. fifo depth:%d\n", tx_depth);
+
+ pm_runtime_set_suspended(gi3c->se.dev);
+ pm_runtime_set_autosuspend_delay(gi3c->se.dev, I3C_AUTO_SUSPEND_DELAY);
+ pm_runtime_use_autosuspend(gi3c->se.dev);
+ pm_runtime_enable(gi3c->se.dev);
+
+ ret = i3c_master_register(&gi3c->ctrlr, &pdev->dev,
+ &geni_i3c_master_ops, false);
+
+ if (ret)
+ return ret;
+
+ GENI_SE_DBG(gi3c->ipcl, false, gi3c->se.dev, "I3C probed\n");
+ return ret;
+}
+
+static int geni_i3c_remove(struct platform_device *pdev)
+{
+ struct geni_i3c_dev *gi3c = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ pm_runtime_disable(gi3c->se.dev);
+ ret = i3c_master_unregister(&gi3c->ctrlr);
+ if (gi3c->ipcl)
+ ipc_log_context_destroy(gi3c->ipcl);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int geni_i3c_runtime_suspend(struct device *dev)
+{
+ struct geni_i3c_dev *gi3c = dev_get_drvdata(dev);
+
+ disable_irq(gi3c->irq);
+ se_geni_resources_off(&gi3c->se.i3c_rsc);
+ return 0;
+}
+
+static int geni_i3c_runtime_resume(struct device *dev)
+{
+ int ret;
+ struct geni_i3c_dev *gi3c = dev_get_drvdata(dev);
+
+ ret = se_geni_resources_on(&gi3c->se.i3c_rsc);
+ if (ret)
+ return ret;
+
+ enable_irq(gi3c->irq);
+
+ return 0;
+}
+#else
+static int geni_i3c_runtime_suspend(struct device *dev)
+{
+ return 0;
+}
+
+static int geni_i3c_runtime_resume(struct device *dev)
+{
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops geni_i3c_pm_ops = {
+ .runtime_suspend = geni_i3c_runtime_suspend,
+ .runtime_resume = geni_i3c_runtime_resume,
+};
+
+static const struct of_device_id geni_i3c_dt_match[] = {
+ { .compatible = "qcom,geni-i3c" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, geni_i3c_dt_match);
+
+static struct platform_driver geni_i3c_master = {
+ .probe = geni_i3c_probe,
+ .remove = geni_i3c_remove,
+ .driver = {
+ .name = "geni_i3c_master",
+ .pm = &geni_i3c_pm_ops,
+ .of_match_table = geni_i3c_dt_match,
+ },
+};
+
+module_platform_driver(geni_i3c_master);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:geni_i3c_master");
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index af53a10..471caa53 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -1490,6 +1490,7 @@ static const struct acpi_device_id kx_acpi_match[] = {
{"KXCJ1008", KXCJ91008},
{"KXCJ9000", KXCJ91008},
{"KIOX000A", KXCJ91008},
+ {"KIOX010A", KXCJ91008}, /* KXCJ91008 inside the display of a 2-in-1 */
{"KXTJ1009", KXTJ21009},
{"SMO8500", KXCJ91008},
{ },
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c
index 031d568..4e339cf 100644
--- a/drivers/iio/adc/axp288_adc.c
+++ b/drivers/iio/adc/axp288_adc.c
@@ -27,9 +27,18 @@
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
-#define AXP288_ADC_EN_MASK 0xF1
-#define AXP288_ADC_TS_PIN_GPADC 0xF2
-#define AXP288_ADC_TS_PIN_ON 0xF3
+/*
+ * This mask enables all ADCs except for the battery temp-sensor (TS), that is
+ * left as-is to avoid breaking charging on devices without a temp-sensor.
+ */
+#define AXP288_ADC_EN_MASK 0xF0
+#define AXP288_ADC_TS_ENABLE 0x01
+
+#define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0)
+#define AXP288_ADC_TS_CURRENT_OFF (0 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0)
+#define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0)
+#define AXP288_ADC_TS_CURRENT_ON (3 << 0)
enum axp288_adc_id {
AXP288_ADC_TS,
@@ -44,6 +53,7 @@ enum axp288_adc_id {
struct axp288_adc_info {
int irq;
struct regmap *regmap;
+ bool ts_enabled;
};
static const struct iio_chan_spec axp288_adc_channels[] = {
@@ -115,21 +125,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address,
return IIO_VAL_INT;
}
-static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode,
- unsigned long address)
+/*
+ * The current-source used for the battery temp-sensor (TS) is shared
+ * with the GPADC. For proper fuel-gauge and charger operation the TS
+ * current-source needs to be permanently on. But to read the GPADC we
+ * need to temporarily switch the TS current-source to ondemand, so that
+ * the GPADC can use it, otherwise we will always read an all 0 value.
+ */
+static int axp288_adc_set_ts(struct axp288_adc_info *info,
+ unsigned int mode, unsigned long address)
{
int ret;
- /* channels other than GPADC do not need to switch TS pin */
+ /* No need to switch the current-source if the TS pin is disabled */
+ if (!info->ts_enabled)
+ return 0;
+
+ /* Channels other than GPADC do not need the current source */
if (address != AXP288_GP_ADC_H)
return 0;
- ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode);
+ ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode);
if (ret)
return ret;
/* When switching to the GPADC pin give things some time to settle */
- if (mode == AXP288_ADC_TS_PIN_GPADC)
+ if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND)
usleep_range(6000, 10000);
return 0;
@@ -145,14 +167,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC,
+ if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND,
chan->address)) {
dev_err(&indio_dev->dev, "GPADC mode\n");
ret = -EINVAL;
break;
}
ret = axp288_adc_read_channel(val, chan->address, info->regmap);
- if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON,
+ if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON,
chan->address))
dev_err(&indio_dev->dev, "TS pin restore\n");
break;
@@ -164,13 +186,35 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
return ret;
}
-static int axp288_adc_set_state(struct regmap *regmap)
+static int axp288_adc_initialize(struct axp288_adc_info *info)
{
- /* ADC should be always enabled for internal FG to function */
- if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON))
- return -EIO;
+ int ret, adc_enable_val;
- return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK);
+ /*
+ * Determine if the TS pin is enabled and set the TS current-source
+ * accordingly.
+ */
+ ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val);
+ if (ret)
+ return ret;
+
+ if (adc_enable_val & AXP288_ADC_TS_ENABLE) {
+ info->ts_enabled = true;
+ ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+ AXP288_ADC_TS_CURRENT_ON);
+ } else {
+ info->ts_enabled = false;
+ ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL,
+ AXP288_ADC_TS_CURRENT_ON_OFF_MASK,
+ AXP288_ADC_TS_CURRENT_OFF);
+ }
+ if (ret)
+ return ret;
+
+ /* Turn on the ADC for all channels except TS, leave TS as is */
+ return regmap_update_bits(info->regmap, AXP20X_ADC_EN1,
+ AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK);
}
static const struct iio_info axp288_adc_iio_info = {
@@ -200,7 +244,7 @@ static int axp288_adc_probe(struct platform_device *pdev)
* Set ADC to enabled state at all time, including system suspend.
* otherwise internal fuel gauge functionality may be affected.
*/
- ret = axp288_adc_set_state(axp20x->regmap);
+ ret = axp288_adc_initialize(info);
if (ret) {
dev_err(&pdev->dev, "unable to enable ADC device\n");
return ret;
diff --git a/drivers/iio/adc/meson_saradc.c b/drivers/iio/adc/meson_saradc.c
index da2d16d..5dd104c 100644
--- a/drivers/iio/adc/meson_saradc.c
+++ b/drivers/iio/adc/meson_saradc.c
@@ -587,8 +587,11 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
struct clk_init_data init;
const char *clk_parents[1];
- init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%pOF#adc_div",
- indio_dev->dev.of_node);
+ init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_div",
+ dev_name(indio_dev->dev.parent));
+ if (!init.name)
+ return -ENOMEM;
+
init.flags = 0;
init.ops = &clk_divider_ops;
clk_parents[0] = __clk_get_name(priv->clkin);
@@ -606,8 +609,11 @@ static int meson_sar_adc_clk_init(struct iio_dev *indio_dev,
if (WARN_ON(IS_ERR(priv->adc_div_clk)))
return PTR_ERR(priv->adc_div_clk);
- init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%pOF#adc_en",
- indio_dev->dev.of_node);
+ init.name = devm_kasprintf(&indio_dev->dev, GFP_KERNEL, "%s#adc_en",
+ dev_name(indio_dev->dev.parent));
+ if (!init.name)
+ return -ENOMEM;
+
init.flags = CLK_SET_RATE_PARENT;
init.ops = &clk_gate_ops;
clk_parents[0] = __clk_get_name(priv->adc_div_clk);
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c
index 184d686..8b4568e 100644
--- a/drivers/iio/adc/ti-ads8688.c
+++ b/drivers/iio/adc/ti-ads8688.c
@@ -41,6 +41,7 @@
#define ADS8688_VREF_MV 4096
#define ADS8688_REALBITS 16
+#define ADS8688_MAX_CHANNELS 8
/*
* enum ads8688_range - ADS8688 reference voltage range
@@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
- u16 buffer[8];
+ u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)];
int i, j = 0;
for (i = 0; i < indio_dev->masklength; i++) {
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c
index a406ad3..3a20cb5 100644
--- a/drivers/iio/chemical/atlas-ph-sensor.c
+++ b/drivers/iio/chemical/atlas-ph-sensor.c
@@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_TEMP:
- *val = 1; /* 0.01 */
- *val2 = 100;
- break;
+ *val = 10;
+ return IIO_VAL_INT;
case IIO_PH:
*val = 1; /* 0.001 */
*val2 = 1000;
@@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct atlas_data *data = iio_priv(indio_dev);
- __be32 reg = cpu_to_be32(val);
+ __be32 reg = cpu_to_be32(val / 10);
if (val2 != 0 || val < 0 || val > 20000)
return -EINVAL;
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 9bd63ab..6f013a5 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -1157,6 +1157,7 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
break;
+ rvt_qp_wqe_unreserve(qp, wqe);
s_last = qp->s_last;
trace_hfi1_qp_send_completion(qp, wqe, s_last);
if (++s_last >= qp->s_size)
@@ -1209,6 +1210,7 @@ static struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
u32 s_last;
rvt_put_swqe(wqe);
+ rvt_qp_wqe_unreserve(qp, wqe);
s_last = qp->s_last;
trace_hfi1_qp_send_completion(qp, wqe, s_last);
if (++s_last >= qp->s_size)
diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c
index 5f56f3c..62a3832 100644
--- a/drivers/infiniband/hw/hfi1/ruc.c
+++ b/drivers/infiniband/hw/hfi1/ruc.c
@@ -278,6 +278,8 @@ static void ruc_loopback(struct rvt_qp *sqp)
goto op_err;
if (!ret)
goto rnr_nak;
+ if (wqe->length > qp->r_len)
+ goto inv_err;
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -445,7 +447,10 @@ static void ruc_loopback(struct rvt_qp *sqp)
goto err;
inv_err:
- send_status = IB_WC_REM_INV_REQ_ERR;
+ send_status =
+ sqp->ibqp.qp_type == IB_QPT_RC ?
+ IB_WC_REM_INV_REQ_ERR :
+ IB_WC_SUCCESS;
wc.status = IB_WC_LOC_QP_OP_ERR;
goto err;
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 0d3473b..21f4239 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -533,7 +533,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
{
struct mthca_ucontext *context;
- qp = kmalloc(sizeof *qp, GFP_KERNEL);
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
@@ -599,7 +599,7 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
if (pd->uobject)
return ERR_PTR(-EINVAL);
- qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
+ qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index f8a7de7..563f71e 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -274,6 +274,8 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
goto op_err;
if (!ret)
goto rnr_nak;
+ if (wqe->length > qp->r_len)
+ goto inv_err;
break;
case IB_WR_RDMA_WRITE_WITH_IMM:
@@ -434,7 +436,10 @@ static void qib_ruc_loopback(struct rvt_qp *sqp)
goto err;
inv_err:
- send_status = IB_WC_REM_INV_REQ_ERR;
+ send_status =
+ sqp->ibqp.qp_type == IB_QPT_RC ?
+ IB_WC_REM_INV_REQ_ERR :
+ IB_WC_SUCCESS;
wc.status = IB_WC_LOC_QP_OP_ERR;
goto err;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 0b34e9095..2c1114e 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2951,7 +2951,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
- int i, j;
u8 status;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2963,15 +2962,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
if (status)
return FAILED;
- for (i = 0; i < target->ch_count; i++) {
- ch = &target->ch[i];
- for (j = 0; j < target->req_ring_size; ++j) {
- struct srp_request *req = &ch->req_ring[j];
-
- srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
- }
- }
-
return SUCCESS;
}
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c
index 1efcfdf..dd9dd4e 100644
--- a/drivers/input/misc/bma150.c
+++ b/drivers/input/misc/bma150.c
@@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150)
idev->close = bma150_irq_close;
input_set_drvdata(idev, bma150);
+ bma150->input = idev;
+
error = input_register_device(idev);
if (error) {
input_free_device(idev);
return error;
}
- bma150->input = idev;
return 0;
}
@@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150)
bma150_init_input_device(bma150, ipoll_dev->input);
+ bma150->input_polled = ipoll_dev;
+ bma150->input = ipoll_dev->input;
+
error = input_register_polled_device(ipoll_dev);
if (error) {
input_free_polled_device(ipoll_dev);
return error;
}
- bma150->input_polled = ipoll_dev;
- bma150->input = ipoll_dev->input;
-
return 0;
}
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index f322a17..225ae69 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id);
static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
- { "ELAN0501", 0 },
{ "ELAN0600", 0 },
{ "ELAN0602", 0 },
{ "ELAN0605", 0 },
@@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN060C", 0 },
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
+ { "ELAN0617", 0 },
{ "ELAN0618", 0 },
{ "ELAN061C", 0 },
{ "ELAN061D", 0 },
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 9fe075c..a7f8b16 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus UX31 0x361f00 20, 15, 0e clickpad
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**)
+ * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**)
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
@@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"),
},
},
+ {
+ /* Fujitsu H780 also has a middle button */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"),
+ },
+ },
#endif
{ }
};
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index bee0dfb..34c9aa7 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -438,7 +438,14 @@ static int iommu_init_device(struct device *dev)
dev_data->alias = get_alias(dev);
- if (dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
+ /*
+	 * By default we use passthrough mode for IOMMUv2-capable devices.
+ * But if amd_iommu=force_isolation is set (e.g. to debug DMA to
+ * invalid address), we ignore the capability for the device so
+ * it'll be forced to go into translation mode.
+ */
+ if ((iommu_pass_through || !amd_iommu_force_isolation) &&
+ dev_is_pci(dev) && pci_iommuv2_capable(to_pci_dev(dev))) {
struct amd_iommu *iommu;
iommu = amd_iommu_rlookup_table[dev_data->devid];
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index ee76a88..5391896 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -586,7 +586,11 @@ struct arm_smmu_device {
struct arm_smmu_strtab_cfg strtab_cfg;
- u32 sync_count;
+ /* Hi16xx adds an extra 32 bits of goodness to its MSI payload */
+ union {
+ u32 sync_count;
+ u64 padding;
+ };
/* IOMMU core code handle */
struct iommu_device iommu;
@@ -684,7 +688,13 @@ static void queue_inc_cons(struct arm_smmu_queue *q)
u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;
q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
- writel(q->cons, q->cons_reg);
+
+ /*
+ * Ensure that all CPU accesses (reads and writes) to the queue
+ * are complete before we update the cons pointer.
+ */
+ mb();
+ writel_relaxed(q->cons, q->cons_reg);
}
static int queue_sync_prod(struct arm_smmu_queue *q)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 77ff4bb..c6cbfee 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -135,6 +135,7 @@ enum arm_smmu_implementation {
ARM_MMU500,
CAVIUM_SMMUV2,
QCOM_SMMUV500,
+ QCOM_SMMUV2,
};
struct arm_smmu_impl_def_reg {
@@ -4681,6 +4682,7 @@ ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
&qsmmuv500_arch_ops);
+ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, NULL);
static const struct of_device_id arm_smmu_of_match[] = {
{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
@@ -4690,6 +4692,7 @@ static const struct of_device_id arm_smmu_of_match[] = {
{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
{ .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
+ { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index cf3abb8..4c2246f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -93,9 +93,14 @@ struct its_device;
* The ITS structure - contains most of the infrastructure, with the
* top-level MSI domain, the command queue, the collections, and the
* list of devices writing to it.
+ *
+ * dev_alloc_lock has to be taken for device allocations, while the
+ * spinlock must be taken to parse data structures such as the device
+ * list.
*/
struct its_node {
raw_spinlock_t lock;
+ struct mutex dev_alloc_lock;
struct list_head entry;
void __iomem *base;
phys_addr_t phys_base;
@@ -152,6 +157,7 @@ struct its_device {
void *itt;
u32 nr_ites;
u32 device_id;
+ bool shared;
};
static struct {
@@ -2290,6 +2296,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
struct its_device *its_dev;
struct msi_domain_info *msi_info;
u32 dev_id;
+ int err = 0;
/*
* We ignore "dev" entierely, and rely on the dev_id that has
@@ -2312,6 +2319,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
return -EINVAL;
}
+ mutex_lock(&its->dev_alloc_lock);
its_dev = its_find_device(its, dev_id);
if (its_dev) {
/*
@@ -2319,18 +2327,22 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
* another alias (PCI bridge of some sort). No need to
* create the device.
*/
+ its_dev->shared = true;
pr_debug("Reusing ITT for devID %x\n", dev_id);
goto out;
}
its_dev = its_create_device(its, dev_id, nvec, true);
- if (!its_dev)
- return -ENOMEM;
+ if (!its_dev) {
+ err = -ENOMEM;
+ goto out;
+ }
pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
out:
+ mutex_unlock(&its->dev_alloc_lock);
info->scratchpad[0].ptr = its_dev;
- return 0;
+ return err;
}
static struct msi_domain_ops its_msi_domain_ops = {
@@ -2434,6 +2446,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
{
struct irq_data *d = irq_domain_get_irq_data(domain, virq);
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_node *its = its_dev->its;
int i;
for (i = 0; i < nr_irqs; i++) {
@@ -2448,8 +2461,14 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
irq_domain_reset_irq_data(data);
}
- /* If all interrupts have been freed, start mopping the floor */
- if (bitmap_empty(its_dev->event_map.lpi_map,
+ mutex_lock(&its->dev_alloc_lock);
+
+ /*
+ * If all interrupts have been freed, start mopping the
+	 * floor. This is conditioned on the device not being shared.
+ */
+ if (!its_dev->shared &&
+ bitmap_empty(its_dev->event_map.lpi_map,
its_dev->event_map.nr_lpis)) {
its_lpi_free(its_dev->event_map.lpi_map,
its_dev->event_map.lpi_base,
@@ -2461,6 +2480,8 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
its_free_device(its_dev);
}
+ mutex_unlock(&its->dev_alloc_lock);
+
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
@@ -3385,6 +3406,7 @@ static int __init its_probe_one(struct resource *res,
}
raw_spin_lock_init(&its->lock);
+ mutex_init(&its->dev_alloc_lock);
INIT_LIST_HEAD(&its->entry);
INIT_LIST_HEAD(&its->its_device_list);
typer = gic_read_typer(its_base + GITS_TYPER);
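The comment block added to struct its_node above describes a two-lock discipline: the new dev_alloc_lock mutex serializes device allocation and teardown, the existing raw spinlock only protects walks of the device list, and the new "shared" flag keeps an ITT reused by an alias from being freed. A minimal sketch of that discipline, using hypothetical toy_* names rather than the driver's own types:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct toy_dev {
	struct list_head entry;
	u32 dev_id;
	bool shared;
};

struct toy_node {
	raw_spinlock_t lock;		/* guards dev_list walks only */
	struct mutex dev_alloc_lock;	/* guards allocation and free */
	struct list_head dev_list;
};

static void toy_node_init(struct toy_node *node)
{
	raw_spin_lock_init(&node->lock);
	mutex_init(&node->dev_alloc_lock);
	INIT_LIST_HEAD(&node->dev_list);
}

/* Parsing the list needs only the spinlock. */
static struct toy_dev *toy_find_device(struct toy_node *node, u32 dev_id)
{
	struct toy_dev *dev, *found = NULL;
	unsigned long flags;

	raw_spin_lock_irqsave(&node->lock, flags);
	list_for_each_entry(dev, &node->dev_list, entry) {
		if (dev->dev_id == dev_id) {
			found = dev;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&node->lock, flags);
	return found;
}

/*
 * Allocation takes the mutex around lookup + create, so two callers
 * racing on the same dev_id cannot both allocate; a reused entry is
 * marked shared so a later free path leaves it alone.
 */
static struct toy_dev *toy_get_device(struct toy_node *node, u32 dev_id)
{
	struct toy_dev *dev;
	unsigned long flags;

	mutex_lock(&node->dev_alloc_lock);
	dev = toy_find_device(node, dev_id);
	if (dev) {
		dev->shared = true;
		goto out;
	}
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev) {
		dev->dev_id = dev_id;
		raw_spin_lock_irqsave(&node->lock, flags);
		list_add(&dev->entry, &node->dev_list);
		raw_spin_unlock_irqrestore(&node->lock, flags);
	}
out:
	mutex_unlock(&node->dev_alloc_lock);
	return dev;
}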
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index ca82313..603aabd 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -13,12 +13,13 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/soc/qcom/irq.h>
#include <linux/spinlock.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/types.h>
#define PDC_MAX_IRQS 138
+#define PDC_MAX_GPIO_IRQS 256
#define CLEAR_INTR(reg, intr) (reg & ~(1 << intr))
#define ENABLE_INTR(reg, intr) (reg | (1 << intr))
@@ -169,7 +170,6 @@ static irq_hw_number_t get_parent_hwirq(int pin)
return (region->parent_base + pin - region->pin_base);
}
- WARN_ON(1);
return ~0UL;
}
@@ -232,6 +232,60 @@ static const struct irq_domain_ops qcom_pdc_ops = {
.free = irq_domain_free_irqs_common,
};
+static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct qcom_irq_fwspec *qcom_fwspec = data;
+ struct irq_fwspec *fwspec = &qcom_fwspec->fwspec;
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq, parent_hwirq;
+ unsigned int type;
+ int ret;
+
+ ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return -EINVAL;
+
+ parent_hwirq = get_parent_hwirq(hwirq);
+ if (parent_hwirq == ~0UL)
+ return -EINVAL;
+
+ ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &qcom_pdc_gic_chip, NULL);
+ if (ret)
+ return ret;
+
+ qcom_fwspec->mask = true;
+
+ if (type & IRQ_TYPE_EDGE_BOTH)
+ type = IRQ_TYPE_EDGE_RISING;
+
+ if (type & IRQ_TYPE_LEVEL_MASK)
+ type = IRQ_TYPE_LEVEL_HIGH;
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = 0;
+ parent_fwspec.param[1] = parent_hwirq;
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
+ &parent_fwspec);
+}
+
+static int qcom_pdc_gpio_domain_select(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ enum irq_domain_bus_token bus_token)
+{
+ return (bus_token == DOMAIN_BUS_WAKEUP);
+}
+
+static const struct irq_domain_ops qcom_pdc_gpio_ops = {
+ .select = qcom_pdc_gpio_domain_select,
+ .alloc = qcom_pdc_gpio_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
static int pdc_setup_pin_mapping(struct device_node *np)
{
int ret, n;
@@ -270,7 +324,7 @@ static int pdc_setup_pin_mapping(struct device_node *np)
static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
{
- struct irq_domain *parent_domain, *pdc_domain;
+ struct irq_domain *parent_domain, *pdc_domain, *pdc_gpio_domain;
int ret;
pdc_base = of_iomap(node, 0);
@@ -301,6 +355,18 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
goto fail;
}
+ pdc_gpio_domain = irq_domain_create_hierarchy(parent_domain, 0,
+ PDC_MAX_GPIO_IRQS,
+ of_fwnode_handle(node),
+ &qcom_pdc_gpio_ops, NULL);
+ if (!pdc_gpio_domain) {
+ pr_err("GIC domain add failed for GPIO domain\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ irq_domain_update_bus_token(pdc_gpio_domain, DOMAIN_BUS_WAKEUP);
+
return 0;
fail:
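The qcom-pdc change above registers a second, GPIO-facing hierarchy domain and tags it with DOMAIN_BUS_WAKEUP; its .select callback then ensures that only fwspec lookups asking for that bus token (for example via irq_find_matching_fwspec()) resolve to this domain. Reduced to its essentials, with hypothetical my_* names and the .alloc/.free ops omitted:

#include <linux/irqdomain.h>

static int my_wakeup_domain_select(struct irq_domain *d,
				   struct irq_fwspec *fwspec,
				   enum irq_domain_bus_token bus_token)
{
	/* Only match lookups that explicitly ask for the wakeup bus. */
	return bus_token == DOMAIN_BUS_WAKEUP;
}

static const struct irq_domain_ops my_wakeup_domain_ops = {
	.select	= my_wakeup_domain_select,
	/* .alloc / .free omitted in this sketch */
};

static struct irq_domain *my_register_wakeup_domain(struct irq_domain *parent,
						    struct fwnode_handle *fwnode)
{
	struct irq_domain *d;

	d = irq_domain_create_hierarchy(parent, 0, 256, fwnode,
					&my_wakeup_domain_ops, NULL);
	if (!d)
		return NULL;

	/* Advertise the domain under the wakeup bus token so clients
	 * looking for DOMAIN_BUS_WAKEUP find it. */
	irq_domain_update_bus_token(d, DOMAIN_BUS_WAKEUP);
	return d;
}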
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index 4ac378e..40ca1e8 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -423,7 +423,7 @@ void b1_parse_version(avmctrl_info *cinfo)
int i, j;
for (j = 0; j < AVM_MAXVERSION; j++)
- cinfo->version[j] = "\0\0" + 1;
+ cinfo->version[j] = "";
for (i = 0, j = 0;
j < AVM_MAXVERSION && i < cinfo->versionlen;
j++, i += cinfo->versionbuf[i] + 1)
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 8e5b0316..64a6371 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1170,11 +1170,13 @@ HFCPCI_l1hw(struct PStack *st, int pr, void *arg)
if (cs->debug & L1_DEB_LAPD)
debugl1(cs, "-> PH_REQUEST_PULL");
#endif
+ spin_lock_irqsave(&cs->lock, flags);
if (!cs->tx_skb) {
test_and_clear_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
st->l1.l1l2(st, PH_PULL | CONFIRM, NULL);
} else
test_and_set_bit(FLG_L1_PULL_REQ, &st->l1.Flags);
+ spin_unlock_irqrestore(&cs->lock, flags);
break;
case (HW_RESET | REQUEST):
spin_lock_irqsave(&cs->lock, flags);
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
index b730037..9cff667 100644
--- a/drivers/isdn/i4l/isdn_tty.c
+++ b/drivers/isdn/i4l/isdn_tty.c
@@ -1456,15 +1456,19 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios)
{
modem_info *info = (modem_info *) tty->driver_data;
+ mutex_lock(&modem_info_mutex);
if (!old_termios)
isdn_tty_change_speed(info);
else {
if (tty->termios.c_cflag == old_termios->c_cflag &&
tty->termios.c_ispeed == old_termios->c_ispeed &&
- tty->termios.c_ospeed == old_termios->c_ospeed)
+ tty->termios.c_ospeed == old_termios->c_ospeed) {
+ mutex_unlock(&modem_info_mutex);
return;
+ }
isdn_tty_change_speed(info);
}
+ mutex_unlock(&modem_info_mutex);
}
/*
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
index 211ed6c..5789787 100644
--- a/drivers/isdn/mISDN/timerdev.c
+++ b/drivers/isdn/mISDN/timerdev.c
@@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t)
spin_lock_irqsave(&timer->dev->lock, flags);
if (timer->id >= 0)
list_move_tail(&timer->list, &timer->dev->expired);
- spin_unlock_irqrestore(&timer->dev->lock, flags);
wake_up_interruptible(&timer->dev->wait);
+ spin_unlock_irqrestore(&timer->dev->lock, flags);
}
static int
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index a2e74fe..fd64df5 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -318,7 +318,9 @@ static int lp5523_init_program_engine(struct lp55xx_chip *chip)
/* Let the programs run for couple of ms and check the engine status */
usleep_range(3000, 6000);
- lp55xx_read(chip, LP5523_REG_STATUS, &status);
+ ret = lp55xx_read(chip, LP5523_REG_STATUS, &status);
+ if (ret)
+ return ret;
status &= LP5523_ENG_STATUS_MASK;
if (status != LP5523_ENG_STATUS_MASK) {
diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
index 2940cdc..95be6e3 100644
--- a/drivers/lightnvm/pblk-core.c
+++ b/drivers/lightnvm/pblk-core.c
@@ -1252,15 +1252,22 @@ int pblk_line_recov_alloc(struct pblk *pblk, struct pblk_line *line)
ret = pblk_line_alloc_bitmaps(pblk, line);
if (ret)
- return ret;
+ goto fail;
if (!pblk_line_init_bb(pblk, line, 0)) {
- list_add(&line->list, &l_mg->free_list);
- return -EINTR;
+ ret = -EINTR;
+ goto fail;
}
pblk_rl_free_lines_dec(&pblk->rl, line, true);
return 0;
+
+fail:
+ spin_lock(&l_mg->free_lock);
+ list_add(&line->list, &l_mg->free_list);
+ spin_unlock(&l_mg->free_lock);
+
+ return ret;
}
void pblk_line_recov_close(struct pblk *pblk, struct pblk_line *line)
diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
index 879227d..c3e038d 100644
--- a/drivers/lightnvm/pblk-write.c
+++ b/drivers/lightnvm/pblk-write.c
@@ -158,9 +158,11 @@ static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
w_ctx = &entry->w_ctx;
/* Check if the lba has been overwritten */
- ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
- if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
- w_ctx->lba = ADDR_EMPTY;
+ if (w_ctx->lba != ADDR_EMPTY) {
+ ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
+ if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
+ w_ctx->lba = ADDR_EMPTY;
+ }
/* Mark up the entry as submittable again */
flags = READ_ONCE(w_ctx->flags);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 5ddbb81..8f61366 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio)
if (IS_ERR(bip))
return PTR_ERR(bip);
- tag_len = io->cc->on_disk_tag_size * bio_sectors(bio);
+ tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift);
bip->bip_iter.bi_size = tag_len;
bip->bip_iter.bi_sector = io->cc->start + io->sector;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c30a785..cd4220e 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -257,6 +257,7 @@ struct pool {
spinlock_t lock;
struct bio_list deferred_flush_bios;
+ struct bio_list deferred_flush_completions;
struct list_head prepared_mappings;
struct list_head prepared_discards;
struct list_head prepared_discards_pt2;
@@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
mempool_free(m, &m->tc->pool->mapping_pool);
}
+static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio)
+{
+ struct pool *pool = tc->pool;
+ unsigned long flags;
+
+ /*
+ * If the bio has the REQ_FUA flag set, we must commit the metadata
+ * before signaling its completion.
+ */
+ if (!bio_triggers_commit(tc, bio)) {
+ bio_endio(bio);
+ return;
+ }
+
+ /*
+ * Complete bio with an error if earlier I/O caused changes to the
+ * metadata that can't be committed, e.g, due to I/O errors on the
+ * metadata device.
+ */
+ if (dm_thin_aborted_changes(tc->td)) {
+ bio_io_error(bio);
+ return;
+ }
+
+ /*
+ * Batch together any bios that trigger commits and then issue a
+ * single commit for them in process_deferred_bios().
+ */
+ spin_lock_irqsave(&pool->lock, flags);
+ bio_list_add(&pool->deferred_flush_completions, bio);
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
struct thin_c *tc = m->tc;
@@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
*/
if (bio) {
inc_remap_and_issue_cell(tc, m->cell, m->data_block);
- bio_endio(bio);
+ complete_overwrite_bio(tc, bio);
} else {
inc_all_io_entry(tc->pool, m->cell->holder);
remap_and_issue(tc, m->cell->holder, m->data_block);
@@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool)
{
unsigned long flags;
struct bio *bio;
- struct bio_list bios;
+ struct bio_list bios, bio_completions;
struct thin_c *tc;
tc = get_first_thin(pool);
@@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool)
}
/*
- * If there are any deferred flush bios, we must commit
- * the metadata before issuing them.
+ * If there are any deferred flush bios, we must commit the metadata
+ * before issuing them or signaling their completion.
*/
bio_list_init(&bios);
+ bio_list_init(&bio_completions);
+
spin_lock_irqsave(&pool->lock, flags);
bio_list_merge(&bios, &pool->deferred_flush_bios);
bio_list_init(&pool->deferred_flush_bios);
+
+ bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
+ bio_list_init(&pool->deferred_flush_completions);
spin_unlock_irqrestore(&pool->lock, flags);
- if (bio_list_empty(&bios) &&
+ if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
!(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
return;
if (commit(pool)) {
+ bio_list_merge(&bios, &bio_completions);
+
while ((bio = bio_list_pop(&bios)))
bio_io_error(bio);
return;
}
pool->last_commit_jiffies = jiffies;
+ while ((bio = bio_list_pop(&bio_completions)))
+ bio_endio(bio);
+
while ((bio = bio_list_pop(&bios)))
generic_make_request(bio);
}
@@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md,
INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
spin_lock_init(&pool->lock);
bio_list_init(&pool->deferred_flush_bios);
+ bio_list_init(&pool->deferred_flush_completions);
INIT_LIST_HEAD(&pool->prepared_mappings);
INIT_LIST_HEAD(&pool->prepared_discards);
INIT_LIST_HEAD(&pool->prepared_discards_pt2);
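The dm-thin changes above stop completing REQ_FUA overwrite bios directly and instead park them on the new deferred_flush_completions list, so a single metadata commit can cover the whole batch before any of them is ended. A stripped-down sketch of that batching, assuming a hypothetical flushq type and a caller-supplied commit callback in place of dm-thin's pool and commit():

#include <linux/bio.h>
#include <linux/spinlock.h>

struct flushq {
	spinlock_t lock;
	struct bio_list pending;	/* REQ_FUA bios waiting for a commit */
};

/* Producer side: park a FUA bio instead of ending it immediately. */
static void flushq_defer(struct flushq *q, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	bio_list_add(&q->pending, bio);
	spin_unlock_irqrestore(&q->lock, flags);
}

/* Worker side: one metadata commit, then complete the whole batch. */
static void flushq_flush(struct flushq *q, int (*commit)(void *ctx), void *ctx)
{
	struct bio_list done;
	struct bio *bio;
	unsigned long flags;

	bio_list_init(&done);

	spin_lock_irqsave(&q->lock, flags);
	bio_list_merge(&done, &q->pending);
	bio_list_init(&q->pending);
	spin_unlock_irqrestore(&q->lock, flags);

	if (commit(ctx)) {
		/* Commit failed: the batch cannot be claimed durable. */
		while ((bio = bio_list_pop(&done)))
			bio_io_error(bio);
		return;
	}

	/* Data and metadata are now durable; signal completion. */
	while ((bio = bio_list_pop(&done)))
		bio_endio(bio);
}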
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1d54109..fa47249 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio)
reschedule_retry(r1_bio);
}
+static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
+{
+ sector_t sync_blocks = 0;
+ sector_t s = r1_bio->sector;
+ long sectors_to_go = r1_bio->sectors;
+
+ /* make sure these bits don't get cleared. */
+ do {
+ md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
+ s += sync_blocks;
+ sectors_to_go -= sync_blocks;
+ } while (sectors_to_go > 0);
+}
+
static void end_sync_write(struct bio *bio)
{
int uptodate = !bio->bi_status;
@@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio)
struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
if (!uptodate) {
- sector_t sync_blocks = 0;
- sector_t s = r1_bio->sector;
- long sectors_to_go = r1_bio->sectors;
- /* make sure these bits doesn't get cleared. */
- do {
- md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
- s += sync_blocks;
- sectors_to_go -= sync_blocks;
- } while (sectors_to_go > 0);
+ abort_sync_write(mddev, r1_bio);
set_bit(WriteErrorSeen, &rdev->flags);
if (!test_and_set_bit(WantReplacement, &rdev->flags))
set_bit(MD_RECOVERY_NEEDED, &
@@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
(i == r1_bio->read_disk ||
!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
continue;
- if (test_bit(Faulty, &conf->mirrors[i].rdev->flags))
+ if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
+ abort_sync_write(mddev, r1_bio);
continue;
+ }
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 811427e..7033a28 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1208,7 +1208,9 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
struct bio *split = bio_split(bio, max_sectors,
gfp, &conf->bio_split);
bio_chain(split, bio);
+ allow_barrier(conf);
generic_make_request(bio);
+ wait_barrier(conf);
bio = split;
r10_bio->master_bio = bio;
r10_bio->sectors = max_sectors;
@@ -1513,7 +1515,9 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
struct bio *split = bio_split(bio, r10_bio->sectors,
GFP_NOIO, &conf->bio_split);
bio_chain(split, bio);
+ allow_barrier(conf);
generic_make_request(bio);
+ wait_barrier(conf);
bio = split;
r10_bio->master_bio = bio;
}
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 82af974..63c9ac2 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -61,6 +61,7 @@
depends on VIDEO_V4L2 && I2C && VIDEO_V4L2_SUBDEV_API
depends on SND_SOC
select SND_PCM
+ select HDMI
---help---
V4L2 subdevice driver for the NXP TDA1997x HDMI receivers.
@@ -610,6 +611,7 @@
tristate "Sony IMX274 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
depends on MEDIA_CAMERA_SUPPORT
+ select REGMAP_I2C
---help---
This is a V4L2 sensor driver for the Sony IMX274
CMOS image sensor.
diff --git a/drivers/media/i2c/ad9389b.c b/drivers/media/i2c/ad9389b.c
index 5b008b0..aa8b04c 100644
--- a/drivers/media/i2c/ad9389b.c
+++ b/drivers/media/i2c/ad9389b.c
@@ -578,7 +578,7 @@ static const struct v4l2_dv_timings_cap ad9389b_timings_cap = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
diff --git a/drivers/media/i2c/adv7511.c b/drivers/media/i2c/adv7511.c
index f3899cc8..88349b5 100644
--- a/drivers/media/i2c/adv7511.c
+++ b/drivers/media/i2c/adv7511.c
@@ -130,7 +130,7 @@ static const struct v4l2_dv_timings_cap adv7511_timings_cap = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, ADV7511_MAX_WIDTH, 0, ADV7511_MAX_HEIGHT,
+ V4L2_INIT_BT_TIMINGS(640, ADV7511_MAX_WIDTH, 350, ADV7511_MAX_HEIGHT,
ADV7511_MIN_PIXELCLOCK, ADV7511_MAX_PIXELCLOCK,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index c786981..f01964c 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -766,7 +766,7 @@ static const struct v4l2_dv_timings_cap adv7604_timings_cap_analog = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
@@ -777,7 +777,7 @@ static const struct v4l2_dv_timings_cap adv76xx_timings_cap_digital = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 225000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 71fe565..bb43a75 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -663,7 +663,7 @@ static const struct v4l2_dv_timings_cap adv7842_timings_cap_analog = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 170000000,
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 170000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
@@ -674,7 +674,7 @@ static const struct v4l2_dv_timings_cap adv7842_timings_cap_digital = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1200, 25000000, 225000000,
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 25000000, 225000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
V4L2_DV_BT_CAP_PROGRESSIVE | V4L2_DV_BT_CAP_REDUCED_BLANKING |
diff --git a/drivers/media/i2c/tc358743.c b/drivers/media/i2c/tc358743.c
index ff25ea9..26070fb 100644
--- a/drivers/media/i2c/tc358743.c
+++ b/drivers/media/i2c/tc358743.c
@@ -59,7 +59,7 @@ static const struct v4l2_dv_timings_cap tc358743_timings_cap = {
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
/* Pixel clock from REF_01 p. 20. Min/max height/width are unknown */
- V4L2_INIT_BT_TIMINGS(1, 10000, 1, 10000, 0, 165000000,
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1200, 13000000, 165000000,
V4L2_DV_BT_STD_CEA861 | V4L2_DV_BT_STD_DMT |
V4L2_DV_BT_STD_GTF | V4L2_DV_BT_STD_CVT,
V4L2_DV_BT_CAP_PROGRESSIVE |
diff --git a/drivers/media/i2c/ths8200.c b/drivers/media/i2c/ths8200.c
index 498ad23..f5ee280 100644
--- a/drivers/media/i2c/ths8200.c
+++ b/drivers/media/i2c/ths8200.c
@@ -49,7 +49,7 @@ static const struct v4l2_dv_timings_cap ths8200_timings_cap = {
.type = V4L2_DV_BT_656_1120,
/* keep this initialization for compatibility with GCC < 4.4.6 */
.reserved = { 0 },
- V4L2_INIT_BT_TIMINGS(0, 1920, 0, 1080, 25000000, 148500000,
+ V4L2_INIT_BT_TIMINGS(640, 1920, 350, 1080, 25000000, 148500000,
V4L2_DV_BT_STD_CEA861, V4L2_DV_BT_CAP_PROGRESSIVE)
};
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index 06d29d8..f27d294 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -510,7 +510,12 @@ static const struct v4l2_ioctl_ops video_i2c_ioctl_ops = {
static void video_i2c_release(struct video_device *vdev)
{
- kfree(video_get_drvdata(vdev));
+ struct video_i2c_data *data = video_get_drvdata(vdev);
+
+ v4l2_device_unregister(&data->v4l2_dev);
+ mutex_destroy(&data->lock);
+ mutex_destroy(&data->queue_lock);
+ kfree(data);
}
static int video_i2c_probe(struct i2c_client *client,
@@ -608,10 +613,6 @@ static int video_i2c_remove(struct i2c_client *client)
struct video_i2c_data *data = i2c_get_clientdata(client);
video_unregister_device(&data->vdev);
- v4l2_device_unregister(&data->v4l2_dev);
-
- mutex_destroy(&data->lock);
- mutex_destroy(&data->queue_lock);
return 0;
}
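The video-i2c change above moves v4l2_device_unregister() and the mutex teardown out of remove() and into the video_device release callback: remove() can run while userspace still holds the device node open, and only the final reference drop invokes .release, so that is the only safe point to free the per-device state. A sketch of the resulting shape, with hypothetical foo_* names:

#include <linux/mutex.h>
#include <linux/slab.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>

struct foo_data {
	struct v4l2_device v4l2_dev;
	struct video_device vdev;
	struct mutex lock;
};

/* Assigned to data->vdev.release at probe time; runs only when the
 * last reference to the video device is dropped. */
static void foo_video_release(struct video_device *vdev)
{
	struct foo_data *data = video_get_drvdata(vdev);

	v4l2_device_unregister(&data->v4l2_dev);
	mutex_destroy(&data->lock);
	kfree(data);
}

/* remove() only unpublishes the node; it must not free anything that
 * an already-open file descriptor can still reach. */
static void foo_remove(struct foo_data *data)
{
	video_unregister_device(&data->vdev);
}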
diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
index d26c2d85..d20d3df 100644
--- a/drivers/media/platform/coda/coda-bit.c
+++ b/drivers/media/platform/coda/coda-bit.c
@@ -991,16 +991,15 @@ static int coda_start_encoding(struct coda_ctx *ctx)
else
coda_write(dev, CODA_STD_H264,
CODA_CMD_ENC_SEQ_COD_STD);
- if (ctx->params.h264_deblk_enabled) {
- value = ((ctx->params.h264_deblk_alpha &
- CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
- CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
- ((ctx->params.h264_deblk_beta &
- CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
- CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
- } else {
- value = 1 << CODA_264PARAM_DISABLEDEBLK_OFFSET;
- }
+ value = ((ctx->params.h264_disable_deblocking_filter_idc &
+ CODA_264PARAM_DISABLEDEBLK_MASK) <<
+ CODA_264PARAM_DISABLEDEBLK_OFFSET) |
+ ((ctx->params.h264_slice_alpha_c0_offset_div2 &
+ CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK) <<
+ CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET) |
+ ((ctx->params.h264_slice_beta_offset_div2 &
+ CODA_264PARAM_DEBLKFILTEROFFSETBETA_MASK) <<
+ CODA_264PARAM_DEBLKFILTEROFFSETBETA_OFFSET);
coda_write(dev, value, CODA_CMD_ENC_SEQ_264_PARA);
break;
case V4L2_PIX_FMT_JPEG:
diff --git a/drivers/media/platform/coda/coda-common.c b/drivers/media/platform/coda/coda-common.c
index bf7b841..19d92ed 100644
--- a/drivers/media/platform/coda/coda-common.c
+++ b/drivers/media/platform/coda/coda-common.c
@@ -1793,14 +1793,13 @@ static int coda_s_ctrl(struct v4l2_ctrl *ctrl)
ctx->params.h264_max_qp = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:
- ctx->params.h264_deblk_alpha = ctrl->val;
+ ctx->params.h264_slice_alpha_c0_offset_div2 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:
- ctx->params.h264_deblk_beta = ctrl->val;
+ ctx->params.h264_slice_beta_offset_div2 = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE:
- ctx->params.h264_deblk_enabled = (ctrl->val ==
- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+ ctx->params.h264_disable_deblocking_filter_idc = ctrl->val;
break;
case V4L2_CID_MPEG_VIDEO_H264_PROFILE:
/* TODO: switch between baseline and constrained baseline */
@@ -1882,13 +1881,13 @@ static void coda_encode_ctrls(struct coda_ctx *ctx)
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 0, 51, 1, 51);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, 0, 15, 1, 0);
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, -6, 6, 1, 0);
v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, 0, 15, 1, 0);
+ V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, -6, 6, 1, 0);
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE,
- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED, 0x0,
- V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
+ V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY,
+ 0x0, V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED);
v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops,
V4L2_CID_MPEG_VIDEO_H264_PROFILE,
V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, 0x0,
diff --git a/drivers/media/platform/coda/coda.h b/drivers/media/platform/coda/coda.h
index 19ac0b9..2469ca1d 100644
--- a/drivers/media/platform/coda/coda.h
+++ b/drivers/media/platform/coda/coda.h
@@ -115,9 +115,9 @@ struct coda_params {
u8 h264_inter_qp;
u8 h264_min_qp;
u8 h264_max_qp;
- u8 h264_deblk_enabled;
- u8 h264_deblk_alpha;
- u8 h264_deblk_beta;
+ u8 h264_disable_deblocking_filter_idc;
+ s8 h264_slice_alpha_c0_offset_div2;
+ s8 h264_slice_beta_offset_div2;
u8 h264_profile_idc;
u8 h264_level_idc;
u8 mpeg4_intra_qp;
diff --git a/drivers/media/platform/coda/coda_regs.h b/drivers/media/platform/coda/coda_regs.h
index 5e7b00a..e675e38 100644
--- a/drivers/media/platform/coda/coda_regs.h
+++ b/drivers/media/platform/coda/coda_regs.h
@@ -292,7 +292,7 @@
#define CODA_264PARAM_DEBLKFILTEROFFSETALPHA_OFFSET 8
#define CODA_264PARAM_DEBLKFILTEROFFSETALPHA_MASK 0x0f
#define CODA_264PARAM_DISABLEDEBLK_OFFSET 6
-#define CODA_264PARAM_DISABLEDEBLK_MASK 0x01
+#define CODA_264PARAM_DISABLEDEBLK_MASK 0x03
#define CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_OFFSET 5
#define CODA_264PARAM_CONSTRAINEDINTRAPREDFLAG_MASK 0x01
#define CODA_264PARAM_CHROMAQPOFFSET_OFFSET 0
diff --git a/drivers/media/platform/davinci/vpbe.c b/drivers/media/platform/davinci/vpbe.c
index 18c035e..df1ae6b 100644
--- a/drivers/media/platform/davinci/vpbe.c
+++ b/drivers/media/platform/davinci/vpbe.c
@@ -740,7 +740,7 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
if (ret) {
v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default output %s",
def_output);
- return ret;
+ goto fail_kfree_amp;
}
printk(KERN_NOTICE "Setting default mode to %s\n", def_mode);
@@ -748,12 +748,15 @@ static int vpbe_initialize(struct device *dev, struct vpbe_device *vpbe_dev)
if (ret) {
v4l2_err(&vpbe_dev->v4l2_dev, "Failed to set default mode %s",
def_mode);
- return ret;
+ goto fail_kfree_amp;
}
vpbe_dev->initialized = 1;
/* TBD handling of bootargs for default output and mode */
return 0;
+fail_kfree_amp:
+ mutex_lock(&vpbe_dev->lock);
+ kfree(vpbe_dev->amp);
fail_kfree_encoders:
kfree(vpbe_dev->encoders);
fail_dev_unregister:
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 408a101..f5e612c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -1859,7 +1859,7 @@ static int cam_ife_mgr_acquire_hw_for_ctx(
CAM_ERR(CAM_ISP, "Acquire IFE RD SRC resource Failed");
goto err;
}
- } else if (ipp_count || ppp_count) {
+ } else if (ipp_count || ppp_count || rdi_count) {
rc = cam_ife_hw_mgr_acquire_res_ife_src(ife_ctx,
in_port, false);
@@ -3717,8 +3717,8 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
struct cam_hw_prepare_update_args *prepare = NULL;
if (!blob_data || (blob_size == 0) || !blob_info) {
- CAM_ERR(CAM_ISP, "Invalid info blob %pK %d prepare %pK",
- blob_data, blob_size, prepare);
+ CAM_ERR(CAM_ISP, "Invalid args data %pK size %d info %pK",
+ blob_data, blob_size, blob_info);
return -EINVAL;
}
@@ -3738,8 +3738,29 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
CAM_DBG(CAM_ISP, "FS2: BLOB Type: %d", blob_type);
switch (blob_type) {
case CAM_ISP_GENERIC_BLOB_TYPE_HFR_CONFIG: {
- struct cam_isp_resource_hfr_config *hfr_config =
- (struct cam_isp_resource_hfr_config *)blob_data;
+ struct cam_isp_resource_hfr_config *hfr_config;
+
+ if (blob_size < sizeof(struct cam_isp_resource_hfr_config)) {
+ CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
+ return -EINVAL;
+ }
+
+ hfr_config = (struct cam_isp_resource_hfr_config *)blob_data;
+
+ if (hfr_config->num_ports > CAM_ISP_IFE_OUT_RES_MAX) {
+ CAM_ERR(CAM_ISP, "Invalid num_ports %u in hfr config",
+ hfr_config->num_ports);
+ return -EINVAL;
+ }
+
+ if (blob_size < (sizeof(uint32_t) * 2 + hfr_config->num_ports *
+ sizeof(struct cam_isp_port_hfr_config))) {
+ CAM_ERR(CAM_ISP, "Invalid blob size %u expected %u",
+ blob_size, sizeof(uint32_t) * 2 +
+ sizeof(struct cam_isp_port_hfr_config) *
+ hfr_config->num_ports);
+ return -EINVAL;
+ }
rc = cam_isp_blob_hfr_update(blob_type, blob_info,
hfr_config, prepare);
@@ -3748,8 +3769,29 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
}
break;
case CAM_ISP_GENERIC_BLOB_TYPE_CLOCK_CONFIG: {
- struct cam_isp_clock_config *clock_config =
- (struct cam_isp_clock_config *)blob_data;
+ struct cam_isp_clock_config *clock_config;
+
+ if (blob_size < sizeof(struct cam_isp_clock_config)) {
+ CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
+ return -EINVAL;
+ }
+
+ clock_config = (struct cam_isp_clock_config *)blob_data;
+
+ if (clock_config->num_rdi > CAM_IFE_RDI_NUM_MAX) {
+ CAM_ERR(CAM_ISP, "Invalid num_rdi %u in clock config",
+ clock_config->num_rdi);
+ return -EINVAL;
+ }
+
+ if (blob_size < (sizeof(uint32_t) * 2 + sizeof(uint64_t) *
+ (clock_config->num_rdi + 2))) {
+ CAM_ERR(CAM_ISP, "Invalid blob size %u expected %u",
+ blob_size,
+ sizeof(uint32_t) * 2 + sizeof(uint64_t) *
+ (clock_config->num_rdi + 2));
+ return -EINVAL;
+ }
rc = cam_isp_blob_clock_update(blob_type, blob_info,
clock_config, prepare);
@@ -3758,10 +3800,31 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
}
break;
case CAM_ISP_GENERIC_BLOB_TYPE_BW_CONFIG: {
- struct cam_isp_bw_config *bw_config =
- (struct cam_isp_bw_config *)blob_data;
+ struct cam_isp_bw_config *bw_config;
struct cam_isp_prepare_hw_update_data *prepare_hw_data;
+ if (blob_size < sizeof(struct cam_isp_bw_config)) {
+ CAM_ERR(CAM_ISP, "Invalid blob size %u", blob_size);
+ return -EINVAL;
+ }
+
+ bw_config = (struct cam_isp_bw_config *)blob_data;
+
+ if (bw_config->num_rdi > CAM_IFE_RDI_NUM_MAX) {
+ CAM_ERR(CAM_ISP, "Invalid num_rdi %u in bw config",
+ bw_config->num_rdi);
+ return -EINVAL;
+ }
+
+ if (blob_size < (sizeof(uint32_t) * 2 + (bw_config->num_rdi + 2)
+ * sizeof(struct cam_isp_bw_vote))) {
+ CAM_ERR(CAM_ISP, "Invalid blob size %u expected %u",
+ blob_size,
+ sizeof(uint32_t) * 2 + (bw_config->num_rdi + 2)
+ * sizeof(struct cam_isp_bw_vote));
+ return -EINVAL;
+ }
+
if (!prepare || !prepare->priv ||
(bw_config->usage_type >= CAM_IFE_HW_NUM_MAX)) {
CAM_ERR(CAM_ISP, "Invalid inputs");
@@ -3779,8 +3842,29 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
}
break;
case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG: {
- struct cam_ubwc_config *ubwc_config =
- (struct cam_ubwc_config *)blob_data;
+ struct cam_ubwc_config *ubwc_config;
+
+ if (blob_size < sizeof(struct cam_ubwc_config)) {
+ CAM_ERR(CAM_ISP, "Invalid blob_size %u", blob_size);
+ return -EINVAL;
+ }
+
+ ubwc_config = (struct cam_ubwc_config *)blob_data;
+
+ if (ubwc_config->num_ports > CAM_VFE_MAX_UBWC_PORTS) {
+ CAM_ERR(CAM_ISP, "Invalid num_ports %u in ubwc config",
+ ubwc_config->num_ports);
+ return -EINVAL;
+ }
+
+ if (blob_size < (sizeof(uint32_t) * 2 + ubwc_config->num_ports *
+ sizeof(struct cam_ubwc_plane_cfg_v1) * 2)) {
+ CAM_ERR(CAM_ISP, "Invalid blob_size %u expected %u",
+ blob_size,
+ sizeof(uint32_t) * 2 + ubwc_config->num_ports *
+ sizeof(struct cam_ubwc_plane_cfg_v1) * 2);
+ return -EINVAL;
+ }
rc = cam_isp_blob_ubwc_update(blob_type, blob_info,
ubwc_config, prepare);
@@ -3790,8 +3874,29 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
break;
case CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V2: {
- struct cam_ubwc_config_v2 *ubwc_config =
- (struct cam_ubwc_config_v2 *)blob_data;
+ struct cam_ubwc_config_v2 *ubwc_config;
+
+ if (blob_size < sizeof(struct cam_ubwc_config_v2)) {
+ CAM_ERR(CAM_ISP, "Invalid blob_size %u", blob_size);
+ return -EINVAL;
+ }
+
+ ubwc_config = (struct cam_ubwc_config_v2 *)blob_data;
+
+ if (ubwc_config->num_ports > CAM_VFE_MAX_UBWC_PORTS) {
+ CAM_ERR(CAM_ISP, "Invalid num_ports %u in ubwc config",
+ ubwc_config->num_ports);
+ return -EINVAL;
+ }
+
+ if (blob_size < (sizeof(uint32_t) * 2 + ubwc_config->num_ports *
+ sizeof(struct cam_ubwc_plane_cfg_v2) * 2)) {
+ CAM_ERR(CAM_ISP, "Invalid blob_size %u expected %u",
+ blob_size,
+ sizeof(uint32_t) * 2 + ubwc_config->num_ports *
+ sizeof(struct cam_ubwc_plane_cfg_v2) * 2);
+ return -EINVAL;
+ }
rc = cam_isp_blob_ubwc_update_v2(blob_type, blob_info,
ubwc_config, prepare);
@@ -3800,8 +3905,16 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
}
break;
case CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG: {
- struct cam_isp_csid_clock_config *clock_config =
- (struct cam_isp_csid_clock_config *)blob_data;
+ struct cam_isp_csid_clock_config *clock_config;
+
+ if (blob_size < sizeof(struct cam_isp_csid_clock_config)) {
+ CAM_ERR(CAM_ISP, "Invalid blob size %u expected %u",
+ blob_size,
+ sizeof(struct cam_isp_csid_clock_config));
+ return -EINVAL;
+ }
+
+ clock_config = (struct cam_isp_csid_clock_config *)blob_data;
rc = cam_isp_blob_csid_clock_update(blob_type, blob_info,
clock_config, prepare);
@@ -3810,8 +3923,16 @@ static int cam_isp_packet_generic_blob_handler(void *user_data,
}
break;
case CAM_ISP_GENERIC_BLOB_TYPE_FE_CONFIG: {
- struct cam_fe_config *fe_config =
- (struct cam_fe_config *)blob_data;
+ struct cam_fe_config *fe_config;
+
+ if (blob_size < sizeof(struct cam_fe_config)) {
+ CAM_ERR(CAM_ISP, "Invalid blob size %u expected %u",
+ blob_size, sizeof(struct cam_fe_config));
+ return -EINVAL;
+ }
+
+ fe_config = (struct cam_fe_config *)blob_data;
+
rc = cam_isp_blob_fe_update(blob_type, blob_info,
fe_config, prepare);
if (rc)
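The recurring pattern in the blob-handler hunks above is bounds-checking a variable-length blob before trusting it: first make sure the fixed header fits, then clamp the element count the header claims, then make sure header plus the claimed trailing array fits. A generic sketch of that check, assuming hypothetical blob_hdr/blob_port layouts rather than the camera driver's structures:

#include <linux/errno.h>
#include <linux/types.h>

#define BLOB_MAX_PORTS	32

struct blob_hdr {
	u32 version;
	u32 num_ports;
	/* followed by num_ports struct blob_port entries */
};

struct blob_port {
	u32 id;
	u32 cfg;
};

static int check_blob(const void *data, size_t blob_size)
{
	const struct blob_hdr *hdr = data;

	/* 1. The fixed header must fit before any field is read. */
	if (blob_size < sizeof(*hdr))
		return -EINVAL;

	/* 2. Clamp the claimed count to what the hardware supports;
	 * this also keeps the size computation below from overflowing. */
	if (hdr->num_ports > BLOB_MAX_PORTS)
		return -EINVAL;

	/* 3. Header plus the claimed trailing array must fit too. */
	if (blob_size < sizeof(*hdr) +
			hdr->num_ports * sizeof(struct blob_port))
		return -EINVAL;

	return 0;
}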
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index 14bced0..c331a55 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -18,6 +18,8 @@
#define VFE_VBIF_BASE_IDX 1
#define VFE_BUS_BASE_IDX 1
+#define CAM_VFE_MAX_UBWC_PORTS 4
+
enum cam_isp_hw_vfe_in_mux {
CAM_ISP_HW_VFE_IN_CAMIF = 0,
CAM_ISP_HW_VFE_IN_TESTGEN = 1,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c
index 3b5d065..d508113 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_rd_ver1.c
@@ -940,7 +940,7 @@ static int cam_vfe_bus_init_hw(void *hw_priv,
void *init_hw_args, uint32_t arg_size)
{
struct cam_vfe_bus_rd_ver1_priv *bus_priv = hw_priv;
- uint32_t top_irq_reg_mask[2] = {0};
+ uint32_t top_irq_reg_mask[3] = {0};
uint32_t offset = 0, val = 0;
struct cam_vfe_bus_rd_ver1_reg_offset_common *common_reg;
diff --git a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
index f2e973e..3f5790b 100644
--- a/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera/cam_smmu/cam_smmu_api.c
@@ -673,11 +673,6 @@ static int cam_smmu_create_add_handle_in_table(char *name,
if (!strcmp(iommu_cb_set.cb_info[i].name, name)) {
mutex_lock(&iommu_cb_set.cb_info[i].lock);
if (iommu_cb_set.cb_info[i].handle != HANDLE_INIT) {
- CAM_ERR(CAM_SMMU,
- "Error: %s already got handle 0x%x",
- name,
- iommu_cb_set.cb_info[i].handle);
-
if (iommu_cb_set.cb_info[i].is_secure)
iommu_cb_set.cb_info[i].secure_count++;
@@ -686,6 +681,11 @@ static int cam_smmu_create_add_handle_in_table(char *name,
*hdl = iommu_cb_set.cb_info[i].handle;
return 0;
}
+
+ CAM_ERR(CAM_SMMU,
+ "Error: %s already got handle 0x%x",
+ name, iommu_cb_set.cb_info[i].handle);
+
return -EINVAL;
}
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_debug.c b/drivers/media/platform/msm/cvp/msm_cvp_debug.c
index 2ab6d44..8d5d7c5 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_debug.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_debug.c
@@ -8,7 +8,7 @@
#include "msm_cvp_debug.h"
#include "cvp_hfi_api.h"
-int msm_cvp_debug = CVP_ERR | CVP_WARN | CVP_DBG;
+int msm_cvp_debug = CVP_ERR | CVP_WARN;
EXPORT_SYMBOL(msm_cvp_debug);
int msm_cvp_debug_out = CVP_OUT_PRINTK;
diff --git a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
index 740a709..e1e641c 100644
--- a/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
+++ b/drivers/media/platform/msm/cvp/msm_cvp_dsp.c
@@ -5,6 +5,7 @@
#include <linux/module.h>
#include <linux/rpmsg.h>
#include <linux/of_platform.h>
+#include <linux/of_fdt.h>
#include <soc/qcom/secure_buffer.h>
#include "msm_cvp_dsp.h"
@@ -32,7 +33,7 @@ struct cvp_dsp_cmd_msg {
uint32_t buff_index;
uint32_t buff_size;
uint32_t session_id;
- int64_t ddr_type;
+ int32_t ddr_type;
uint32_t reserved[CVP_DSP_MAX_RESERVED];
};
@@ -192,14 +193,21 @@ int cvp_dsp_send_cmd_hfi_queue(phys_addr_t *phys_addr,
local_cmd_msg.cmd_msg_type = CVP_DSP_SEND_HFI_CMD_QUEUE;
local_cmd_msg.msg_ptr = (uint64_t)phys_addr;
local_cmd_msg.msg_ptr_len = size_in_bytes;
+ local_cmd_msg.ddr_type = of_fdt_get_ddrtype();
+ if (local_cmd_msg.ddr_type < 0) {
+ dprintk(CVP_ERR,
+ "%s: Incorrect DDR type value %d\n",
+ __func__, local_cmd_msg.ddr_type);
+ }
+
mutex_lock(&me->smd_mutex);
cmd_msg.msg_ptr = (uint64_t)phys_addr;
cmd_msg.msg_ptr_len = (size_in_bytes);
mutex_unlock(&me->smd_mutex);
dprintk(CVP_DBG,
- "%s :: address of buffer, PA=0x%pK size_buff=%d\n",
- __func__, phys_addr, size_in_bytes);
+ "%s :: address of buffer, PA=0x%pK size_buff=%d ddr_type=%d\n",
+ __func__, phys_addr, size_in_bytes, local_cmd_msg.ddr_type);
err = hyp_assign_phys((uint64_t)local_cmd_msg.msg_ptr,
local_cmd_msg.msg_ptr_len, srcVM, SRC_VM_NUM, destVM,
diff --git a/drivers/media/platform/msm/synx/synx.c b/drivers/media/platform/msm/synx/synx.c
index 90fef13..2b05cc9 100644
--- a/drivers/media/platform/msm/synx/synx.c
+++ b/drivers/media/platform/msm/synx/synx.c
@@ -1389,6 +1389,9 @@ static const struct file_operations synx_fops = {
.flush = synx_close,
.poll = synx_poll,
.unlocked_ioctl = synx_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = synx_ioctl,
+#endif
};
#ifdef CONFIG_SPECTRA_CAMERA
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 0d2309d..69d41ed 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -67,9 +67,9 @@ static struct msm_vidc_ctrl msm_vdec_ctrls[] = {
.id = V4L2_CID_MPEG_VIDEO_UNKNOWN,
.name = "Invalid control",
.type = V4L2_CTRL_TYPE_INTEGER,
- .minimum = INT_MAX,
- .maximum = INT_MAX,
- .default_value = INT_MAX,
+ .minimum = 0,
+ .maximum = 0,
+ .default_value = 0,
.step = 1,
.menu_skip_mask = 0,
.qmenu = NULL,
@@ -281,8 +281,7 @@ static struct msm_vidc_ctrl msm_vdec_ctrls[] = {
.menu_skip_mask = ~(
(1 << V4L2_MPEG_VIDEO_VP9_PROFILE_0) |
(1 << V4L2_MPEG_VIDEO_VP9_PROFILE_1) |
- (1 << V4L2_MPEG_VIDEO_VP9_PROFILE_2) |
- (1 << V4L2_MPEG_VIDEO_VP9_PROFILE_3)
+ (1 << V4L2_MPEG_VIDEO_VP9_PROFILE_2)
),
.qmenu = NULL,
.flags = V4L2_CTRL_FLAG_VOLATILE | V4L2_CTRL_FLAG_READ_ONLY,
@@ -836,7 +835,6 @@ int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
return -EINVAL;
}
- v4l2_ctrl_unlock(ctrl);
dprintk(VIDC_DBG,
"%s: %x : control name = %s, id = 0x%x value = %d\n",
__func__, hash32_ptr(inst->session), ctrl->name,
@@ -930,7 +928,6 @@ int msm_vdec_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
"Unknown control %#x\n", ctrl->id);
break;
}
- v4l2_ctrl_lock(ctrl);
return rc;
}
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index b574e5e..bbbf3ea 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -76,9 +76,9 @@ static struct msm_vidc_ctrl msm_venc_ctrls[] = {
.id = V4L2_CID_MPEG_VIDEO_UNKNOWN,
.name = "Invalid control",
.type = V4L2_CTRL_TYPE_INTEGER,
- .minimum = INT_MAX,
- .maximum = INT_MAX,
- .default_value = INT_MAX,
+ .minimum = 0,
+ .maximum = 0,
+ .default_value = 0,
.step = 1,
.menu_skip_mask = 0,
.qmenu = NULL,
@@ -1396,13 +1396,6 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
mdisp_sei = &(inst->hdr10_sei_params.disp_color_sei);
cll_sei = &(inst->hdr10_sei_params.cll_sei);
- /*
- * Unlock the control prior to setting to the hardware. Otherwise
- * lower level code that attempts to do a get_ctrl() will end up
- * deadlocking.
- */
- v4l2_ctrl_unlock(ctrl);
-
dprintk(VIDC_DBG,
"%s: %x : name %s, id 0x%x value %d\n",
__func__, hash32_ptr(inst->session), ctrl->name,
@@ -1751,7 +1744,6 @@ int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
break;
}
- v4l2_ctrl_lock(ctrl);
return rc;
}
@@ -2864,9 +2856,18 @@ int msm_venc_set_intra_refresh_mode(struct msm_vidc_inst *inst)
ctrl = get_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM);
intra_refresh.mbs = 0;
if (ctrl->val) {
+ u32 num_mbs_per_frame = 0;
+ u32 width = inst->prop.width[CAPTURE_PORT];
+ u32 height = inst->prop.height[CAPTURE_PORT];
+
/* ignore cyclic mode if random mode is set */
intra_refresh.mode = HFI_INTRA_REFRESH_RANDOM;
- intra_refresh.mbs = ctrl->val;
+
+ num_mbs_per_frame = NUM_MBS_PER_FRAME(height, width);
+ intra_refresh.mbs = num_mbs_per_frame / ctrl->val;
+ if (num_mbs_per_frame % ctrl->val) {
+ intra_refresh.mbs++;
+ }
} else {
ctrl = get_ctrl(inst,
V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB);
@@ -3365,12 +3366,16 @@ int msm_venc_set_8x8_transform(struct msm_vidc_inst *inst)
}
hdev = inst->core->device;
- if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264 &&
- inst->profile != HFI_H264_PROFILE_HIGH &&
+ if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_H264) {
+ dprintk(VIDC_DBG, "%s: skip due to %#x\n",
+ __func__, inst->fmts[CAPTURE_PORT].fourcc);
+ return 0;
+ }
+
+ if (inst->profile != HFI_H264_PROFILE_HIGH &&
inst->profile != HFI_H264_PROFILE_CONSTRAINED_HIGH) {
- dprintk(VIDC_DBG, "%s: skip due to %#x %#x\n",
- __func__, inst->fmts[CAPTURE_PORT].fourcc,
- inst->profile);
+ dprintk(VIDC_DBG, "%s: skip due to %#x\n",
+ __func__, inst->profile);
return 0;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
index 2d28758..ea0107d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
@@ -587,9 +587,8 @@ void msm_vidc_init_buffer_size_calculators(struct msm_vidc_inst *inst)
inst->buffer_size_calculators = NULL;
core = inst->core;
- /* Only decoder is enabled for now */
- if ((core->platform_data->vpu_ver == VPU_VERSION_IRIS2) &&
- (inst->session_type == MSM_VIDC_DECODER))
+ /* Change this to IRIS2 when ready */
+ if (core->platform_data->vpu_ver == VPU_VERSION_AR50)
inst->buffer_size_calculators =
msm_vidc_calculate_internal_buffer_sizes;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index e80e32f..7e10ec6 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -315,17 +315,17 @@ int msm_comm_vote_bus(struct msm_vidc_core *core)
vote_data[i].input_height = inst->prop.height[OUTPUT_PORT];
vote_data[i].output_width = inst->prop.width[CAPTURE_PORT];
vote_data[i].output_height = inst->prop.height[CAPTURE_PORT];
- vote_data[i].rotation =
- msm_comm_g_ctrl_for_id(inst, V4L2_CID_ROTATE);
vote_data[i].lcu_size = (codec == V4L2_PIX_FMT_HEVC ||
codec == V4L2_PIX_FMT_VP9) ? 32 : 16;
- vote_data[i].b_frames_enabled =
- msm_comm_g_ctrl_for_id(inst,
- V4L2_CID_MPEG_VIDEO_B_FRAMES) != 0;
vote_data[i].fps = msm_vidc_get_fps(inst);
if (inst->session_type == MSM_VIDC_ENCODER) {
vote_data[i].bitrate = inst->clk_data.bitrate;
+ vote_data[i].rotation =
+ msm_comm_g_ctrl_for_id(inst, V4L2_CID_ROTATE);
+ vote_data[i].b_frames_enabled =
+ msm_comm_g_ctrl_for_id(inst,
+ V4L2_CID_MPEG_VIDEO_B_FRAMES) != 0;
/* scale bitrate if operating rate is larger than fps */
if (vote_data[i].fps > (inst->clk_data.frame_rate >> 16)
&& (inst->clk_data.frame_rate >> 16)) {
@@ -1682,7 +1682,7 @@ int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst)
struct hfi_videocores_usage_type core_info;
u32 core0_load = 0, core1_load = 0, core0_lp_load = 0,
core1_lp_load = 0;
- u32 current_inst_load = 0, current_inst_lp_load = 0,
+ u32 current_inst_load = 0, cur_inst_lp_load = 0,
min_load = 0, min_lp_load = 0;
u32 min_core_id, min_lp_core_id;
@@ -1727,7 +1727,7 @@ int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst)
current_inst_load = (msm_comm_get_inst_load(inst, LOAD_CALC_NO_QUIRKS) *
inst->clk_data.entry->vpp_cycles)/inst->clk_data.work_route;
- current_inst_lp_load = (msm_comm_get_inst_load(inst,
+ cur_inst_lp_load = (msm_comm_get_inst_load(inst,
LOAD_CALC_NO_QUIRKS) * lp_cycles)/inst->clk_data.work_route;
dprintk(VIDC_DBG, "Core 0 RT Load = %d Core 1 RT Load = %d\n",
@@ -1736,40 +1736,32 @@ int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst)
core0_lp_load, core1_lp_load);
dprintk(VIDC_DBG, "Max Load = %lu\n", max_freq);
dprintk(VIDC_DBG, "Current Load = %d Current LP Load = %d\n",
- current_inst_load, current_inst_lp_load);
+ current_inst_load, cur_inst_lp_load);
- /* Hier mode can be normal HP or Hybrid HP. */
+ if (inst->session_type == MSM_VIDC_ENCODER) {
+ /* Hier mode can be normal HP or Hybrid HP. */
+ u32 max_cores, work_mode;
- hier_mode = msm_comm_g_ctrl_for_id(inst,
- V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);
-
- /* Try for preferred core based on settings. */
- if (inst->session_type == MSM_VIDC_ENCODER && hier_mode &&
- inst->capability.cap[CAP_MAX_VIDEOCORES].max >=
- VIDC_CORE_ID_3) {
- if (current_inst_load / 2 + core0_load <= max_freq &&
+ hier_mode = msm_comm_g_ctrl_for_id(inst,
+ V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER);
+ max_cores = inst->capability.cap[CAP_MAX_VIDEOCORES].max;
+ work_mode = inst->clk_data.work_mode;
+ if (hier_mode && max_cores >= VIDC_CORE_ID_3 &&
+ work_mode == HFI_WORKMODE_2) {
+ if (current_inst_load / 2 + core0_load <= max_freq &&
current_inst_load / 2 + core1_load <= max_freq) {
- if (inst->clk_data.work_mode == HFI_WORKMODE_2) {
inst->clk_data.core_id = VIDC_CORE_ID_3;
msm_vidc_power_save_mode_enable(inst, false);
goto decision_done;
}
- }
- }
-
- if (inst->session_type == MSM_VIDC_ENCODER && hier_mode &&
- inst->capability.cap[CAP_MAX_VIDEOCORES].max >=
- VIDC_CORE_ID_3) {
- if (current_inst_lp_load / 2 +
- core0_lp_load <= max_freq &&
- current_inst_lp_load / 2 +
- core1_lp_load <= max_freq) {
- if (inst->clk_data.work_mode == HFI_WORKMODE_2) {
+ if (cur_inst_lp_load / 2 + core0_lp_load <= max_freq &&
+ cur_inst_lp_load / 2 + core1_lp_load <= max_freq) {
inst->clk_data.core_id = VIDC_CORE_ID_3;
msm_vidc_power_save_mode_enable(inst, true);
goto decision_done;
}
}
+
}
if (current_inst_load + min_load < max_freq) {
@@ -1778,7 +1770,7 @@ int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst)
"Selected normally : Core ID = %d\n",
inst->clk_data.core_id);
msm_vidc_power_save_mode_enable(inst, false);
- } else if (current_inst_lp_load + min_load < max_freq) {
+ } else if (cur_inst_lp_load + min_load < max_freq) {
/* Move current instance to LP and return */
inst->clk_data.core_id = min_core_id;
dprintk(VIDC_DBG,
@@ -1786,7 +1778,7 @@ int msm_vidc_decide_core_and_power_mode(struct msm_vidc_inst *inst)
inst->clk_data.core_id);
msm_vidc_power_save_mode_enable(inst, true);
- } else if (current_inst_lp_load + min_lp_load < max_freq) {
+ } else if (cur_inst_lp_load + min_lp_load < max_freq) {
/* Move all instances to LP mode and return */
inst->clk_data.core_id = min_lp_core_id;
dprintk(VIDC_DBG,
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 681c33c..c77cba9 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -37,72 +37,12 @@ static void msm_vidc_print_running_insts(struct msm_vidc_core *core);
#define V4L2_HEVC_LEVEL_UNKNOWN V4L2_MPEG_VIDEO_HEVC_LEVEL_UNKNOWN
#define V4L2_VP9_LEVEL_61 V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61
-int h264_level_v4l2_to_hfi[] = {
- HFI_H264_LEVEL_1,
- HFI_H264_LEVEL_1b,
- HFI_H264_LEVEL_11,
- HFI_H264_LEVEL_12,
- HFI_H264_LEVEL_13,
- HFI_H264_LEVEL_2,
- HFI_H264_LEVEL_21,
- HFI_H264_LEVEL_22,
- HFI_H264_LEVEL_3,
- HFI_H264_LEVEL_31,
- HFI_H264_LEVEL_32,
- HFI_H264_LEVEL_4,
- HFI_H264_LEVEL_41,
- HFI_H264_LEVEL_42,
- HFI_H264_LEVEL_5,
- HFI_H264_LEVEL_51,
- HFI_H264_LEVEL_52,
- HFI_H264_LEVEL_6,
- HFI_H264_LEVEL_61,
- HFI_H264_LEVEL_62,
- HFI_LEVEL_UNKNOWN,
-};
-
-int hevc_level_v4l2_to_hfi[] = {
- HFI_HEVC_LEVEL_1,
- HFI_HEVC_LEVEL_2,
- HFI_HEVC_LEVEL_21,
- HFI_HEVC_LEVEL_3,
- HFI_HEVC_LEVEL_31,
- HFI_HEVC_LEVEL_4,
- HFI_HEVC_LEVEL_41,
- HFI_HEVC_LEVEL_5,
- HFI_HEVC_LEVEL_51,
- HFI_HEVC_LEVEL_52,
- HFI_HEVC_LEVEL_6,
- HFI_HEVC_LEVEL_61,
- HFI_HEVC_LEVEL_62,
- HFI_LEVEL_UNKNOWN,
-};
-
-int vp9_level_v4l2_to_hfi[] = {
- HFI_LEVEL_UNKNOWN,
- HFI_VP9_LEVEL_1,
- HFI_VP9_LEVEL_11,
- HFI_VP9_LEVEL_2,
- HFI_VP9_LEVEL_21,
- HFI_VP9_LEVEL_3,
- HFI_VP9_LEVEL_31,
- HFI_VP9_LEVEL_4,
- HFI_VP9_LEVEL_41,
- HFI_VP9_LEVEL_5,
- HFI_VP9_LEVEL_51,
- HFI_VP9_LEVEL_6,
- HFI_VP9_LEVEL_61,
-};
-
int msm_comm_g_ctrl_for_id(struct msm_vidc_inst *inst, int id)
{
- int rc = 0;
- struct v4l2_control ctrl = {
- .id = id,
- };
+ struct v4l2_ctrl *ctrl;
- rc = msm_comm_g_ctrl(inst, &ctrl);
- return rc ? rc : ctrl.value;
+ ctrl = get_ctrl(inst, id);
+ return ctrl->val;
}
static struct v4l2_ctrl **get_super_cluster(struct msm_vidc_inst *inst,
@@ -329,6 +269,138 @@ int msm_comm_hfi_to_v4l2(int id, int value)
return -EINVAL;
}
+static int h264_level_v4l2_to_hfi(int value)
+{
+ switch (value) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return HFI_H264_LEVEL_1;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ return HFI_H264_LEVEL_1b;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return HFI_H264_LEVEL_11;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return HFI_H264_LEVEL_12;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return HFI_H264_LEVEL_13;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return HFI_H264_LEVEL_2;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return HFI_H264_LEVEL_21;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return HFI_H264_LEVEL_22;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return HFI_H264_LEVEL_3;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return HFI_H264_LEVEL_31;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return HFI_H264_LEVEL_32;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return HFI_H264_LEVEL_4;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return HFI_H264_LEVEL_41;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return HFI_H264_LEVEL_42;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ return HFI_H264_LEVEL_5;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ return HFI_H264_LEVEL_51;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_2:
+ return HFI_H264_LEVEL_52;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_6_0:
+ return HFI_H264_LEVEL_6;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_6_1:
+ return HFI_H264_LEVEL_61;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_6_2:
+ return HFI_H264_LEVEL_62;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_UNKNOWN:
+ return HFI_LEVEL_UNKNOWN;
+ default:
+ goto unknown_value;
+ }
+
+unknown_value:
+ dprintk(VIDC_WARN, "Unknown level (%d)\n", value);
+ return -EINVAL;
+}
+
+static int hevc_level_v4l2_to_hfi(int value)
+{
+ switch (value) {
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
+ return HFI_HEVC_LEVEL_1;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
+ return HFI_HEVC_LEVEL_2;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
+ return HFI_HEVC_LEVEL_21;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
+ return HFI_HEVC_LEVEL_3;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
+ return HFI_HEVC_LEVEL_31;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
+ return HFI_HEVC_LEVEL_4;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
+ return HFI_HEVC_LEVEL_41;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
+ return HFI_HEVC_LEVEL_5;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
+ return HFI_HEVC_LEVEL_51;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
+ return HFI_HEVC_LEVEL_52;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6:
+ return HFI_HEVC_LEVEL_6;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1:
+ return HFI_HEVC_LEVEL_61;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2:
+ return HFI_HEVC_LEVEL_62;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_UNKNOWN:
+ return HFI_LEVEL_UNKNOWN;
+ default:
+ goto unknown_value;
+ }
+
+unknown_value:
+ dprintk(VIDC_WARN, "Unknown level (%d)\n", value);
+ return -EINVAL;
+}
+
+static int vp9_level_v4l2_to_hfi(int value)
+{
+ switch (value) {
+ case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_1:
+ return HFI_VP9_LEVEL_1;
+ case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_11:
+ return HFI_VP9_LEVEL_11;
+ case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_2:
+ return HFI_VP9_LEVEL_2;
+ case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_21:
+ return HFI_VP9_LEVEL_21;
+ case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_3:
+ return HFI_VP9_LEVEL_3;
+ case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_31:
+ return HFI_VP9_LEVEL_31;
+ case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_4:
+ return HFI_VP9_LEVEL_4;
+ case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_41:
+ return HFI_VP9_LEVEL_41;
+ case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_5:
+ return HFI_VP9_LEVEL_5;
+ case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_51:
+ return HFI_VP9_LEVEL_51;
+ case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_6:
+ return HFI_VP9_LEVEL_6;
+ case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_61:
+ return HFI_VP9_LEVEL_61;
+ case V4L2_MPEG_VIDC_VIDEO_VP9_LEVEL_UNUSED:
+ return HFI_LEVEL_UNKNOWN;
+ default:
+ goto unknown_value;
+ }
+
+unknown_value:
+ dprintk(VIDC_WARN, "Unknown level (%d)\n", value);
+ return -EINVAL;
+
+}
int msm_comm_v4l2_to_hfi(int id, int value)
{
switch (id) {
@@ -353,11 +425,7 @@ int msm_comm_v4l2_to_hfi(int id, int value)
return HFI_H264_PROFILE_HIGH;
}
case V4L2_CID_MPEG_VIDEO_H264_LEVEL:
- if (value >= 0 && value <= V4L2_H264_LEVEL_UNKNOWN) {
- return h264_level_v4l2_to_hfi[value];
- } else {
- return h264_level_v4l2_to_hfi[V4L2_H264_LEVEL_UNKNOWN];
- }
+ return h264_level_v4l2_to_hfi(value);
case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:
switch (value) {
case V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC:
@@ -399,11 +467,7 @@ int msm_comm_v4l2_to_hfi(int id, int value)
return HFI_VP9_PROFILE_P0;
}
case V4L2_CID_MPEG_VIDC_VIDEO_VP9_LEVEL:
- if (value >= 0 && value <= V4L2_VP9_LEVEL_61) {
- return vp9_level_v4l2_to_hfi[value];
- } else {
- return vp9_level_v4l2_to_hfi[V4L2_VP9_LEVEL_61];
- }
+ return vp9_level_v4l2_to_hfi(value);
case V4L2_CID_MPEG_VIDEO_HEVC_PROFILE:
switch (value) {
case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
@@ -416,11 +480,7 @@ int msm_comm_v4l2_to_hfi(int id, int value)
return HFI_HEVC_PROFILE_MAIN;
}
case V4L2_CID_MPEG_VIDEO_HEVC_LEVEL:
- if (value >= 0 && value <= V4L2_HEVC_LEVEL_UNKNOWN) {
- return hevc_level_v4l2_to_hfi[value];
- } else {
- return hevc_level_v4l2_to_hfi[V4L2_HEVC_LEVEL_UNKNOWN];
- }
+ return hevc_level_v4l2_to_hfi(value);
case V4L2_CID_MPEG_VIDEO_HEVC_TIER:
switch (value) {
case V4L2_MPEG_VIDEO_HEVC_TIER_MAIN:
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
index 3e73e9d..7c02504 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_pm.c
@@ -41,25 +41,27 @@ int mtk_vcodec_init_enc_pm(struct mtk_vcodec_dev *mtkdev)
node = of_parse_phandle(dev->of_node, "mediatek,larb", 0);
if (!node) {
mtk_v4l2_err("no mediatek,larb found");
- return -1;
+ return -ENODEV;
}
pdev = of_find_device_by_node(node);
+ of_node_put(node);
if (!pdev) {
mtk_v4l2_err("no mediatek,larb device found");
- return -1;
+ return -ENODEV;
}
pm->larbvenc = &pdev->dev;
node = of_parse_phandle(dev->of_node, "mediatek,larb", 1);
if (!node) {
mtk_v4l2_err("no mediatek,larb found");
- return -1;
+ return -ENODEV;
}
pdev = of_find_device_by_node(node);
+ of_node_put(node);
if (!pdev) {
mtk_v4l2_err("no mediatek,larb device found");
- return -1;
+ return -ENODEV;
}
pm->larbvenclt = &pdev->dev;
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 8b2c16d..0f218af 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1956,6 +1956,8 @@ void rc_unregister_device(struct rc_dev *dev)
rc_free_rx_device(dev);
mutex_lock(&dev->lock);
+ if (dev->users && dev->close)
+ dev->close(dev);
dev->registered = false;
mutex_unlock(&dev->lock);
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index 76382c8..1246d69 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -18,6 +18,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#define DRIVER_NAME "memstick"
@@ -436,6 +437,7 @@ static void memstick_check(struct work_struct *work)
struct memstick_dev *card;
dev_dbg(&host->dev, "memstick_check started\n");
+ pm_runtime_get_noresume(host->dev.parent);
mutex_lock(&host->lock);
if (!host->card) {
if (memstick_power_on(host))
@@ -479,6 +481,7 @@ static void memstick_check(struct work_struct *work)
host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
mutex_unlock(&host->lock);
+ pm_runtime_put(host->dev.parent);
dev_dbg(&host->dev, "memstick_check finished\n");
}
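The memstick change above takes a runtime-PM reference on the host's parent for the duration of memstick_check(), so the controller cannot runtime-suspend while the card-detection work is still in flight. The same pattern in isolation, assuming a hypothetical foo_host:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>

struct foo_host {
	struct device *parent;		/* the controller device */
	struct work_struct detect_work;
};

static void foo_detect_work(struct work_struct *work)
{
	struct foo_host *host = container_of(work, struct foo_host,
					     detect_work);

	/* Pin the usage count so the parent cannot runtime-suspend
	 * while this (possibly long-running) work executes. */
	pm_runtime_get_noresume(host->parent);

	/* ... probe the media: may sleep, may take a while ... */

	pm_runtime_put(host->parent);	/* balance the get */
}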
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 30d09d1..11ab17f 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -261,7 +261,7 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
mutex_unlock(&ab8500->lock);
dev_vdbg(ab8500->dev, "rd: addr %#x => data %#x\n", addr, ret);
- return ret;
+ return (ret < 0) ? ret : 0;
}
static int ab8500_get_register(struct device *dev, u8 bank,
diff --git a/drivers/mfd/axp20x.c b/drivers/mfd/axp20x.c
index 0be511d..f8e0fa9 100644
--- a/drivers/mfd/axp20x.c
+++ b/drivers/mfd/axp20x.c
@@ -640,9 +640,9 @@ static const struct mfd_cell axp221_cells[] = {
static const struct mfd_cell axp223_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp22x_pek_resources),
- .resources = axp22x_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp22x_pek_resources),
+ .resources = axp22x_pek_resources,
}, {
.name = "axp22x-adc",
.of_compatible = "x-powers,axp221-adc",
@@ -650,7 +650,7 @@ static const struct mfd_cell axp223_cells[] = {
.name = "axp20x-battery-power-supply",
.of_compatible = "x-powers,axp221-battery-power-supply",
}, {
- .name = "axp20x-regulator",
+ .name = "axp20x-regulator",
}, {
.name = "axp20x-ac-power-supply",
.of_compatible = "x-powers,axp221-ac-power-supply",
@@ -666,9 +666,9 @@ static const struct mfd_cell axp223_cells[] = {
static const struct mfd_cell axp152_cells[] = {
{
- .name = "axp20x-pek",
- .num_resources = ARRAY_SIZE(axp152_pek_resources),
- .resources = axp152_pek_resources,
+ .name = "axp20x-pek",
+ .num_resources = ARRAY_SIZE(axp152_pek_resources),
+ .resources = axp152_pek_resources,
},
};
@@ -697,87 +697,101 @@ static const struct resource axp288_charger_resources[] = {
static const struct mfd_cell axp288_cells[] = {
{
- .name = "axp288_adc",
- .num_resources = ARRAY_SIZE(axp288_adc_resources),
- .resources = axp288_adc_resources,
- },
- {
- .name = "axp288_extcon",
- .num_resources = ARRAY_SIZE(axp288_extcon_resources),
- .resources = axp288_extcon_resources,
- },
- {
- .name = "axp288_charger",
- .num_resources = ARRAY_SIZE(axp288_charger_resources),
- .resources = axp288_charger_resources,
- },
- {
- .name = "axp288_fuel_gauge",
- .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
- .resources = axp288_fuel_gauge_resources,
- },
- {
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp288_power_button_resources),
- .resources = axp288_power_button_resources,
- },
- {
- .name = "axp288_pmic_acpi",
+ .name = "axp288_adc",
+ .num_resources = ARRAY_SIZE(axp288_adc_resources),
+ .resources = axp288_adc_resources,
+ }, {
+ .name = "axp288_extcon",
+ .num_resources = ARRAY_SIZE(axp288_extcon_resources),
+ .resources = axp288_extcon_resources,
+ }, {
+ .name = "axp288_charger",
+ .num_resources = ARRAY_SIZE(axp288_charger_resources),
+ .resources = axp288_charger_resources,
+ }, {
+ .name = "axp288_fuel_gauge",
+ .num_resources = ARRAY_SIZE(axp288_fuel_gauge_resources),
+ .resources = axp288_fuel_gauge_resources,
+ }, {
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp288_power_button_resources),
+ .resources = axp288_power_button_resources,
+ }, {
+ .name = "axp288_pmic_acpi",
},
};
static const struct mfd_cell axp803_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp803_pek_resources),
- .resources = axp803_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp803_pek_resources),
+ .resources = axp803_pek_resources,
+ }, {
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp813-gpio",
+ }, {
+ .name = "axp813-adc",
+ .of_compatible = "x-powers,axp813-adc",
+ }, {
+ .name = "axp20x-battery-power-supply",
+ .of_compatible = "x-powers,axp813-battery-power-supply",
+ }, {
+ .name = "axp20x-ac-power-supply",
+ .of_compatible = "x-powers,axp813-ac-power-supply",
+ .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
+ .resources = axp20x_ac_power_supply_resources,
},
- { .name = "axp20x-regulator" },
+ { .name = "axp20x-regulator" },
};
static const struct mfd_cell axp806_self_working_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp806_pek_resources),
- .resources = axp806_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp806_pek_resources),
+ .resources = axp806_pek_resources,
},
- { .name = "axp20x-regulator" },
+ { .name = "axp20x-regulator" },
};
static const struct mfd_cell axp806_cells[] = {
{
- .id = 2,
- .name = "axp20x-regulator",
+ .id = 2,
+ .name = "axp20x-regulator",
},
};
static const struct mfd_cell axp809_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp809_pek_resources),
- .resources = axp809_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp809_pek_resources),
+ .resources = axp809_pek_resources,
}, {
- .id = 1,
- .name = "axp20x-regulator",
+ .id = 1,
+ .name = "axp20x-regulator",
},
};
static const struct mfd_cell axp813_cells[] = {
{
- .name = "axp221-pek",
- .num_resources = ARRAY_SIZE(axp803_pek_resources),
- .resources = axp803_pek_resources,
+ .name = "axp221-pek",
+ .num_resources = ARRAY_SIZE(axp803_pek_resources),
+ .resources = axp803_pek_resources,
}, {
- .name = "axp20x-regulator",
+ .name = "axp20x-regulator",
}, {
- .name = "axp20x-gpio",
- .of_compatible = "x-powers,axp813-gpio",
+ .name = "axp20x-gpio",
+ .of_compatible = "x-powers,axp813-gpio",
}, {
- .name = "axp813-adc",
- .of_compatible = "x-powers,axp813-adc",
+ .name = "axp813-adc",
+ .of_compatible = "x-powers,axp813-adc",
}, {
.name = "axp20x-battery-power-supply",
.of_compatible = "x-powers,axp813-battery-power-supply",
+ }, {
+ .name = "axp20x-ac-power-supply",
+ .of_compatible = "x-powers,axp813-ac-power-supply",
+ .num_resources = ARRAY_SIZE(axp20x_ac_power_supply_resources),
+ .resources = axp20x_ac_power_supply_resources,
},
};
diff --git a/drivers/mfd/bd9571mwv.c b/drivers/mfd/bd9571mwv.c
index 503979c..fab3cdc 100644
--- a/drivers/mfd/bd9571mwv.c
+++ b/drivers/mfd/bd9571mwv.c
@@ -59,6 +59,7 @@ static const struct regmap_access_table bd9571mwv_writable_table = {
};
static const struct regmap_range bd9571mwv_volatile_yes_ranges[] = {
+ regmap_reg_range(BD9571MWV_DVFS_MONIVDAC, BD9571MWV_DVFS_MONIVDAC),
regmap_reg_range(BD9571MWV_GPIO_IN, BD9571MWV_GPIO_IN),
regmap_reg_range(BD9571MWV_GPIO_INT, BD9571MWV_GPIO_INT),
regmap_reg_range(BD9571MWV_INT_INTREQ, BD9571MWV_INT_INTREQ),
diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c
index 6b22d54..bccde3e 100644
--- a/drivers/mfd/cros_ec_dev.c
+++ b/drivers/mfd/cros_ec_dev.c
@@ -499,6 +499,7 @@ static int ec_device_remove(struct platform_device *pdev)
cros_ec_debugfs_remove(ec);
+ mfd_remove_devices(ec->dev);
cdev_del(&ec->cdev);
device_unregister(&ec->class_dev);
return 0;
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 5970b8de..aec20e1 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2584,7 +2584,7 @@ static struct irq_chip prcmu_irq_chip = {
.irq_unmask = prcmu_irq_unmask,
};
-static __init char *fw_project_name(u32 project)
+static char *fw_project_name(u32 project)
{
switch (project) {
case PRCMU_FW_PROJECT_U8500:
@@ -2732,7 +2732,7 @@ void __init db8500_prcmu_early_init(u32 phy_base, u32 size)
INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
}
-static void __init init_prcm_registers(void)
+static void init_prcm_registers(void)
{
u32 val;
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index c63e331..234febf 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -274,7 +274,9 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
mc13xxx->adcflags |= MC13XXX_ADC_WORKING;
- mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
+ ret = mc13xxx_reg_read(mc13xxx, MC13XXX_ADC0, &old_adc0);
+ if (ret)
+ goto out;
adc0 = MC13XXX_ADC0_ADINC1 | MC13XXX_ADC0_ADINC2;
adc1 = MC13XXX_ADC1_ADEN | MC13XXX_ADC1_ADTRIGIGN | MC13XXX_ADC1_ASC;
diff --git a/drivers/mfd/mt6397-core.c b/drivers/mfd/mt6397-core.c
index 77b64bd..ab24e17 100644
--- a/drivers/mfd/mt6397-core.c
+++ b/drivers/mfd/mt6397-core.c
@@ -329,8 +329,7 @@ static int mt6397_probe(struct platform_device *pdev)
default:
dev_err(&pdev->dev, "unsupported chip: %d\n", id);
- ret = -ENODEV;
- break;
+ return -ENODEV;
}
if (ret) {
diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c
index 52fafea..8d420c3 100644
--- a/drivers/mfd/qcom_rpm.c
+++ b/drivers/mfd/qcom_rpm.c
@@ -638,6 +638,10 @@ static int qcom_rpm_probe(struct platform_device *pdev)
return -EFAULT;
}
+ writel(fw_version[0], RPM_CTRL_REG(rpm, 0));
+ writel(fw_version[1], RPM_CTRL_REG(rpm, 1));
+ writel(fw_version[2], RPM_CTRL_REG(rpm, 2));
+
dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0],
fw_version[1],
fw_version[2]);
diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c
index 7a30546..fe8d335 100644
--- a/drivers/mfd/ti_am335x_tscadc.c
+++ b/drivers/mfd/ti_am335x_tscadc.c
@@ -264,8 +264,9 @@ static int ti_tscadc_probe(struct platform_device *pdev)
cell->pdata_size = sizeof(tscadc);
}
- err = mfd_add_devices(&pdev->dev, pdev->id, tscadc->cells,
- tscadc->used_cells, NULL, 0, NULL);
+ err = mfd_add_devices(&pdev->dev, PLATFORM_DEVID_AUTO,
+ tscadc->cells, tscadc->used_cells, NULL,
+ 0, NULL);
if (err < 0)
goto err_disable_clk;
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 910f569..8bcdecf 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -235,9 +235,9 @@ static int tps65218_probe(struct i2c_client *client,
mutex_init(&tps->tps_lock);
- ret = regmap_add_irq_chip(tps->regmap, tps->irq,
- IRQF_ONESHOT, 0, &tps65218_irq_chip,
- &tps->irq_data);
+ ret = devm_regmap_add_irq_chip(&client->dev, tps->regmap, tps->irq,
+ IRQF_ONESHOT, 0, &tps65218_irq_chip,
+ &tps->irq_data);
if (ret < 0)
return ret;
@@ -253,26 +253,9 @@ static int tps65218_probe(struct i2c_client *client,
ARRAY_SIZE(tps65218_cells), NULL, 0,
regmap_irq_get_domain(tps->irq_data));
- if (ret < 0)
- goto err_irq;
-
- return 0;
-
-err_irq:
- regmap_del_irq_chip(tps->irq, tps->irq_data);
-
return ret;
}
-static int tps65218_remove(struct i2c_client *client)
-{
- struct tps65218 *tps = i2c_get_clientdata(client);
-
- regmap_del_irq_chip(tps->irq, tps->irq_data);
-
- return 0;
-}
-
static const struct i2c_device_id tps65218_id_table[] = {
{ "tps65218", TPS65218 },
{ },
@@ -285,7 +268,6 @@ static struct i2c_driver tps65218_driver = {
.of_match_table = of_tps65218_match_table,
},
.probe = tps65218_probe,
- .remove = tps65218_remove,
.id_table = tps65218_id_table,
};
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 4be3d23..299016b 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -979,7 +979,7 @@ add_children(struct twl4030_platform_data *pdata, unsigned irq_base,
* letting it generate the right frequencies for USB, MADC, and
* other purposes.
*/
-static inline int __init protect_pm_master(void)
+static inline int protect_pm_master(void)
{
int e = 0;
@@ -988,7 +988,7 @@ static inline int __init protect_pm_master(void)
return e;
}
-static inline int __init unprotect_pm_master(void)
+static inline int unprotect_pm_master(void)
{
int e = 0;
diff --git a/drivers/mfd/wm5110-tables.c b/drivers/mfd/wm5110-tables.c
index 1ee68bd..16c6e2a 100644
--- a/drivers/mfd/wm5110-tables.c
+++ b/drivers/mfd/wm5110-tables.c
@@ -1618,6 +1618,7 @@ static const struct reg_default wm5110_reg_default[] = {
{ 0x00000ECD, 0x0000 }, /* R3789 - HPLPF4_2 */
{ 0x00000EE0, 0x0000 }, /* R3808 - ASRC_ENABLE */
{ 0x00000EE2, 0x0000 }, /* R3810 - ASRC_RATE1 */
+ { 0x00000EE3, 0x4000 }, /* R3811 - ASRC_RATE2 */
{ 0x00000EF0, 0x0000 }, /* R3824 - ISRC 1 CTRL 1 */
{ 0x00000EF1, 0x0000 }, /* R3825 - ISRC 1 CTRL 2 */
{ 0x00000EF2, 0x0000 }, /* R3826 - ISRC 1 CTRL 3 */
@@ -2869,6 +2870,7 @@ static bool wm5110_readable_register(struct device *dev, unsigned int reg)
case ARIZONA_ASRC_ENABLE:
case ARIZONA_ASRC_STATUS:
case ARIZONA_ASRC_RATE1:
+ case ARIZONA_ASRC_RATE2:
case ARIZONA_ISRC_1_CTRL_1:
case ARIZONA_ISRC_1_CTRL_2:
case ARIZONA_ISRC_1_CTRL_3:
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 68a1ac9..d382b13 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -13,7 +13,7 @@
ones like at24c64, 24lc02 or fm24c04:
24c00, 24c01, 24c02, spd (readonly 24c02), 24c04, 24c08,
- 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024
+ 24c16, 24c32, 24c64, 24c128, 24c256, 24c512, 24c1024, 24c2048
Unless you like data loss puzzles, always be sure that any chip
you configure as a 24c32 (32 kbit) or larger is NOT really a
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 7e50e1d..94836fc 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -173,6 +173,7 @@ AT24_CHIP_DATA(at24_data_24c128, 131072 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24c256, 262144 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24c512, 524288 / 8, AT24_FLAG_ADDR16);
AT24_CHIP_DATA(at24_data_24c1024, 1048576 / 8, AT24_FLAG_ADDR16);
+AT24_CHIP_DATA(at24_data_24c2048, 2097152 / 8, AT24_FLAG_ADDR16);
/* identical to 24c08 ? */
AT24_CHIP_DATA(at24_data_INT3499, 8192 / 8, 0);
@@ -199,6 +200,7 @@ static const struct i2c_device_id at24_ids[] = {
{ "24c256", (kernel_ulong_t)&at24_data_24c256 },
{ "24c512", (kernel_ulong_t)&at24_data_24c512 },
{ "24c1024", (kernel_ulong_t)&at24_data_24c1024 },
+ { "24c2048", (kernel_ulong_t)&at24_data_24c2048 },
{ "at24", 0 },
{ /* END OF LIST */ }
};
@@ -227,6 +229,7 @@ static const struct of_device_id at24_of_match[] = {
{ .compatible = "atmel,24c256", .data = &at24_data_24c256 },
{ .compatible = "atmel,24c512", .data = &at24_data_24c512 },
{ .compatible = "atmel,24c1024", .data = &at24_data_24c1024 },
+ { .compatible = "atmel,24c2048", .data = &at24_data_24c2048 },
{ /* END OF LIST */ },
};
MODULE_DEVICE_TABLE(of, at24_of_match);
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index 23739a6..bb1ee98 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -139,6 +139,8 @@
#define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */
#define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */
+#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index c8e21c8..4299658 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -105,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH8_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c
index 3633202..de7f035 100644
--- a/drivers/misc/mic/vop/vop_main.c
+++ b/drivers/misc/mic/vop/vop_main.c
@@ -563,6 +563,8 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
int ret = -1;
if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) {
+ struct device *dev = get_device(&vdev->vdev.dev);
+
dev_dbg(&vpdev->dev,
"%s %d config_change %d type %d vdev %p\n",
__func__, __LINE__,
@@ -574,7 +576,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d,
iowrite8(-1, &dc->h2c_vdev_db);
if (status & VIRTIO_CONFIG_S_DRIVER_OK)
wait_for_completion(&vdev->reset_done);
- put_device(&vdev->vdev.dev);
+ put_device(dev);
iowrite8(1, &dc->guest_ack);
dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n",
__func__, __LINE__, ioread8(&dc->guest_ack));
diff --git a/drivers/misc/vexpress-syscfg.c b/drivers/misc/vexpress-syscfg.c
index 6c3591c..a3c6c77 100644
--- a/drivers/misc/vexpress-syscfg.c
+++ b/drivers/misc/vexpress-syscfg.c
@@ -61,7 +61,7 @@ static int vexpress_syscfg_exec(struct vexpress_syscfg_func *func,
int tries;
long timeout;
- if (WARN_ON(index > func->num_templates))
+ if (WARN_ON(index >= func->num_templates))
return -EINVAL;
command = readl(syscfg->base + SYS_CFGCTRL);
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index f37625f..bf4a3e2 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -2158,7 +2158,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq)
if (waiting)
wake_up(&mq->wait);
else
- kblockd_schedule_work(&mq->complete_work);
+ queue_work(mq->card->complete_wq, &mq->complete_work);
return;
}
@@ -2972,6 +2972,13 @@ static int mmc_blk_probe(struct mmc_card *card)
mmc_fixup_device(card, mmc_blk_fixups);
+ card->complete_wq = alloc_workqueue("mmc_complete",
+ WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
+ if (unlikely(!card->complete_wq)) {
+ pr_err("Failed to create mmc completion workqueue");
+ return -ENOMEM;
+ }
+
md = mmc_blk_alloc(card);
if (IS_ERR(md))
return PTR_ERR(md);
@@ -3042,6 +3049,7 @@ static void mmc_blk_remove(struct mmc_card *card)
#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
mmc_set_bus_resume_policy(card->host, 0);
#endif
+ destroy_workqueue(card->complete_wq);
}
static int _mmc_blk_suspend(struct mmc_card *card)
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 0d3b747..5301302 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -286,6 +286,7 @@ static void bcm2835_reset(struct mmc_host *mmc)
if (host->dma_chan)
dmaengine_terminate_sync(host->dma_chan);
+ host->dma_chan = NULL;
bcm2835_reset_internal(host);
}
@@ -772,6 +773,8 @@ static void bcm2835_finish_command(struct bcm2835_host *host)
if (!(sdhsts & SDHSTS_CRC7_ERROR) ||
(host->cmd->opcode != MMC_SEND_OP_COND)) {
+ u32 edm, fsm;
+
if (sdhsts & SDHSTS_CMD_TIME_OUT) {
host->cmd->error = -ETIMEDOUT;
} else {
@@ -780,6 +783,13 @@ static void bcm2835_finish_command(struct bcm2835_host *host)
bcm2835_dumpregs(host);
host->cmd->error = -EILSEQ;
}
+ edm = readl(host->ioaddr + SDEDM);
+ fsm = edm & SDEDM_FSM_MASK;
+ if (fsm == SDEDM_FSM_READWAIT ||
+ fsm == SDEDM_FSM_WRITESTART1)
+ /* Kick the FSM out of its wait */
+ writel(edm | SDEDM_FORCE_DATA_MODE,
+ host->ioaddr + SDEDM);
bcm2835_finish_request(host);
return;
}
@@ -837,6 +847,8 @@ static void bcm2835_timeout(struct work_struct *work)
dev_err(dev, "timeout waiting for hardware interrupt.\n");
bcm2835_dumpregs(host);
+ bcm2835_reset(host->mmc);
+
if (host->data) {
host->data->error = -ETIMEDOUT;
bcm2835_finish_data(host);
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 993386c9..864338e 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -983,17 +983,17 @@ static int jz4740_mmc_request_gpios(struct mmc_host *mmc,
if (!pdata->read_only_active_low)
mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
- if (gpio_is_valid(pdata->gpio_card_detect)) {
- ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0);
- if (ret)
- return ret;
- }
+ /*
+ * Get optional card detect and write protect GPIOs,
+ * only back out on probe deferral.
+ */
+ ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL);
+ if (ret == -EPROBE_DEFER)
+ return ret;
- if (gpio_is_valid(pdata->gpio_read_only)) {
- ret = mmc_gpio_request_ro(mmc, pdata->gpio_read_only);
- if (ret)
- return ret;
- }
+ ret = mmc_gpiod_request_ro(mmc, "wp", 0, false, 0, NULL);
+ if (ret == -EPROBE_DEFER)
+ return ret;
return jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power,
"MMC read only", true, pdata->power_active_low);
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index ef9deaa..ddd98cdd 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -1286,7 +1286,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
host->regs + SD_EMMC_IRQ_EN);
ret = request_threaded_irq(host->irq, meson_mmc_irq,
- meson_mmc_irq_thread, IRQF_SHARED, NULL, host);
+ meson_mmc_irq_thread, IRQF_SHARED,
+ dev_name(&pdev->dev), host);
if (ret)
goto err_init_clk;
diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c
index 2cfec33..9841b44 100644
--- a/drivers/mmc/host/meson-mx-sdio.c
+++ b/drivers/mmc/host/meson-mx-sdio.c
@@ -596,6 +596,9 @@ static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host)
init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
"%s#fixed_factor",
dev_name(host->controller_dev));
+ if (!init.name)
+ return -ENOMEM;
+
init.ops = &clk_fixed_factor_ops;
init.flags = 0;
init.parent_names = &clk_fixed_factor_parent;
@@ -612,6 +615,9 @@ static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host)
clk_div_parent = __clk_get_name(host->fixed_factor_clk);
init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL,
"%s#div", dev_name(host->controller_dev));
+ if (!init.name)
+ return -ENOMEM;
+
init.ops = &clk_divider_ops;
init.flags = CLK_SET_RATE_PARENT;
init.parent_names = &clk_div_parent;
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 9cb7554..a7bf851 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -526,8 +526,12 @@ static void esdhc_clock_enable(struct sdhci_host *host, bool enable)
/* Wait max 20 ms */
timeout = ktime_add_ms(ktime_get(), 20);
val = ESDHC_CLOCK_STABLE;
- while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) {
- if (ktime_after(ktime_get(), timeout)) {
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (sdhci_readl(host, ESDHC_PRSSTAT) & val)
+ break;
+ if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
break;
@@ -591,8 +595,12 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock)
/* Wait max 20 ms */
timeout = ktime_add_ms(ktime_get(), 20);
- while (!(sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)) {
- if (ktime_after(ktime_get(), timeout)) {
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE)
+ break;
+ if (timedout) {
pr_err("%s: Internal clock never stabilised.\n",
mmc_hostname(host->mmc));
return;
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index d264391..d02f5cf 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -220,8 +220,12 @@ static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host,
/* wait 1ms */
timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
- while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)) {
- if (WARN_ON(ktime_after(ktime_get(), timeout)))
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)
+ break;
+ if (WARN_ON(timedout))
return;
usleep_range(5, 10);
}
@@ -653,8 +657,12 @@ static void sdhci_omap_init_74_clocks(struct sdhci_host *host, u8 power_mode)
/* wait 1ms */
timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT);
- while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)) {
- if (WARN_ON(ktime_after(ktime_get(), timeout)))
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)
+ break;
+ if (WARN_ON(timedout))
return;
usleep_range(5, 10);
}
diff --git a/drivers/mmc/host/sdhci-xenon-phy.c b/drivers/mmc/host/sdhci-xenon-phy.c
index c335052..caccedc 100644
--- a/drivers/mmc/host/sdhci-xenon-phy.c
+++ b/drivers/mmc/host/sdhci-xenon-phy.c
@@ -357,9 +357,13 @@ static int xenon_emmc_phy_enable_dll(struct sdhci_host *host)
/* Wait max 32 ms */
timeout = ktime_add_ms(ktime_get(), 32);
- while (!(sdhci_readw(host, XENON_SLOT_EXT_PRESENT_STATE) &
- XENON_DLL_LOCK_STATE)) {
- if (ktime_after(ktime_get(), timeout)) {
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ if (sdhci_readw(host, XENON_SLOT_EXT_PRESENT_STATE) &
+ XENON_DLL_LOCK_STATE)
+ break;
+ if (timedout) {
dev_err(mmc_dev(host->mmc), "Wait for DLL Lock time-out\n");
return -ETIMEDOUT;
}
diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c
index 4d0791f..a0b5089 100644
--- a/drivers/mmc/host/sdhci-xenon.c
+++ b/drivers/mmc/host/sdhci-xenon.c
@@ -34,9 +34,13 @@ static int xenon_enable_internal_clk(struct sdhci_host *host)
sdhci_writel(host, reg, SDHCI_CLOCK_CONTROL);
/* Wait max 20 ms */
timeout = ktime_add_ms(ktime_get(), 20);
- while (!((reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL))
- & SDHCI_CLOCK_INT_STABLE)) {
- if (ktime_after(ktime_get(), timeout)) {
+ while (1) {
+ bool timedout = ktime_after(ktime_get(), timeout);
+
+ reg = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
+ if (reg & SDHCI_CLOCK_INT_STABLE)
+ break;
+ if (timedout) {
dev_err(mmc_dev(host->mmc), "Internal clock never stabilised.\n");
return -ETIMEDOUT;
}
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 568349e..c458418 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1394,6 +1394,21 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
if (ret)
goto error_free_dma;
+ /*
+ * If we don't support delay chains in the SoC, we can't use any
+ * of the higher speed modes. Mask them out in case the device
+ * tree specifies the properties for them, which gets added to
+ * the caps by mmc_of_parse() above.
+ */
+ if (!(host->cfg->clk_delays || host->use_new_timings)) {
+ mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR |
+ MMC_CAP_1_2V_DDR | MMC_CAP_UHS);
+ mmc->caps2 &= ~MMC_CAP2_HS200;
+ }
+
+ /* TODO: This driver doesn't support HS400 mode yet */
+ mmc->caps2 &= ~MMC_CAP2_HS400;
+
ret = sunxi_mmc_init_host(host);
if (ret)
goto error_free_dma;
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 99c460f..0bbb23b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -470,6 +470,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent,
/* let's register it anyway to preserve ordering */
slave->offset = 0;
slave->mtd.size = 0;
+
+ /* Initialize ->erasesize to make add_mtd_device() happy. */
+ slave->mtd.erasesize = parent->erasesize;
+
printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n",
part->name);
goto out_register;
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
index 88ea220..322a008 100644
--- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c
@@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this)
/*
* Reset BCH here, too. We got failures otherwise :(
- * See later BCH reset for explanation of MX23 handling
+ * See later BCH reset for explanation of MX23 and MX28 handling
*/
- ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ ret = gpmi_reset_block(r->bch_regs,
+ GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
if (ret)
goto err_out;
@@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this)
/*
* Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
* chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
- * On the other hand, the MX28 needs the reset, because one case has been
- * seen where the BCH produced ECC errors constantly after 10000
- * consecutive reboots. The latter case has not been seen on the MX23
- * yet, still we don't know if it could happen there as well.
+ * and MX28.
*/
- ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
+ ret = gpmi_reset_block(r->bch_regs,
+ GPMI_IS_MX23(this) || GPMI_IS_MX28(this));
if (ret)
goto err_out;
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 30f8364..8c7bf91 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
struct nand_device *nand = spinand_to_nand(spinand);
struct mtd_info *mtd = nanddev_to_mtd(nand);
struct nand_page_io_req adjreq = *req;
- unsigned int nbytes = 0;
- void *buf = NULL;
+ void *buf = spinand->databuf;
+ unsigned int nbytes;
u16 column = 0;
int ret;
- memset(spinand->databuf, 0xff,
- nanddev_page_size(nand) +
- nanddev_per_page_oobsize(nand));
+ /*
+ * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
+ * the cache content to 0xFF (depends on vendor implementation), so we
+ * must fill the page cache entirely even if we only want to program
+ * the data portion of the page, otherwise we might corrupt the BBM or
+ * user data previously programmed in OOB area.
+ */
+ nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
+ memset(spinand->databuf, 0xff, nbytes);
+ adjreq.dataoffs = 0;
+ adjreq.datalen = nanddev_page_size(nand);
+ adjreq.databuf.out = spinand->databuf;
+ adjreq.ooblen = nanddev_per_page_oobsize(nand);
+ adjreq.ooboffs = 0;
+ adjreq.oobbuf.out = spinand->oobbuf;
- if (req->datalen) {
+ if (req->datalen)
memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
req->datalen);
- adjreq.dataoffs = 0;
- adjreq.datalen = nanddev_page_size(nand);
- adjreq.databuf.out = spinand->databuf;
- nbytes = adjreq.datalen;
- buf = spinand->databuf;
- }
if (req->ooblen) {
if (req->mode == MTD_OPS_AUTO_OOB)
@@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
else
memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
req->ooblen);
-
- adjreq.ooblen = nanddev_per_page_oobsize(nand);
- adjreq.ooboffs = 0;
- nbytes += nanddev_per_page_oobsize(nand);
- if (!buf) {
- buf = spinand->oobbuf;
- column = nanddev_page_size(nand);
- }
}
spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
@@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
/*
* We need to use the RANDOM LOAD CACHE operation if there's
- * more than one iteration, because the LOAD operation resets
- * the cache to 0xff.
+ * more than one iteration, because the LOAD operation might
+ * reset the cache to 0xff.
*/
if (nbytes) {
column = op.addr.val;
@@ -1016,11 +1014,11 @@ static int spinand_init(struct spinand_device *spinand)
for (i = 0; i < nand->memorg.ntargets; i++) {
ret = spinand_select_target(spinand, i);
if (ret)
- goto err_free_bufs;
+ goto err_manuf_cleanup;
ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED);
if (ret)
- goto err_free_bufs;
+ goto err_manuf_cleanup;
}
ret = nanddev_init(nand, &spinand_ops, THIS_MODULE);
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 258918d..9f697a5 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
unsigned int sub_irq;
unsigned int n;
u16 reg;
+ u16 ctl1;
int err;
mutex_lock(&chip->reg_lock);
@@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
if (err)
goto out;
- for (n = 0; n < chip->g1_irq.nirqs; ++n) {
- if (reg & (1 << n)) {
- sub_irq = irq_find_mapping(chip->g1_irq.domain, n);
- handle_nested_irq(sub_irq);
- ++nhandled;
+ do {
+ for (n = 0; n < chip->g1_irq.nirqs; ++n) {
+ if (reg & (1 << n)) {
+ sub_irq = irq_find_mapping(chip->g1_irq.domain,
+ n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
}
- }
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1);
+ if (err)
+ goto unlock;
+ err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
+unlock:
+ mutex_unlock(&chip->reg_lock);
+ if (err)
+ goto out;
+ ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
+ } while (reg & ctl1);
+
out:
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c
index 5200e4b..ea24384 100644
--- a/drivers/net/dsa/mv88e6xxx/global1_atu.c
+++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c
@@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
{
struct mv88e6xxx_chip *chip = dev_id;
struct mv88e6xxx_atu_entry entry;
+ int spid;
int err;
u16 val;
@@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
if (err)
goto out;
+ spid = entry.state;
+
if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) {
dev_err_ratelimited(chip->dev,
"ATU age out violation for %pM\n",
@@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id)
if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) {
dev_err_ratelimited(chip->dev,
- "ATU member violation for %pM portvec %x\n",
- entry.mac, entry.portvec);
- chip->ports[entry.portvec].atu_member_violation++;
+ "ATU member violation for %pM portvec %x spid %d\n",
+ entry.mac, entry.portvec, spid);
+ chip->ports[spid].atu_member_violation++;
}
if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) {
dev_err_ratelimited(chip->dev,
- "ATU miss violation for %pM portvec %x\n",
- entry.mac, entry.portvec);
- chip->ports[entry.portvec].atu_miss_violation++;
+ "ATU miss violation for %pM portvec %x spid %d\n",
+ entry.mac, entry.portvec, spid);
+ chip->ports[spid].atu_miss_violation++;
}
if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) {
dev_err_ratelimited(chip->dev,
- "ATU full violation for %pM portvec %x\n",
- entry.mac, entry.portvec);
- chip->ports[entry.portvec].atu_full_violation++;
+ "ATU full violation for %pM portvec %x spid %d\n",
+ entry.mac, entry.portvec, spid);
+ chip->ports[spid].atu_full_violation++;
}
mutex_unlock(&chip->reg_lock);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 4b73131..1b5f591 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2595,11 +2595,6 @@ static int ena_restore_device(struct ena_adapter *adapter)
goto err_device_destroy;
}
- clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
- /* Make sure we don't have a race with AENQ Links state handler */
- if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
- netif_carrier_on(adapter->netdev);
-
rc = ena_enable_msix_and_set_admin_interrupts(adapter,
adapter->num_queues);
if (rc) {
@@ -2616,6 +2611,11 @@ static int ena_restore_device(struct ena_adapter *adapter)
}
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
+
+ clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags);
+ if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags))
+ netif_carrier_on(adapter->netdev);
+
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
dev_err(&pdev->dev, "Device reset completed successfully\n");
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index c965e65..9939cca 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -262,6 +262,8 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self)
AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) &
HW_ATL_MPI_STATE_MSK) == MPI_DEINIT,
10, 1000U);
+ if (err)
+ return err;
}
if (self->rbl_enabled)
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 7b6859e..fc16b2b 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -519,7 +519,6 @@ static void bcm_sysport_get_wol(struct net_device *dev,
struct ethtool_wolinfo *wol)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
- u32 reg;
wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
wol->wolopts = priv->wolopts;
@@ -527,11 +526,7 @@ static void bcm_sysport_get_wol(struct net_device *dev,
if (!(priv->wolopts & WAKE_MAGICSECURE))
return;
- /* Return the programmed SecureOn password */
- reg = umac_readl(priv, UMAC_PSW_MS);
- put_unaligned_be16(reg, &wol->sopass[0]);
- reg = umac_readl(priv, UMAC_PSW_LS);
- put_unaligned_be32(reg, &wol->sopass[2]);
+ memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}
static int bcm_sysport_set_wol(struct net_device *dev,
@@ -547,13 +542,8 @@ static int bcm_sysport_set_wol(struct net_device *dev,
if (wol->wolopts & ~supported)
return -EINVAL;
- /* Program the SecureOn password */
- if (wol->wolopts & WAKE_MAGICSECURE) {
- umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
- UMAC_PSW_MS);
- umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
- UMAC_PSW_LS);
- }
+ if (wol->wolopts & WAKE_MAGICSECURE)
+ memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));
/* Flag the device and relevant IRQ as wakeup capable */
if (wol->wolopts) {
@@ -2588,13 +2578,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
unsigned int index, i = 0;
u32 reg;
- /* Password has already been programmed */
reg = umac_readl(priv, UMAC_MPD_CTRL);
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
reg |= MPD_EN;
reg &= ~PSW_EN;
- if (priv->wolopts & WAKE_MAGICSECURE)
+ if (priv->wolopts & WAKE_MAGICSECURE) {
+ /* Program the SecureOn password */
+ umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
+ UMAC_PSW_MS);
+ umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
+ UMAC_PSW_LS);
reg |= PSW_EN;
+ }
umac_writel(priv, reg, UMAC_MPD_CTRL);
if (priv->wolopts & WAKE_FILTER) {
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 046c6c1..36e0adf 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -12,6 +12,7 @@
#define __BCM_SYSPORT_H
#include <linux/bitmap.h>
+#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/net_dim.h>
@@ -776,6 +777,7 @@ struct bcm_sysport_priv {
unsigned int crc_fwd:1;
u16 rev;
u32 wolopts;
+ u8 sopass[SOPASS_MAX];
unsigned int wol_irq_disabled:1;
/* MIB related fields */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index e2d9254..034f575 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6073,23 +6073,26 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
int bnxt_reserve_rings(struct bnxt *bp)
{
int tcs = netdev_get_num_tc(bp->dev);
+ bool reinit_irq = false;
int rc;
if (!bnxt_need_reserve_rings(bp))
return 0;
- rc = __bnxt_reserve_rings(bp);
- if (rc) {
- netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
- return rc;
- }
if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
bnxt_ulp_irq_stop(bp);
bnxt_clear_int_mode(bp);
- rc = bnxt_init_int_mode(bp);
+ reinit_irq = true;
+ }
+ rc = __bnxt_reserve_rings(bp);
+ if (reinit_irq) {
+ if (!rc)
+ rc = bnxt_init_int_mode(bp);
bnxt_ulp_irq_restart(bp, rc);
- if (rc)
- return rc;
+ }
+ if (rc) {
+ netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc);
+ return rc;
}
if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
netdev_err(bp->dev, "tx ring reservation failure\n");
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 60641e20..9a7f70d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1434,7 +1434,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
* csum is correct or is zero.
*/
if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc &&
- tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) {
+ tcp_udp_csum_ok && outer_csum_ok &&
+ (ipv4_csum_ok || ipv6)) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
skb->csum_level = encap;
}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 65a22cd..029730b 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2052,6 +2052,7 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
bool nonlinear = skb_is_nonlinear(skb);
struct rtnl_link_stats64 *percpu_stats;
struct dpaa_percpu_priv *percpu_priv;
+ struct netdev_queue *txq;
struct dpaa_priv *priv;
struct qm_fd fd;
int offset = 0;
@@ -2101,6 +2102,11 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
if (unlikely(err < 0))
goto skb_to_fd_failed;
+ txq = netdev_get_tx_queue(net_dev, queue_mapping);
+
+ /* LLTX requires to do our own update of trans_start */
+ txq->trans_start = jiffies;
+
if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
fd.cmd |= cpu_to_be32(FM_FD_CMD_UPD);
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index bc6eb30..41c6fa20 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -928,7 +928,7 @@ int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
hash = get_mac_addr_hash_code(addr) & HASH_CTRL_ADDR_MASK;
/* Create element to be added to the driver hash table */
- hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+ hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
if (!hash_entry)
return -ENOMEM;
hash_entry->addr = addr;
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 4070593..f75b9c1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -553,7 +553,7 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
hash = (crc >> TGEC_HASH_MCAST_SHIFT) & TGEC_HASH_ADR_MSK;
/* Create element to be added to the driver hash table */
- hash_entry = kmalloc(sizeof(*hash_entry), GFP_KERNEL);
+ hash_entry = kmalloc(sizeof(*hash_entry), GFP_ATOMIC);
if (!hash_entry)
return -ENOMEM;
hash_entry->addr = addr;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index ad1779f..a78bfaf 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -147,12 +147,10 @@ static void hns_ae_put_handle(struct hnae_handle *handle)
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
int i;
- vf_cb->mac_cb = NULL;
-
- kfree(vf_cb);
-
for (i = 0; i < handle->q_num; i++)
hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
+
+ kfree(vf_cb);
}
static int hns_ae_wait_flow_down(struct hnae_handle *handle)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index b7b2f82..0ccfa6a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2691,6 +2691,8 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
+#define HNS3_VECTOR_PF_MAX_NUM 64
+
struct hnae3_handle *h = priv->ae_handle;
struct hns3_enet_tqp_vector *tqp_vector;
struct hnae3_vector_info *vector;
@@ -2703,6 +2705,8 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
/* RSS size, cpu online and vector_num should be the same */
/* Should consider 2p/4p later */
vector_num = min_t(u16, num_online_cpus(), tqp_num);
+ vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
+
vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
GFP_KERNEL);
if (!vector)
@@ -2760,12 +2764,12 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
- if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
- (void)irq_set_affinity_hint(
- priv->tqp_vector[i].vector_irq,
- NULL);
- free_irq(priv->tqp_vector[i].vector_irq,
- &priv->tqp_vector[i]);
+ if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
+ irq_set_affinity_notifier(tqp_vector->vector_irq,
+ NULL);
+ irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
+ free_irq(tqp_vector->vector_irq, tqp_vector);
+ tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
}
priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 7a80652..f84e2c2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -122,6 +122,7 @@ enum i40e_state_t {
__I40E_MDD_EVENT_PENDING,
__I40E_VFLR_EVENT_PENDING,
__I40E_RESET_RECOVERY_PENDING,
+ __I40E_TIMEOUT_RECOVERY_PENDING,
__I40E_MISC_IRQ_REQUESTED,
__I40E_RESET_INTR_RECEIVED,
__I40E_REINIT_REQUESTED,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ed9d3fc..41fa22c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -336,6 +336,10 @@ static void i40e_tx_timeout(struct net_device *netdev)
(pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
return; /* don't do any new action before the next timeout */
+ /* don't kick off another recovery if one is already pending */
+ if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state))
+ return;
+
if (tx_ring) {
head = i40e_get_head(tx_ring);
/* Read interrupt register */
@@ -9566,6 +9570,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
clear_bit(__I40E_RESET_FAILED, pf->state);
clear_recovery:
clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state);
+ clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state);
}
/**
@@ -12011,6 +12016,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
+ /* i40iw_net_event() reads 16 bytes from neigh->primary_key */
+ netdev->neigh_priv_len = sizeof(u32) * 4;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
/* Setup netdev TC information */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 3f047bb..db1543b 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -4333,8 +4333,12 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_enable(&vsi->q_vectors[q_idx]->napi);
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_enable(&q_vector->napi);
+ }
}
/**
@@ -4817,8 +4821,12 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
if (!vsi->netdev)
return;
- for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
- napi_disable(&vsi->q_vectors[q_idx]->napi);
+ for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
+
+ if (q_vector->rx.ring || q_vector->tx.ring)
+ napi_disable(&q_vector->napi);
+ }
}
/**
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 0796cef..ffaa6e0 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8770,9 +8770,11 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
rtnl_unlock();
#ifdef CONFIG_PM
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
+ if (!runtime) {
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+ }
#endif
status = rd32(E1000_STATUS);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9c08c36..15dea48 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
memset(p, 0, regs->len);
memcpy_fromio(p, io, B3_RAM_ADDR);
- memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
- regs->len - B3_RI_WTO_R1);
+ if (regs->len > B3_RI_WTO_R1) {
+ memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1,
+ regs->len - B3_RI_WTO_R1);
+ }
}
/* Wake on Lan only supported on Yukon chips with rev 1 or above */
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 697d9b3..ae2f350 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -5087,7 +5087,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
- pdev->d3_delay = 200;
+ pdev->d3_delay = 300;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index a1aeeb8..f5cd953 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -620,6 +620,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
}
#endif
+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+
/* We reach this function only after checking that any of
* the (IPv4 | IPv6) bits are set in cqe->status.
*/
@@ -627,9 +629,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
netdev_features_t dev_features)
{
__wsum hw_checksum = 0;
+ void *hdr;
- void *hdr = (u8 *)va + sizeof(struct ethhdr);
+ /* CQE csum doesn't cover padding octets in short ethernet
+ * frames. And the pad field is appended prior to calculating
+ * and appending the FCS field.
+ *
+ * Detecting these padded frames requires to verify and parse
+ * IP headers, so we simply force all those small frames to skip
+ * checksum complete.
+ */
+ if (short_frame(skb->len))
+ return -EINVAL;
+ hdr = (u8 *)va + sizeof(struct ethhdr);
hw_checksum = csum_unfold((__force __sum16)cqe->checksum);
if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
@@ -822,6 +835,11 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
skb_record_rx_queue(skb, cq_ring);
if (likely(dev->features & NETIF_F_RXCSUM)) {
+ /* TODO: For IP non TCP/UDP packets when csum complete is
+ * not an option (not supported or any other reason) we can
+ * actually check cqe IPOK status bit and report
+ * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
+ */
if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) &&
(cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 7262c63..288fca8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -57,12 +57,12 @@ static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chu
int i;
if (chunk->nsg > 0)
- pci_unmap_sg(dev->persist->pdev, chunk->mem, chunk->npages,
+ pci_unmap_sg(dev->persist->pdev, chunk->sg, chunk->npages,
PCI_DMA_BIDIRECTIONAL);
for (i = 0; i < chunk->npages; ++i)
- __free_pages(sg_page(&chunk->mem[i]),
- get_order(chunk->mem[i].length));
+ __free_pages(sg_page(&chunk->sg[i]),
+ get_order(chunk->sg[i].length));
}
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
@@ -71,9 +71,9 @@ static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *
for (i = 0; i < chunk->npages; ++i)
dma_free_coherent(&dev->persist->pdev->dev,
- chunk->mem[i].length,
- lowmem_page_address(sg_page(&chunk->mem[i])),
- sg_dma_address(&chunk->mem[i]));
+ chunk->buf[i].size,
+ chunk->buf[i].addr,
+ chunk->buf[i].dma_addr);
}
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
@@ -111,22 +111,21 @@ static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order,
return 0;
}
-static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
- int order, gfp_t gfp_mask)
+static int mlx4_alloc_icm_coherent(struct device *dev, struct mlx4_icm_buf *buf,
+ int order, gfp_t gfp_mask)
{
- void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
- &sg_dma_address(mem), gfp_mask);
- if (!buf)
+ buf->addr = dma_alloc_coherent(dev, PAGE_SIZE << order,
+ &buf->dma_addr, gfp_mask);
+ if (!buf->addr)
return -ENOMEM;
- if (offset_in_page(buf)) {
- dma_free_coherent(dev, PAGE_SIZE << order,
- buf, sg_dma_address(mem));
+ if (offset_in_page(buf->addr)) {
+ dma_free_coherent(dev, PAGE_SIZE << order, buf->addr,
+ buf->dma_addr);
return -ENOMEM;
}
- sg_set_buf(mem, buf, PAGE_SIZE << order);
- sg_dma_len(mem) = PAGE_SIZE << order;
+ buf->size = PAGE_SIZE << order;
return 0;
}
@@ -159,21 +158,21 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
while (npages > 0) {
if (!chunk) {
- chunk = kmalloc_node(sizeof(*chunk),
+ chunk = kzalloc_node(sizeof(*chunk),
gfp_mask & ~(__GFP_HIGHMEM |
__GFP_NOWARN),
dev->numa_node);
if (!chunk) {
- chunk = kmalloc(sizeof(*chunk),
+ chunk = kzalloc(sizeof(*chunk),
gfp_mask & ~(__GFP_HIGHMEM |
__GFP_NOWARN));
if (!chunk)
goto fail;
}
+ chunk->coherent = coherent;
- sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
- chunk->npages = 0;
- chunk->nsg = 0;
+ if (!coherent)
+ sg_init_table(chunk->sg, MLX4_ICM_CHUNK_LEN);
list_add_tail(&chunk->list, &icm->chunk_list);
}
@@ -186,10 +185,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
if (coherent)
ret = mlx4_alloc_icm_coherent(&dev->persist->pdev->dev,
- &chunk->mem[chunk->npages],
- cur_order, mask);
+ &chunk->buf[chunk->npages],
+ cur_order, mask);
else
- ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
+ ret = mlx4_alloc_icm_pages(&chunk->sg[chunk->npages],
cur_order, mask,
dev->numa_node);
@@ -205,7 +204,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
if (coherent)
++chunk->nsg;
else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
- chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
+ chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
chunk->npages,
PCI_DMA_BIDIRECTIONAL);
@@ -220,7 +219,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
}
if (!coherent && chunk) {
- chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->mem,
+ chunk->nsg = pci_map_sg(dev->persist->pdev, chunk->sg,
chunk->npages,
PCI_DMA_BIDIRECTIONAL);
@@ -320,7 +319,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
u64 idx;
struct mlx4_icm_chunk *chunk;
struct mlx4_icm *icm;
- struct page *page = NULL;
+ void *addr = NULL;
if (!table->lowmem)
return NULL;
@@ -336,28 +335,49 @@ void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj,
list_for_each_entry(chunk, &icm->chunk_list, list) {
for (i = 0; i < chunk->npages; ++i) {
- if (dma_handle && dma_offset >= 0) {
- if (sg_dma_len(&chunk->mem[i]) > dma_offset)
- *dma_handle = sg_dma_address(&chunk->mem[i]) +
- dma_offset;
- dma_offset -= sg_dma_len(&chunk->mem[i]);
+ dma_addr_t dma_addr;
+ size_t len;
+
+ if (table->coherent) {
+ len = chunk->buf[i].size;
+ dma_addr = chunk->buf[i].dma_addr;
+ addr = chunk->buf[i].addr;
+ } else {
+ struct page *page;
+
+ len = sg_dma_len(&chunk->sg[i]);
+ dma_addr = sg_dma_address(&chunk->sg[i]);
+
+ /* XXX: we should never do this for highmem
+ * allocation. This function either needs
+ * to be split, or the kernel virtual address
+ * return needs to be made optional.
+ */
+ page = sg_page(&chunk->sg[i]);
+ addr = lowmem_page_address(page);
}
+
+ if (dma_handle && dma_offset >= 0) {
+ if (len > dma_offset)
+ *dma_handle = dma_addr + dma_offset;
+ dma_offset -= len;
+ }
+
/*
* DMA mapping can merge pages but not split them,
* so if we found the page, dma_handle has already
* been assigned to.
*/
- if (chunk->mem[i].length > offset) {
- page = sg_page(&chunk->mem[i]);
+ if (len > offset)
goto out;
- }
- offset -= chunk->mem[i].length;
+ offset -= len;
}
}
+ addr = NULL;
out:
mutex_unlock(&table->mutex);
- return page ? lowmem_page_address(page) + offset : NULL;
+ return addr ? addr + offset : NULL;
}
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h
index c9169a49..d199874 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.h
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.h
@@ -47,11 +47,21 @@ enum {
MLX4_ICM_PAGE_SIZE = 1 << MLX4_ICM_PAGE_SHIFT,
};
+struct mlx4_icm_buf {
+ void *addr;
+ size_t size;
+ dma_addr_t dma_addr;
+};
+
struct mlx4_icm_chunk {
struct list_head list;
int npages;
int nsg;
- struct scatterlist mem[MLX4_ICM_CHUNK_LEN];
+ bool coherent;
+ union {
+ struct scatterlist sg[MLX4_ICM_CHUNK_LEN];
+ struct mlx4_icm_buf buf[MLX4_ICM_CHUNK_LEN];
+ };
};
struct mlx4_icm {
@@ -114,12 +124,18 @@ static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
{
- return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
+ if (iter->chunk->coherent)
+ return iter->chunk->buf[iter->page_idx].dma_addr;
+ else
+ return sg_dma_address(&iter->chunk->sg[iter->page_idx]);
}
static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
{
- return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
+ if (iter->chunk->coherent)
+ return iter->chunk->buf[iter->page_idx].size;
+ else
+ return sg_dma_len(&iter->chunk->sg[iter->page_idx]);
}
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 16ceeb1..da52e60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -633,6 +633,7 @@ enum {
MLX5E_STATE_ASYNC_EVENTS_ENABLED,
MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING,
+ MLX5E_STATE_XDP_TX_ENABLED,
};
struct mlx5e_rqt {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
index ad6d471..4a33c9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
@@ -262,7 +262,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
int sq_num;
int i;
- if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state)))
+ /* this flag is sufficient, no need to test internal sq state */
+ if (unlikely(!mlx5e_xdp_tx_is_enabled(priv)))
return -ENETDOWN;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -275,9 +276,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
sq = &priv->channels.c[sq_num]->xdpsq;
- if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
- return -ENETDOWN;
-
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
struct mlx5e_xdp_info xdpi;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
index 6dfab04..4d09662 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
@@ -49,6 +49,23 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi);
int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
u32 flags);
+static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv)
+{
+ set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+}
+
+static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv)
+{
+ clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+ /* let other devices' napi(s) see our new state */
+ synchronize_rcu();
+}
+
+static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv)
+{
+ return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state);
+}
+
static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
{
struct mlx5_wq_cyc *wq = &sq->wq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 7365899..637d59c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1758,7 +1758,7 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq)
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
- return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+ return cpumask_first(priv->mdev->priv.irq_info[ix + MLX5_EQ_VEC_COMP_BASE].mask);
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
@@ -2890,6 +2890,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_build_tx2sq_maps(priv);
mlx5e_activate_channels(&priv->channels);
+ mlx5e_xdp_tx_enable(priv);
netif_tx_start_all_queues(priv->netdev);
if (MLX5_ESWITCH_MANAGER(priv->mdev))
@@ -2911,6 +2912,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
*/
netif_tx_stop_all_queues(priv->netdev);
netif_tx_disable(priv->netdev);
+ mlx5e_xdp_tx_disable(priv);
mlx5e_deactivate_channels(&priv->channels);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index c9cc974..701624a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -144,6 +144,7 @@ static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
+ s->tx_queue_dropped += sq_stats->dropped;
}
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 8262f09..d3f794d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -707,6 +707,8 @@ static u32 mlx5e_get_fcs(const struct sk_buff *skb)
return __get_unaligned_cpu32(fcs_bytes);
}
+#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+
static inline void mlx5e_handle_csum(struct net_device *netdev,
struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
@@ -725,6 +727,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
+ /* CQE csum doesn't cover padding octets in short Ethernet
+ * frames, and the pad field is appended prior to calculating
+ * and appending the FCS field.
+ *
+ * Detecting these padded frames requires verifying and parsing
+ * IP headers, so we simply force all those small frames to be
+ * CHECKSUM_UNNECESSARY even if they are not padded.
+ */
+ if (short_frame(skb->len))
+ goto csum_unnecessary;
+
if (likely(is_last_ethertype_ip(skb, &network_depth))) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
@@ -744,6 +757,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
+csum_unnecessary:
if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
(cqe->hds_ip_ext & CQE_L4_OK))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
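The comment above explains why frames at or below the minimum Ethernet size bypass CHECKSUM_COMPLETE. A minimal user-space sketch of the short_frame() cutoff, with the kernel constants mirrored locally (ETH_ZLEN = 60, ETH_FCS_LEN = 4) as assumptions of this sketch:

#include <stdbool.h>
#include <stdio.h>

#define ETH_ZLEN	60	/* minimum frame length without FCS (mirrors the kernel value) */
#define ETH_FCS_LEN	4	/* frame check sequence length (mirrors the kernel value) */

/* Same test as the short_frame() macro: true when pad octets may be present. */
static bool short_frame(unsigned int size)
{
	return size <= ETH_ZLEN + ETH_FCS_LEN;
}

int main(void)
{
	unsigned int lens[] = { 42, 64, 65, 1500 };

	for (unsigned int i = 0; i < 4; i++)
		printf("len %u -> %s\n", lens[i],
		       short_frame(lens[i]) ? "CHECKSUM_UNNECESSARY" : "CHECKSUM_COMPLETE");
	return 0;
}

Anything at or under 64 bytes is treated as potentially padded and routed to the csum_unnecessary label above.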
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 3092c59..9f7f842 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -96,6 +96,7 @@ struct mlx5e_tc_flow_parse_attr {
struct ip_tunnel_info tun_info;
struct mlx5_flow_spec spec;
int num_mod_hdr_actions;
+ int max_mod_hdr_actions;
void *mod_hdr_actions;
int mirred_ifindex;
};
@@ -1742,9 +1743,9 @@ static struct mlx5_fields fields[] = {
OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
};
-/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
- * max from the SW pedit action. On success, it says how many HW actions were
- * actually parsed.
+/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
+ * max from the SW pedit action. On success, attr->num_mod_hdr_actions
+ * says how many HW actions were actually parsed.
*/
static int offload_pedit_fields(struct pedit_headers *masks,
struct pedit_headers *vals,
@@ -1767,9 +1768,11 @@ static int offload_pedit_fields(struct pedit_headers *masks,
add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
- action = parse_attr->mod_hdr_actions;
- max_actions = parse_attr->num_mod_hdr_actions;
- nactions = 0;
+ action = parse_attr->mod_hdr_actions +
+ parse_attr->num_mod_hdr_actions * action_size;
+
+ max_actions = parse_attr->max_mod_hdr_actions;
+ nactions = parse_attr->num_mod_hdr_actions;
for (i = 0; i < ARRAY_SIZE(fields); i++) {
f = &fields[i];
@@ -1874,7 +1877,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
if (!parse_attr->mod_hdr_actions)
return -ENOMEM;
- parse_attr->num_mod_hdr_actions = max_actions;
+ parse_attr->max_mod_hdr_actions = max_actions;
return 0;
}
@@ -1918,9 +1921,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
goto out_err;
}
- err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
- if (err)
- goto out_err;
+ if (!parse_attr->mod_hdr_actions) {
+ err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
+ if (err)
+ goto out_err;
+ }
err = offload_pedit_fields(masks, vals, parse_attr);
if (err < 0)
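The pedit rework above keeps the allocated capacity in max_mod_hdr_actions and the entries already written in num_mod_hdr_actions, so a second pedit action appends after the first instead of overwriting it. A minimal sketch of that append-into-a-preallocated-buffer pattern; the names and ACTION_SIZE below are hypothetical, not the mlx5 action layout:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define ACTION_SIZE 16	/* hypothetical size of one hardware action */

struct action_buf {
	unsigned char *actions;	/* pre-allocated storage */
	int max_actions;	/* capacity, fixed at allocation time */
	int num_actions;	/* entries already in use */
};

static int action_buf_alloc(struct action_buf *b, int max_actions)
{
	b->actions = calloc(max_actions, ACTION_SIZE);
	if (!b->actions)
		return -ENOMEM;
	b->max_actions = max_actions;
	b->num_actions = 0;
	return 0;
}

/* Append one action; a later call continues where the previous one stopped. */
static int action_buf_append(struct action_buf *b, const void *action)
{
	if (b->num_actions >= b->max_actions)
		return -ENOSPC;
	memcpy(b->actions + b->num_actions * ACTION_SIZE, action, ACTION_SIZE);
	b->num_actions++;
	return 0;
}

int main(void)
{
	struct action_buf b;
	unsigned char set_action[ACTION_SIZE] = { 0x01 };

	if (action_buf_alloc(&b, 8))
		return 1;
	action_buf_append(&b, set_action);	/* first pedit action */
	action_buf_append(&b, set_action);	/* second one lands right after it */
	free(b.actions);
	return 0;
}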
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 6dacaeb..0b03d654 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi);
if (unlikely(contig_wqebbs_room < num_wqebbs)) {
+#ifdef CONFIG_MLX5_EN_IPSEC
+ struct mlx5_wqe_eth_seg cur_eth = wqe->eth;
+#endif
mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room);
mlx5e_sq_fetch_wqe(sq, &wqe, &pi);
+#ifdef CONFIG_MLX5_EN_IPSEC
+ wqe->eth = cur_eth;
+#endif
}
/* fill wqe */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 3f767cd..54f1a40 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -511,14 +511,14 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
ktime_to_ns(ktime_get_real()));
/* Calculate period in seconds to call the overflow watchdog - to make
- * sure counter is checked at least once every wrap around.
+ * sure counter is checked at least twice every wrap around.
* The period is calculated as the minimum between max HW cycles count
* (The clock source mask) and max amount of cycles that can be
* multiplied by clock multiplier where the result doesn't exceed
* 64bits.
*/
overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
- overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
+ overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3));
ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
frac, &frac);
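The hunk above shortens the overflow-watchdog period from half the counter mask to a third of it, so the free-running counter is sampled at least twice per wrap-around even when one scheduled check runs late. A standalone sketch of the same arithmetic; the mask and multiplier values are illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = (1ULL << 41) - 1;	/* example: 41-bit free-running counter */
	uint32_t mult = 24000000;		/* example cyclecounter multiplier */

	/* largest cycle count whose scaled value still fits in 63 bits */
	uint64_t overflow_cycles = (~0ULL >> 1) / mult;

	/* check at least twice per wrap: cap at a third of the mask, not half */
	uint64_t third = mask / 3;

	if (overflow_cycles > third)
		overflow_cycles = third;

	printf("counter wraps after %llu cycles, watchdog runs every %llu cycles\n",
	       (unsigned long long)mask, (unsigned long long)overflow_cycles);
	return 0;
}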
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index b5e9f66..563ce3f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -640,18 +640,19 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev)
static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
struct mlx5_priv *priv = &mdev->priv;
- int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+ int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
+ int irq = pci_irq_vector(mdev->pdev, vecidx);
- if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+ if (!zalloc_cpumask_var(&priv->irq_info[vecidx].mask, GFP_KERNEL)) {
mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
return -ENOMEM;
}
cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
- priv->irq_info[i].mask);
+ priv->irq_info[vecidx].mask);
if (IS_ENABLED(CONFIG_SMP) &&
- irq_set_affinity_hint(irq, priv->irq_info[i].mask))
+ irq_set_affinity_hint(irq, priv->irq_info[vecidx].mask))
mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq);
return 0;
@@ -659,11 +660,12 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
+ int vecidx = MLX5_EQ_VEC_COMP_BASE + i;
struct mlx5_priv *priv = &mdev->priv;
- int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i);
+ int irq = pci_irq_vector(mdev->pdev, vecidx);
irq_set_affinity_hint(irq, NULL);
- free_cpumask_var(priv->irq_info[i].mask);
+ free_cpumask_var(priv->irq_info[vecidx].mask);
}
static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index c7901a3..a903e97 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1367,10 +1367,10 @@ static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
- break;
+ return 0;
cond_resched();
} while (time_before(jiffies, end));
- return 0;
+ return -EBUSY;
}
static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
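With this change the soft-reset poll returns 0 as soon as the FW-ready magic is observed and reports -EBUSY only when the whole window expires, instead of unconditionally returning 0. A user-space sketch of that poll-with-deadline shape, with a dummy readiness check standing in for the FW_READY register read:

#include <errno.h>
#include <stdbool.h>
#include <time.h>

/* Dummy stand-in for reading and masking the FW_READY register. */
static bool fw_ready(void)
{
	static int polls;

	return ++polls > 3;	/* pretend the device becomes ready after a few polls */
}

static int wait_fw_ready(unsigned int timeout_ms)
{
	struct timespec ts = { .tv_nsec = 1000 * 1000 };	/* 1 ms per poll */
	unsigned int waited = 0;

	do {
		if (fw_ready())
			return 0;	/* ready: succeed immediately */
		nanosleep(&ts, NULL);
		waited++;
	} while (waited < timeout_ms);

	return -EBUSY;			/* device never became ready */
}

int main(void)
{
	return wait_fw_ready(100) ? 1 : 0;
}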
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index de821a9..a12b571 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -841,8 +841,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
bool configure = false;
bool pfc = false;
+ u16 thres_cells;
+ u16 delay_cells;
bool lossy;
- u16 thres;
for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
if (prio_tc[j] == i) {
@@ -856,10 +857,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
continue;
lossy = !(pfc || pause_en);
- thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
- delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
- pause_en);
- mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
+ thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
+ delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
+ pfc, pause_en);
+ mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
+ thres_cells, lossy);
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
@@ -4235,6 +4237,25 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
dev_put(mlxsw_sp_port->dev);
}
+static void
+mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
+{
+ struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
+ struct net_device *upper_dev;
+ struct list_head *iter;
+
+ if (netif_is_bridge_port(lag_dev))
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
+
+ netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
+ if (!netif_is_bridge_port(upper_dev))
+ continue;
+ br_dev = netdev_master_upper_dev_get(upper_dev);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
+ }
+}
+
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
char sldr_pl[MLXSW_REG_SLDR_LEN];
@@ -4427,6 +4448,10 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
/* Any VLANs configured on the port are no longer valid */
mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
+ /* Make the LAG and its directly linked uppers leave bridges they
+ * are members of
+ */
+ mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
if (lag->ref_count == 1)
mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
index e3c6fe8..1dcf152 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c
@@ -75,7 +75,15 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp,
act_set = mlxsw_afa_block_first_set(rulei->act_block);
mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl);
+ if (err)
+ goto err_ptce2_write;
+
+ return 0;
+
+err_ptce2_write:
+ cregion->ops->entry_remove(cregion, centry);
+ return err;
}
static void
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index e171513..30931a2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -95,8 +95,9 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
return -EIO;
- max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE);
- if (rulei->priority > max_priority)
+ /* Priority range is 1..cap_kvd_size-1. */
+ max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
+ if (rulei->priority >= max_priority)
return -EINVAL;
/* Unlike in TC, in HW, higher number means higher priority. */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 0d9ea37..af673ab 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -282,30 +282,6 @@ mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
kfree(bridge_port);
}
-static bool
-mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port *
- bridge_port)
-{
- struct net_device *dev = bridge_port->dev;
- struct mlxsw_sp *mlxsw_sp;
-
- if (is_vlan_dev(dev))
- mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev));
- else
- mlxsw_sp = mlxsw_sp_lower_get(dev);
-
- /* In case ports were pulled from out of a bridged LAG, then
- * it's possible the reference count isn't zero, yet the bridge
- * port should be destroyed, as it's no longer an upper of ours.
- */
- if (!mlxsw_sp && list_empty(&bridge_port->vlans_list))
- return true;
- else if (bridge_port->ref_count == 0)
- return true;
- else
- return false;
-}
-
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
struct net_device *brport_dev)
@@ -343,8 +319,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
{
struct mlxsw_sp_bridge_device *bridge_device;
- bridge_port->ref_count--;
- if (!mlxsw_sp_bridge_port_should_destroy(bridge_port))
+ if (--bridge_port->ref_count != 0)
return;
bridge_device = bridge_port->bridge_device;
mlxsw_sp_bridge_port_destroy(bridge_port);
@@ -1234,7 +1209,7 @@ mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
{
return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
- MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY;
+ MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
}
static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
@@ -1246,7 +1221,7 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
const char *mac, u16 fid, bool adding,
enum mlxsw_reg_sfd_rec_action action,
- bool dynamic)
+ enum mlxsw_reg_sfd_rec_policy policy)
{
char *sfd_pl;
u8 num_rec;
@@ -1257,8 +1232,7 @@ static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
return -ENOMEM;
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
- mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
- mac, fid, action, local_port);
+ mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
if (err)
@@ -1277,7 +1251,8 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool dynamic)
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
- MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
+ MLXSW_REG_SFD_REC_ACTION_NOP,
+ mlxsw_sp_sfd_rec_policy(dynamic));
}
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
@@ -1285,7 +1260,7 @@ int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
{
return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
- false);
+ MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 2fa1c05..92cd8ab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -1592,6 +1592,10 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
rx_prod.bd_prod = cpu_to_le16(bd_prod);
rx_prod.cqe_prod = cpu_to_le16(cq_prod);
+
+ /* Make sure chain element is updated before ringing the doorbell */
+ dma_wmb();
+
DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 20299f6..736e296 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
static int dwmac4_rx_check_timestamp(void *desc)
{
struct dma_desc *p = (struct dma_desc *)desc;
+ unsigned int rdes0 = le32_to_cpu(p->des0);
+ unsigned int rdes1 = le32_to_cpu(p->des1);
+ unsigned int rdes3 = le32_to_cpu(p->des3);
u32 own, ctxt;
int ret = 1;
- own = p->des3 & RDES3_OWN;
- ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+ own = rdes3 & RDES3_OWN;
+ ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
>> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
if (likely(!own && ctxt)) {
- if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+ if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
/* Corrupted value */
ret = -EINVAL;
else
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 2090903..1c39305 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -260,6 +260,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan)
{
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
+ u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
/* ABNORMAL interrupts */
@@ -279,8 +280,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
x->normal_irq_n++;
if (likely(intr_status & XGMAC_RI)) {
- u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
- if (likely(value & XGMAC_RIE)) {
+ if (likely(intr_en & XGMAC_RIE)) {
x->rx_normal_irq_n++;
ret |= handle_rx;
}
@@ -292,7 +292,7 @@ static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
}
/* Clear interrupts */
- writel(~0x0, ioaddr + XGMAC_DMA_CH_STATUS(chan));
+ writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
return ret;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 5710864..9caf79b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -692,25 +692,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
struct ethtool_eee *edata)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
- priv->eee_enabled = edata->eee_enabled;
-
- if (!priv->eee_enabled)
+ if (!edata->eee_enabled) {
stmmac_disable_eee_mode(priv);
- else {
+ } else {
/* We are asking for enabling the EEE but it is safe
* to verify all by invoking the eee_init function.
* In case of failure it will return an error.
*/
- priv->eee_enabled = stmmac_eee_init(priv);
- if (!priv->eee_enabled)
+ edata->eee_enabled = stmmac_eee_init(priv);
+ if (!edata->eee_enabled)
return -EOPNOTSUPP;
-
- /* Do not change tx_lpi_timer in case of failure */
- priv->tx_lpi_timer = edata->tx_lpi_timer;
}
- return phy_ethtool_set_eee(dev->phydev, edata);
+ ret = phy_ethtool_set_eee(dev->phydev, edata);
+ if (ret)
+ return ret;
+
+ priv->eee_enabled = edata->eee_enabled;
+ priv->tx_lpi_timer = edata->tx_lpi_timer;
+ return 0;
}
static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 2103b86..123b74e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3522,27 +3522,28 @@ static int stmmac_napi_poll(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, napi);
struct stmmac_priv *priv = ch->priv_data;
- int work_done = 0, work_rem = budget;
+ int work_done, rx_done = 0, tx_done = 0;
u32 chan = ch->index;
priv->xstats.napi_poll++;
- if (ch->has_tx) {
- int done = stmmac_tx_clean(priv, work_rem, chan);
+ if (ch->has_tx)
+ tx_done = stmmac_tx_clean(priv, budget, chan);
+ if (ch->has_rx)
+ rx_done = stmmac_rx(priv, budget, chan);
- work_done += done;
- work_rem -= done;
- }
+ work_done = max(rx_done, tx_done);
+ work_done = min(work_done, budget);
- if (ch->has_rx) {
- int done = stmmac_rx(priv, work_rem, chan);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ int stat;
- work_done += done;
- work_rem -= done;
- }
-
- if (work_done < budget && napi_complete_done(napi, work_done))
stmmac_enable_dma_irq(priv, priv->ioaddr, chan);
+ stat = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+ &priv->xstats, chan);
+ if (stat && napi_reschedule(napi))
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan);
+ }
return work_done;
}
@@ -4191,6 +4192,18 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
return ret;
}
+ /* Rx Watchdog is available in COREs newer than 3.40.
+ * In some cases, for example on buggy HW, this feature
+ * has to be disabled; this can be done by passing the
+ * riwt_off field from the platform.
+ */
+ if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
+ (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
+ priv->use_riwt = 1;
+ dev_info(priv->device,
+ "Enable RX Mitigation via HW Watchdog Timer\n");
+ }
+
return 0;
}
@@ -4323,18 +4336,6 @@ int stmmac_dvr_probe(struct device *device,
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
- /* Rx Watchdog is available in the COREs newer than the 3.40.
- * In some case, for example on bugged HW this feature
- * has to be disable and this can be done by passing the
- * riwt_off field from the platform.
- */
- if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
- (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
- priv->use_riwt = 1;
- dev_info(priv->device,
- "Enable RX Mitigation via HW Watchdog Timer\n");
- }
-
/* Setup channels NAPI */
maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index c54a50d..d819e8e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -299,7 +299,17 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
+ int i;
+
stmmac_dvr_remove(&pdev->dev);
+
+ for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ pcim_iounmap_regions(pdev, BIT(i));
+ break;
+ }
+
pci_disable_device(pdev);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 531294f..58ea18a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -301,6 +301,8 @@ static int tc_setup_cbs(struct stmmac_priv *priv,
/* Queue 0 is not AVB capable */
if (queue <= 0 || queue >= tx_queues_count)
return -EINVAL;
+ if (!priv->dma_cap.av)
+ return -EOPNOTSUPP;
if (priv->speed != SPEED_100 && priv->speed != SPEED_1000)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 9319d84..d845014 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -8100,6 +8100,8 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
start += 3;
prop_len = niu_pci_eeprom_read(np, start + 4);
+ if (prop_len < 0)
+ return prop_len;
err = niu_pci_vpd_get_propname(np, start + 5, namebuf, 64);
if (err < 0)
return err;
@@ -8144,8 +8146,12 @@ static int niu_pci_vpd_scan_props(struct niu *np, u32 start, u32 end)
netif_printk(np, probe, KERN_DEBUG, np->dev,
"VPD_SCAN: Reading in property [%s] len[%d]\n",
namebuf, prop_len);
- for (i = 0; i < prop_len; i++)
- *prop_buf++ = niu_pci_eeprom_read(np, off + i);
+ for (i = 0; i < prop_len; i++) {
+ err = niu_pci_eeprom_read(np, off + i);
+ if (err >= 0)
+ *prop_buf = err;
+ ++prop_buf;
+ }
}
start += len;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 493cd38..01711e6 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1406,9 +1406,13 @@ static void geneve_link_config(struct net_device *dev,
}
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6: {
- struct rt6_info *rt = rt6_lookup(geneve->net,
- &info->key.u.ipv6.dst, NULL, 0,
- NULL, 0);
+ struct rt6_info *rt;
+
+ if (!__in6_dev_get(dev))
+ break;
+
+ rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0,
+ NULL, 0);
if (rt && rt->dst.dev)
ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN;
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 29aa8d7..59b3f1f 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -896,14 +896,14 @@ static void decode_txts(struct dp83640_private *dp83640,
struct phy_txts *phy_txts)
{
struct skb_shared_hwtstamps shhwtstamps;
+ struct dp83640_skb_info *skb_info;
struct sk_buff *skb;
- u64 ns;
u8 overflow;
+ u64 ns;
/* We must already have the skb that triggered this. */
-
+again:
skb = skb_dequeue(&dp83640->tx_queue);
-
if (!skb) {
pr_debug("have timestamp but tx_queue empty\n");
return;
@@ -918,6 +918,11 @@ static void decode_txts(struct dp83640_private *dp83640,
}
return;
}
+ skb_info = (struct dp83640_skb_info *)skb->cb;
+ if (time_after(jiffies, skb_info->tmo)) {
+ kfree_skb(skb);
+ goto again;
+ }
ns = phy2txts(phy_txts);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
@@ -1470,6 +1475,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
static void dp83640_txtstamp(struct phy_device *phydev,
struct sk_buff *skb, int type)
{
+ struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
struct dp83640_private *dp83640 = phydev->priv;
switch (dp83640->hwts_tx_en) {
@@ -1482,6 +1488,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
/* fall through */
case HWTSTAMP_TX_ON:
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
skb_queue_tail(&dp83640->tx_queue, skb);
break;
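The dp83640 change stamps each queued skb with a deadline (jiffies + SKB_TIMESTAMP_TIMEOUT) and frees stale entries before trying to match them against a hardware timestamp. A sketch of the wraparound-safe deadline test that time_after() performs, assuming a 32-bit tick counter purely for illustration:

#include <stdbool.h>
#include <stdint.h>

/* Wraparound-safe "a is after b" -- the same idea as the kernel's time_after(). */
static bool tick_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

struct queued_entry {
	uint32_t tmo;	/* deadline in ticks, recorded when the entry was queued */
	/* ... payload ... */
};

/* True when the entry is stale and should be dropped rather than matched. */
static bool queued_entry_expired(const struct queued_entry *e, uint32_t now)
{
	return tick_after(now, e->tmo);
}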
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index d71be15..73813c7 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -868,8 +868,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
/* SGMII-to-Copper mode initialization */
if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
- u32 pause;
-
/* Select page 18 */
err = marvell_set_page(phydev, 18);
if (err < 0)
@@ -892,16 +890,6 @@ static int m88e1510_config_init(struct phy_device *phydev)
err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE);
if (err < 0)
return err;
-
- /* There appears to be a bug in the 88e1512 when used in
- * SGMII to copper mode, where the AN advertisement register
- * clears the pause bits each time a negotiation occurs.
- * This means we can never be truely sure what was advertised,
- * so disable Pause support.
- */
- pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- phydev->supported &= ~pause;
- phydev->advertising &= ~pause;
}
return m88e1318_config_init(phydev);
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 70f3f90..2787e8b 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -502,6 +502,17 @@ static void phylink_run_resolve(struct phylink *pl)
queue_work(system_power_efficient_wq, &pl->resolve);
}
+static void phylink_run_resolve_and_disable(struct phylink *pl, int bit)
+{
+ unsigned long state = pl->phylink_disable_state;
+
+ set_bit(bit, &pl->phylink_disable_state);
+ if (state == 0) {
+ queue_work(system_power_efficient_wq, &pl->resolve);
+ flush_work(&pl->resolve);
+ }
+}
+
static void phylink_fixed_poll(struct timer_list *t)
{
struct phylink *pl = container_of(t, struct phylink, link_poll);
@@ -955,9 +966,7 @@ void phylink_stop(struct phylink *pl)
if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio))
del_timer_sync(&pl->link_poll);
- set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state);
- queue_work(system_power_efficient_wq, &pl->resolve);
- flush_work(&pl->resolve);
+ phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED);
}
EXPORT_SYMBOL_GPL(phylink_stop);
@@ -1664,9 +1673,7 @@ static void phylink_sfp_link_down(void *upstream)
ASSERT_RTNL();
- set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state);
- queue_work(system_power_efficient_wq, &pl->resolve);
- flush_work(&pl->resolve);
+ phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK);
}
static void phylink_sfp_link_up(void *upstream)
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index ad9db65..fef701b 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus)
return ret;
}
}
+ bus->socket_ops->attach(bus->sfp);
if (bus->started)
bus->socket_ops->start(bus->sfp);
bus->netdev->sfp_bus = bus;
@@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
if (bus->registered) {
if (bus->started)
bus->socket_ops->stop(bus->sfp);
+ bus->socket_ops->detach(bus->sfp);
if (bus->phydev && ops && ops->disconnect_phy)
ops->disconnect_phy(bus->upstream);
}
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index fd8bb99..68c8fbf 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -184,6 +184,7 @@ struct sfp {
struct gpio_desc *gpio[GPIO_MAX];
+ bool attached;
unsigned int state;
struct delayed_work poll;
struct delayed_work timeout;
@@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
*/
switch (sfp->sm_mod_state) {
default:
- if (event == SFP_E_INSERT) {
+ if (event == SFP_E_INSERT && sfp->attached) {
sfp_module_tx_disable(sfp);
sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT);
}
@@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
mutex_unlock(&sfp->sm_mutex);
}
+static void sfp_attach(struct sfp *sfp)
+{
+ sfp->attached = true;
+ if (sfp->state & SFP_F_PRESENT)
+ sfp_sm_event(sfp, SFP_E_INSERT);
+}
+
+static void sfp_detach(struct sfp *sfp)
+{
+ sfp->attached = false;
+ sfp_sm_event(sfp, SFP_E_REMOVE);
+}
+
static void sfp_start(struct sfp *sfp)
{
sfp_sm_event(sfp, SFP_E_DEV_UP);
@@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee,
}
static const struct sfp_socket_ops sfp_module_ops = {
+ .attach = sfp_attach,
+ .detach = sfp_detach,
.start = sfp_start,
.stop = sfp_stop,
.module_info = sfp_module_info,
@@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev)
dev_info(sfp->dev, "Host maximum power %u.%uW\n",
sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
- sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
- if (!sfp->sfp_bus)
- return -ENOMEM;
-
/* Get the initial state, and always signal TX disable,
* since the network interface will not be up.
*/
@@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev)
sfp->state |= SFP_F_RATE_SELECT;
sfp_set_state(sfp, sfp->state);
sfp_module_tx_disable(sfp);
- rtnl_lock();
- if (sfp->state & SFP_F_PRESENT)
- sfp_sm_event(sfp, SFP_E_INSERT);
- rtnl_unlock();
for (i = 0; i < GPIO_MAX; i++) {
if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i])
@@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev)
dev_warn(sfp->dev,
"No tx_disable pin: SFP modules will always be emitting.\n");
+ sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
+ if (!sfp->sfp_bus)
+ return -ENOMEM;
+
return 0;
}
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h
index 31b0acf..64f54b0 100644
--- a/drivers/net/phy/sfp.h
+++ b/drivers/net/phy/sfp.h
@@ -7,6 +7,8 @@
struct sfp;
struct sfp_socket_ops {
+ void (*attach)(struct sfp *sfp);
+ void (*detach)(struct sfp *sfp);
void (*start)(struct sfp *sfp);
void (*stop)(struct sfp *sfp);
int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo);
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 74a8782..bd6084e 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
u16 val = 0;
int err;
- err = priv->phy_drv->read_status(phydev);
+ if (priv->phy_drv->read_status)
+ err = priv->phy_drv->read_status(phydev);
+ else
+ err = genphy_read_status(phydev);
if (err < 0)
return err;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 4b6572f..723814d 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team,
}
}
-static bool __team_option_inst_tmp_find(const struct list_head *opts,
- const struct team_option_inst *needle)
-{
- struct team_option_inst *opt_inst;
-
- list_for_each_entry(opt_inst, opts, tmp_list)
- if (opt_inst == needle)
- return true;
- return false;
-}
-
static int __team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
@@ -2463,7 +2452,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
int err = 0;
int i;
struct nlattr *nl_option;
- LIST_HEAD(opt_inst_list);
rtnl_lock();
@@ -2483,6 +2471,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
struct nlattr *attr;
struct nlattr *attr_data;
+ LIST_HEAD(opt_inst_list);
enum team_option_type opt_type;
int opt_port_ifindex = 0; /* != 0 for per-port options */
u32 opt_array_index = 0;
@@ -2587,23 +2576,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
if (err)
goto team_put;
opt_inst->changed = true;
-
- /* dumb/evil user-space can send us duplicate opt,
- * keep only the last one
- */
- if (__team_option_inst_tmp_find(&opt_inst_list,
- opt_inst))
- continue;
-
list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {
err = -ENOENT;
goto team_put;
}
- }
- err = team_nl_send_event_options_get(team, &opt_inst_list);
+ err = team_nl_send_event_options_get(team, &opt_inst_list);
+ if (err)
+ break;
+ }
team_put:
team_nl_team_put(team);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index f2d01cb..6e97162 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1295,6 +1295,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->features |= NETIF_F_RXCSUM;
dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
+ set_bit(EVENT_NO_IP_ALIGN, &dev->flags);
smsc95xx_init_mac_address(dev);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 42feaa4..c88ee37 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -502,6 +502,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
struct bpf_prog *xdp_prog;
struct send_queue *sq;
unsigned int len;
+ int packets = 0;
+ int bytes = 0;
int drops = 0;
int kicks = 0;
int ret, err;
@@ -525,10 +527,18 @@ static int virtnet_xdp_xmit(struct net_device *dev,
/* Free up any pending old buffers before queueing new ones. */
while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) {
- if (likely(is_xdp_frame(ptr)))
- xdp_return_frame(ptr_to_xdp(ptr));
- else
- napi_consume_skb(ptr, false);
+ if (likely(is_xdp_frame(ptr))) {
+ struct xdp_frame *frame = ptr_to_xdp(ptr);
+
+ bytes += frame->len;
+ xdp_return_frame(frame);
+ } else {
+ struct sk_buff *skb = ptr;
+
+ bytes += skb->len;
+ napi_consume_skb(skb, false);
+ }
+ packets++;
}
for (i = 0; i < n; i++) {
@@ -548,6 +558,8 @@ static int virtnet_xdp_xmit(struct net_device *dev,
}
out:
u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.bytes += bytes;
+ sq->stats.packets += packets;
sq->stats.xdp_tx += n;
sq->stats.xdp_tx_drops += drops;
sq->stats.kicks += kicks;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 27bd586..9fc9aed6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2003,7 +2003,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
struct pcpu_sw_netstats *tx_stats, *rx_stats;
union vxlan_addr loopback;
union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
- struct net_device *dev = skb->dev;
+ struct net_device *dev;
int len = skb->len;
tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
@@ -2023,9 +2023,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
#endif
}
+ rcu_read_lock();
+ dev = skb->dev;
+ if (unlikely(!(dev->flags & IFF_UP))) {
+ kfree_skb(skb);
+ goto drop;
+ }
+
if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
- vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0,
- vni);
+ vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->tx_packets++;
@@ -2038,8 +2044,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
rx_stats->rx_bytes += len;
u64_stats_update_end(&rx_stats->syncp);
} else {
+drop:
dev->stats.rx_dropped++;
}
+ rcu_read_unlock();
}
static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index c40cd12..5210cff 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -532,6 +532,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.hw_ops = &wcn3990_ops,
.decap_align_bytes = 1,
.num_peers = TARGET_HL_10_TLV_NUM_PEERS,
+ .n_cipher_suites = 8,
.ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT,
.num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES,
.target_64bit = true,
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 21ba209..0fca44e 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -272,7 +272,7 @@ struct ath_node {
#endif
u8 key_idx[4];
- u32 ackto;
+ int ackto;
struct list_head list;
};
diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c
index 7334c9b0..6e236a4 100644
--- a/drivers/net/wireless/ath/ath9k/dynack.c
+++ b/drivers/net/wireless/ath/ath9k/dynack.c
@@ -29,9 +29,13 @@
* ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation
*
*/
-static inline u32 ath_dynack_ewma(u32 old, u32 new)
+static inline int ath_dynack_ewma(int old, int new)
{
- return (new * (EWMA_DIV - EWMA_LEVEL) + old * EWMA_LEVEL) / EWMA_DIV;
+ if (old > 0)
+ return (new * (EWMA_DIV - EWMA_LEVEL) +
+ old * EWMA_LEVEL) / EWMA_DIV;
+ else
+ return new;
}
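Making ackto signed lets a negative value mean "no estimate yet", so the EWMA seeds itself from the first sample instead of averaging it with zero. A standalone sketch of that update; the EWMA_LEVEL/EWMA_DIV weights below are illustrative, not necessarily the values defined in dynack.h:

#include <stdio.h>

#define EWMA_LEVEL	96	/* weight of the old estimate (illustrative) */
#define EWMA_DIV	128

/* Same shape as the patched ath_dynack_ewma(): seed from the first sample. */
static int ewma(int old, int new)
{
	if (old > 0)
		return (new * (EWMA_DIV - EWMA_LEVEL) + old * EWMA_LEVEL) / EWMA_DIV;
	return new;
}

int main(void)
{
	int ackto = -1;				/* "no estimate yet" marker */
	int samples[] = { 64, 80, 72, 68 };

	for (int i = 0; i < 4; i++) {
		ackto = ewma(ackto, samples[i]);
		printf("sample %d -> ackto %d\n", samples[i], ackto);
	}
	return 0;
}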
/**
@@ -82,10 +86,10 @@ static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac)
*/
static void ath_dynack_compute_ackto(struct ath_hw *ah)
{
- struct ath_node *an;
- u32 to = 0;
- struct ath_dynack *da = &ah->dynack;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_dynack *da = &ah->dynack;
+ struct ath_node *an;
+ int to = 0;
list_for_each_entry(an, &da->nodes, list)
if (an->ackto > to)
@@ -144,7 +148,8 @@ static void ath_dynack_compute_to(struct ath_hw *ah)
an->ackto = ath_dynack_ewma(an->ackto,
ackto);
ath_dbg(ath9k_hw_common(ah), DYNACK,
- "%pM to %u\n", dst, an->ackto);
+ "%pM to %d [%u]\n", dst,
+ an->ackto, ackto);
if (time_is_before_jiffies(da->lto)) {
ath_dynack_compute_ackto(ah);
da->lto = jiffies + COMPUTE_TO;
@@ -166,10 +171,12 @@ static void ath_dynack_compute_to(struct ath_hw *ah)
* @ah: ath hw
* @skb: socket buffer
* @ts: tx status info
+ * @sta: station pointer
*
*/
void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
- struct ath_tx_status *ts)
+ struct ath_tx_status *ts,
+ struct ieee80211_sta *sta)
{
u8 ridx;
struct ieee80211_hdr *hdr;
@@ -177,7 +184,7 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- if ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !da->enabled)
+ if (!da->enabled || (info->flags & IEEE80211_TX_CTL_NO_ACK))
return;
spin_lock_bh(&da->qlock);
@@ -187,11 +194,19 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
/* late ACK */
if (ts->ts_status & ATH9K_TXERR_XRETRY) {
if (ieee80211_is_assoc_req(hdr->frame_control) ||
- ieee80211_is_assoc_resp(hdr->frame_control)) {
+ ieee80211_is_assoc_resp(hdr->frame_control) ||
+ ieee80211_is_auth(hdr->frame_control)) {
ath_dbg(common, DYNACK, "late ack\n");
+
ath9k_hw_setslottime(ah, (LATEACK_TO - 3) / 2);
ath9k_hw_set_ack_timeout(ah, LATEACK_TO);
ath9k_hw_set_cts_timeout(ah, LATEACK_TO);
+ if (sta) {
+ struct ath_node *an;
+
+ an = (struct ath_node *)sta->drv_priv;
+ an->ackto = -1;
+ }
da->lto = jiffies + LATEACK_DELAY;
}
@@ -251,7 +266,7 @@ void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb,
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- if (!ath_dynack_bssidmask(ah, hdr->addr1) || !da->enabled)
+ if (!da->enabled || !ath_dynack_bssidmask(ah, hdr->addr1))
return;
spin_lock_bh(&da->qlock);
diff --git a/drivers/net/wireless/ath/ath9k/dynack.h b/drivers/net/wireless/ath/ath9k/dynack.h
index 6d7bef9..cf60224 100644
--- a/drivers/net/wireless/ath/ath9k/dynack.h
+++ b/drivers/net/wireless/ath/ath9k/dynack.h
@@ -86,7 +86,8 @@ void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an);
void ath_dynack_init(struct ath_hw *ah);
void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, u32 ts);
void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb,
- struct ath_tx_status *ts);
+ struct ath_tx_status *ts,
+ struct ieee80211_sta *sta);
#else
static inline void ath_dynack_init(struct ath_hw *ah) {}
static inline void ath_dynack_node_init(struct ath_hw *ah,
@@ -97,7 +98,8 @@ static inline void ath_dynack_sample_ack_ts(struct ath_hw *ah,
struct sk_buff *skb, u32 ts) {}
static inline void ath_dynack_sample_tx_ts(struct ath_hw *ah,
struct sk_buff *skb,
- struct ath_tx_status *ts) {}
+ struct ath_tx_status *ts,
+ struct ieee80211_sta *sta) {}
#endif
#endif /* DYNACK_H */
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 43b6c85..4b7a7fc 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -629,7 +629,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (bf == bf->bf_lastbf)
ath_dynack_sample_tx_ts(sc->sc_ah,
bf->bf_mpdu,
- ts);
+ ts, sta);
}
ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts,
@@ -773,7 +773,8 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
memcpy(info->control.rates, bf->rates,
sizeof(info->control.rates));
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
- ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts);
+ ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts,
+ sta);
}
ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok);
} else
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 9fedf65..582aae0 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -934,99 +934,6 @@ int cnss_pci_unregister_driver_hdlr(struct cnss_pci_data *pci_priv)
return 0;
}
-static int cnss_pci_init_smmu(struct cnss_pci_data *pci_priv)
-{
- int ret = 0;
- struct device *dev;
- struct dma_iommu_mapping *mapping;
- int atomic_ctx = 1, s1_bypass = 1, fast = 1, cb_stall_disable = 1,
- no_cfre = 1;
-
- cnss_pr_dbg("Initializing SMMU\n");
-
- dev = &pci_priv->pci_dev->dev;
-
- mapping = __depr_arm_iommu_create_mapping(dev->bus,
- pci_priv->smmu_iova_start,
- pci_priv->smmu_iova_len);
- if (IS_ERR(mapping)) {
- ret = PTR_ERR(mapping);
- cnss_pr_err("Failed to create SMMU mapping, err = %d\n", ret);
- goto out;
- }
-
- if (pci_priv->smmu_s1_enable) {
- cnss_pr_dbg("Enabling SMMU S1 stage\n");
-
- ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_ATOMIC,
- &atomic_ctx);
- if (ret) {
- pr_err("Failed to set SMMU atomic_ctx attribute, err = %d\n",
- ret);
- goto release_mapping;
- }
-
- ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_FAST,
- &fast);
- if (ret) {
- pr_err("Failed to set SMMU fast attribute, err = %d\n",
- ret);
- goto release_mapping;
- }
-
- ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_CB_STALL_DISABLE,
- &cb_stall_disable);
- if (ret) {
- pr_err("Failed to set SMMU cb_stall_disable attribute, err = %d\n",
- ret);
- goto release_mapping;
- }
-
- ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_NO_CFRE,
- &no_cfre);
- if (ret) {
- pr_err("Failed to set SMMU no_cfre attribute, err = %d\n",
- ret);
- goto release_mapping;
- }
- } else {
- ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_S1_BYPASS,
- &s1_bypass);
- if (ret) {
- pr_err("Failed to set SMMU s1_bypass attribute, err = %d\n",
- ret);
- goto release_mapping;
- }
- }
-
- ret = __depr_arm_iommu_attach_device(dev, mapping);
- if (ret) {
- pr_err("Failed to attach SMMU device, err = %d\n", ret);
- goto release_mapping;
- }
-
- pci_priv->smmu_mapping = mapping;
-
- return ret;
-release_mapping:
- __depr_arm_iommu_release_mapping(mapping);
-out:
- return ret;
-}
-
-static void cnss_pci_deinit_smmu(struct cnss_pci_data *pci_priv)
-{
- __depr_arm_iommu_detach_device(&pci_priv->pci_dev->dev);
- __depr_arm_iommu_release_mapping(pci_priv->smmu_mapping);
-
- pci_priv->smmu_mapping = NULL;
-}
-
static void cnss_pci_event_cb(struct msm_pcie_notify *notify)
{
unsigned long flags;
@@ -1709,10 +1616,21 @@ struct dma_iommu_mapping *cnss_smmu_get_mapping(struct device *dev)
if (!pci_priv)
return NULL;
- return pci_priv->smmu_mapping;
+ return &pci_priv->smmu_mapping;
}
EXPORT_SYMBOL(cnss_smmu_get_mapping);
+struct iommu_domain *cnss_smmu_get_domain(struct device *dev)
+{
+ struct cnss_pci_data *pci_priv = cnss_get_pci_priv(to_pci_dev(dev));
+
+ if (!pci_priv)
+ return NULL;
+
+ return pci_priv->iommu_domain;
+}
+EXPORT_SYMBOL(cnss_smmu_get_domain);
+
int cnss_smmu_map(struct device *dev,
phys_addr_t paddr, uint32_t *iova_addr, size_t size)
{
@@ -1742,7 +1660,7 @@ int cnss_smmu_map(struct device *dev,
return -ENOMEM;
}
- ret = iommu_map(pci_priv->smmu_mapping->domain, iova,
+ ret = iommu_map(pci_priv->iommu_domain, iova,
rounddown(paddr, PAGE_SIZE), len,
IOMMU_READ | IOMMU_WRITE);
if (ret) {
@@ -2615,6 +2533,9 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
struct cnss_pci_data *pci_priv;
struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(NULL);
struct resource *res;
+ struct device_node *of_node;
+ const char *iommu_dma_type;
+ u32 addr_win[2];
cnss_pr_dbg("PCI is probing, vendor ID: 0x%x, device ID: 0x%x\n",
id->vendor, pci_dev->device);
@@ -2645,16 +2566,34 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
if (ret)
goto unregister_subsys;
- res = platform_get_resource_byname(plat_priv->plat_dev, IORESOURCE_MEM,
- "smmu_iova_base");
- if (res) {
- if (of_property_read_bool(plat_priv->plat_dev->dev.of_node,
- "qcom,smmu-s1-enable"))
- pci_priv->smmu_s1_enable = true;
+ of_node = of_parse_phandle(pci_priv->pci_dev->dev.of_node,
+ "qcom,iommu-group", 0);
+ if (of_node) {
+ pci_priv->iommu_domain =
+ iommu_get_domain_for_dev(&pci_dev->dev);
+ pci_priv->smmu_mapping.domain = pci_priv->iommu_domain;
+ ret = of_property_read_string(of_node, "qcom,iommu-dma",
+ &iommu_dma_type);
+ if (!ret) {
+ if (!strcmp("fastmap", iommu_dma_type)) {
+ cnss_pr_dbg("Enabling SMMU S1 stage\n");
+ pci_priv->smmu_s1_enable = true;
+ }
+ }
- pci_priv->smmu_iova_start = res->start;
- pci_priv->smmu_iova_len = resource_size(res);
- cnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: %zu\n",
+ ret = of_property_read_u32_array(of_node,
+ "qcom,iommu-dma-addr-pool",
+ addr_win,
+ ARRAY_SIZE(addr_win));
+ if (ret) {
+ cnss_pr_err("Invalid smmu size window, ret %d\n", ret);
+ of_node_put(of_node);
+ goto unregister_ramdump;
+ }
+
+ pci_priv->smmu_iova_start = addr_win[0];
+ pci_priv->smmu_iova_len = addr_win[1];
+ cnss_pr_dbg("smmu_iova_start: %pa, smmu_iova_len: 0x%zx\n",
&pci_priv->smmu_iova_start,
pci_priv->smmu_iova_len);
@@ -2664,22 +2603,17 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
if (res) {
pci_priv->smmu_iova_ipa_start = res->start;
pci_priv->smmu_iova_ipa_len = resource_size(res);
- cnss_pr_dbg("smmu_iova_ipa_start: %pa, smmu_iova_ipa_len: %zu\n",
+ cnss_pr_dbg("smmu_iova_ipa_start: %pa, smmu_iova_ipa_len: 0x%zx\n",
&pci_priv->smmu_iova_ipa_start,
pci_priv->smmu_iova_ipa_len);
}
-
- ret = cnss_pci_init_smmu(pci_priv);
- if (ret) {
- cnss_pr_err("Failed to init SMMU, err = %d\n", ret);
- goto unregister_ramdump;
- }
+ of_node_put(of_node);
}
ret = cnss_reg_pci_event(pci_priv);
if (ret) {
cnss_pr_err("Failed to register PCI event, err = %d\n", ret);
- goto deinit_smmu;
+ goto unregister_ramdump;
}
ret = cnss_pci_enable_bus(pci_priv);
@@ -2733,10 +2667,9 @@ static int cnss_pci_probe(struct pci_dev *pci_dev,
cnss_pci_disable_bus(pci_priv);
dereg_pci_event:
cnss_dereg_pci_event(pci_priv);
-deinit_smmu:
- if (pci_priv->smmu_mapping)
- cnss_pci_deinit_smmu(pci_priv);
unregister_ramdump:
+ pci_priv->iommu_domain = NULL;
+ pci_priv->smmu_mapping.domain = NULL;
cnss_unregister_ramdump(plat_priv);
unregister_subsys:
cnss_unregister_subsys(plat_priv);
@@ -2770,8 +2703,8 @@ static void cnss_pci_remove(struct pci_dev *pci_dev)
cnss_pci_disable_bus(pci_priv);
cnss_dereg_pci_event(pci_priv);
- if (pci_priv->smmu_mapping)
- cnss_pci_deinit_smmu(pci_priv);
+ pci_priv->iommu_domain = NULL;
+ pci_priv->smmu_mapping.domain = NULL;
cnss_unregister_ramdump(plat_priv);
cnss_unregister_subsys(plat_priv);
plat_priv->bus_priv = NULL;
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 87e0c1b..43e42d1 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -56,7 +56,8 @@ struct cnss_pci_data {
struct msm_pcie_register_event msm_pci_event;
atomic_t auto_suspended;
u8 monitor_wake_intr;
- struct dma_iommu_mapping *smmu_mapping;
+ struct dma_iommu_mapping smmu_mapping;
+ struct iommu_domain *iommu_domain;
u8 smmu_s1_enable;
dma_addr_t smmu_iova_start;
size_t smmu_iova_len;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 55594c9..47dbd2d 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -442,7 +442,7 @@ struct iwl_he_backoff_conf {
* Support for Nss x BW (or RU) matrix:
* (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
* Each entry contains 2 QAM thresholds for 8us and 16us:
- * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES
+ * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE
* i.e. QAM_th1 < QAM_th2 such if TX uses QAM_tx:
* QAM_tx < QAM_th1 --> PPE=0us
* QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 9a764af..0f357e8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -1997,7 +1997,13 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
- /* If PPE Thresholds exist, parse them into a FW-familiar format */
+ /*
+ * Initialize the PPE thresholds to "None" (7), as described in Table
+ * 9-262ac of 802.11ax/D3.0.
+ */
+ memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext));
+
+ /* If PPE Thresholds exist, parse them into a FW-familiar format. */
if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
u8 nss = (sta->he_cap.ppe_thres[0] &
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 8169d14..d1c1a80 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -98,8 +98,12 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta)
{
struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
u8 supp = 0;
+ if (he_cap && he_cap->has_he)
+ return 0;
+
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ);
if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
index 374cc65..16e6b69 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2_dfs.c
@@ -799,7 +799,7 @@ static void mt76x2_dfs_set_bbp_params(struct mt76x2_dev *dev)
/* enable detection*/
mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16);
- mt76_wr(dev, 0x212c, 0x0c350001);
+ mt76_wr(dev, MT_BBP(IBI, 11), 0x0c350001);
}
void mt76x2_dfs_adjust_agc(struct mt76x2_dev *dev)
@@ -842,7 +842,11 @@ void mt76x2_dfs_init_params(struct mt76x2_dev *dev)
mt76_wr(dev, MT_BBP(DFS, 0), 0);
/* clear detector status */
mt76_wr(dev, MT_BBP(DFS, 1), 0xf);
- mt76_wr(dev, 0x212c, 0);
+ if (mt76_chip(&dev->mt76) == 0x7610 ||
+ mt76_chip(&dev->mt76) == 0x7630)
+ mt76_wr(dev, MT_BBP(IBI, 11), 0xfde8081);
+ else
+ mt76_wr(dev, MT_BBP(IBI, 11), 0);
mt76x2_irq_disable(dev, MT_INT_GPTIMER);
mt76_rmw_field(dev, MT_INT_TIMER_EN,
diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c
index 67213f1..0a9eac9 100644
--- a/drivers/net/wireless/st/cw1200/scan.c
+++ b/drivers/net/wireless/st/cw1200/scan.c
@@ -78,6 +78,10 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS)
return -EINVAL;
+ /* will be unlocked in cw1200_scan_work() */
+ down(&priv->scan.lock);
+ mutex_lock(&priv->conf_mutex);
+
frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0,
req->ie_len);
if (!frame.skb)
@@ -86,19 +90,15 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
if (req->ie_len)
skb_put_data(frame.skb, req->ie, req->ie_len);
- /* will be unlocked in cw1200_scan_work() */
- down(&priv->scan.lock);
- mutex_lock(&priv->conf_mutex);
-
ret = wsm_set_template_frame(priv, &frame);
if (!ret) {
/* Host want to be the probe responder. */
ret = wsm_set_probe_responder(priv, true);
}
if (ret) {
+ dev_kfree_skb(frame.skb);
mutex_unlock(&priv->conf_mutex);
up(&priv->scan.lock);
- dev_kfree_skb(frame.skb);
return ret;
}
@@ -120,10 +120,9 @@ int cw1200_hw_scan(struct ieee80211_hw *hw,
++priv->scan.n_ssids;
}
- mutex_unlock(&priv->conf_mutex);
-
if (frame.skb)
dev_kfree_skb(frame.skb);
+ mutex_unlock(&priv->conf_mutex);
queue_work(priv->workqueue, &priv->scan.work);
return 0;
}
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index 64b2186..3a93e4d 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -530,8 +530,10 @@ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev,
SET_NETDEV_DEV(dev, &priv->lowerdev->dev);
dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL);
- if (!dev->ieee80211_ptr)
+ if (!dev->ieee80211_ptr) {
+ err = -ENOMEM;
goto remove_handler;
+ }
dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION;
dev->ieee80211_ptr->wiphy = common_wiphy;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index e5bddae..e0d2b74 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2095,7 +2095,7 @@ static void nvme_init_subnqn(struct nvme_subsystem *subsys, struct nvme_ctrl *ct
/* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
off = snprintf(subsys->subnqn, NVMF_NQN_SIZE,
- "nqn.2014.08.org.nvmexpress:%4x%4x",
+ "nqn.2014.08.org.nvmexpress:%04x%04x",
le16_to_cpu(id->vid), le16_to_cpu(id->ssvid));
memcpy(subsys->subnqn + off, id->sn, sizeof(id->sn));
off += sizeof(id->sn);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index c27af27..815509d 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -556,6 +556,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
return 0;
out_free_ana_log_buf:
kfree(ctrl->ana_log_buf);
+ ctrl->ana_log_buf = NULL;
out:
return error;
}
@@ -563,5 +564,6 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
{
kfree(ctrl->ana_log_buf);
+ ctrl->ana_log_buf = NULL;
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index d668682..f46313f 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -908,9 +908,11 @@ static void nvme_complete_cqes(struct nvme_queue *nvmeq, u16 start, u16 end)
static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
{
- if (++nvmeq->cq_head == nvmeq->q_depth) {
+ if (nvmeq->cq_head == nvmeq->q_depth - 1) {
nvmeq->cq_head = 0;
nvmeq->cq_phase = !nvmeq->cq_phase;
+ } else {
+ nvmeq->cq_head++;
}
}
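Checking for the wrap condition before incrementing means cq_head never transiently holds the out-of-range value q_depth, which the old post-increment form did for one statement. The same ring-buffer advance in isolation (illustrative names):

/* Advance a completion-queue head index without storing an out-of-range value. */
static inline unsigned int cq_head_advance(unsigned int head, unsigned int depth,
					   unsigned char *phase)
{
	if (head == depth - 1) {
		*phase = !*phase;	/* wrapped around: flip the phase bit */
		return 0;
	}
	return head + 1;
}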
@@ -1727,8 +1729,9 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
struct nvme_host_mem_buf_desc *desc = &dev->host_mem_descs[i];
size_t size = le32_to_cpu(desc->size) * dev->ctrl.page_size;
- dma_free_coherent(dev->dev, size, dev->host_mem_desc_bufs[i],
- le64_to_cpu(desc->addr));
+ dma_free_attrs(dev->dev, size, dev->host_mem_desc_bufs[i],
+ le64_to_cpu(desc->addr),
+ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
}
kfree(dev->host_mem_desc_bufs);
@@ -1794,8 +1797,9 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
while (--i >= 0) {
size_t size = le32_to_cpu(descs[i].size) * dev->ctrl.page_size;
- dma_free_coherent(dev->dev, size, bufs[i],
- le64_to_cpu(descs[i].addr));
+ dma_free_attrs(dev->dev, size, bufs[i],
+ le64_to_cpu(descs[i].addr),
+ DMA_ATTR_NO_KERNEL_MAPPING | DMA_ATTR_NO_WARN);
}
kfree(bufs);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index f091258..2653abf 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -274,6 +274,131 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
}
EXPORT_SYMBOL_GPL(of_irq_parse_raw);
+int of_irq_domain_map(const struct irq_fwspec *in, struct irq_fwspec *out)
+{
+ char *stem_name;
+ char *cells_name, *map_name = NULL, *mask_name = NULL;
+ char *pass_name = NULL;
+ struct device_node *cur, *new = NULL;
+ const __be32 *map, *mask, *pass;
+ static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
+ static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
+ __be32 initial_match_array[MAX_PHANDLE_ARGS];
+ const __be32 *match_array = initial_match_array;
+ int i, ret, map_len, match;
+ u32 in_size, out_size;
+
+ stem_name = "";
+ cells_name = "#interrupt-cells";
+
+ ret = -ENOMEM;
+ map_name = kasprintf(GFP_KERNEL, "irqdomain%s-map", stem_name);
+ if (!map_name)
+ goto free;
+
+ mask_name = kasprintf(GFP_KERNEL, "irqdomain%s-map-mask", stem_name);
+ if (!mask_name)
+ goto free;
+
+ pass_name = kasprintf(GFP_KERNEL, "irqdomain%s-map-pass-thru", stem_name);
+ if (!pass_name)
+ goto free;
+
+ /* Get the #interrupt-cells property */
+ cur = to_of_node(in->fwnode);
+ ret = of_property_read_u32(cur, cells_name, &in_size);
+ if (ret < 0)
+ goto put;
+
+ /* Precalculate the match array - this simplifies match loop */
+ for (i = 0; i < in_size; i++)
+ initial_match_array[i] = cpu_to_be32(in->param[i]);
+
+ ret = -EINVAL;
+ /* Get the irqdomain-map property */
+ map = of_get_property(cur, map_name, &map_len);
+ if (!map) {
+ ret = 0;
+ goto free;
+ }
+ map_len /= sizeof(u32);
+
+ /* Get the irqdomain-map-mask property (optional) */
+ mask = of_get_property(cur, mask_name, NULL);
+ if (!mask)
+ mask = dummy_mask;
+ /* Iterate through irqdomain-map property */
+ match = 0;
+ while (map_len > (in_size + 1) && !match) {
+ /* Compare specifiers */
+ match = 1;
+ for (i = 0; i < in_size; i++, map_len--)
+ match &= !((match_array[i] ^ *map++) & mask[i]);
+
+ of_node_put(new);
+ new = of_find_node_by_phandle(be32_to_cpup(map));
+ map++;
+ map_len--;
+
+ /* Check if not found */
+ if (!new)
+ goto put;
+
+ if (!of_device_is_available(new))
+ match = 0;
+
+ ret = of_property_read_u32(new, cells_name, &out_size);
+ if (ret)
+ goto put;
+
+ /* Check for malformed properties */
+ if (WARN_ON(out_size > MAX_PHANDLE_ARGS))
+ goto put;
+ if (map_len < out_size)
+ goto put;
+
+ /* Move forward by new node's #interrupt-cells amount */
+ map += out_size;
+ map_len -= out_size;
+ }
+ if (match) {
+ /* Get the irqdomain-map-pass-thru property (optional) */
+ pass = of_get_property(cur, pass_name, NULL);
+ if (!pass)
+ pass = dummy_pass;
+
+ /*
+ * Successfully parsed an irqdomain-map translation; copy the new
+ * specifier into the out structure, keeping the
+ * bits specified in irqdomain-map-pass-thru.
+ */
+ match_array = map - out_size;
+ for (i = 0; i < out_size; i++) {
+ __be32 val = *(map - out_size + i);
+
+ out->param[i] = in->param[i];
+ if (i < in_size) {
+ val &= ~pass[i];
+ val |= cpu_to_be32(out->param[i]) & pass[i];
+ }
+
+ out->param[i] = be32_to_cpu(val);
+ }
+ out->param_count = in_size = out_size;
+ out->fwnode = of_node_to_fwnode(new);
+ }
+put:
+ of_node_put(cur);
+ of_node_put(new);
+free:
+ kfree(mask_name);
+ kfree(map_name);
+ kfree(pass_name);
+
+ return ret;
+}
+EXPORT_SYMBOL(of_irq_domain_map);
+
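A hedged sketch of how a consumer might call the new of_irq_domain_map(): fill an irq_fwspec with the node that carries the irqdomain-map property and its specifier cells, then translate it into the parent domain's specifier. The node, cell count and cell values below are purely illustrative.

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static int demo_domain_map(struct device_node *np)
{
	struct irq_fwspec in = {}, out = {};
	int ret;

	in.fwnode = of_node_to_fwnode(np);	/* node with irqdomain-map */
	in.param_count = 2;			/* must match #interrupt-cells */
	in.param[0] = 5;			/* example hwirq */
	in.param[1] = IRQ_TYPE_LEVEL_HIGH;	/* example flags */

	ret = of_irq_domain_map(&in, &out);
	if (ret < 0)
		return ret;		/* malformed or missing properties */
	if (!out.fwnode)
		return -ENOENT;		/* no irqdomain-map entry matched */

	/* out now holds the parent domain's specifier */
	return irq_create_fwspec_mapping(&out);
}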
/**
* of_irq_parse_one - Resolve an interrupt for a device
* @device: the device whose interrupt is to be resolved
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index 31ff03d..f3433bf 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -191,12 +191,12 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
if (IS_ERR(opp_table))
return 0;
- count = opp_table->regulator_count;
-
/* Regulator may not be required for the device */
- if (!count)
+ if (!opp_table->regulators)
goto put_opp_table;
+ count = opp_table->regulator_count;
+
uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL);
if (!uV)
goto put_opp_table;
@@ -976,6 +976,9 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
struct regulator *reg;
int i;
+ if (!opp_table->regulators)
+ return true;
+
for (i = 0; i < opp_table->regulator_count; i++) {
reg = opp_table->regulators[i];
@@ -1263,7 +1266,7 @@ static int _allocate_set_opp_data(struct opp_table *opp_table)
struct dev_pm_set_opp_data *data;
int len, count = opp_table->regulator_count;
- if (WARN_ON(!count))
+ if (WARN_ON(!opp_table->regulators))
return -EINVAL;
/* space for set_opp_data */
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c
index 975050a..3826b44 100644
--- a/drivers/pci/controller/dwc/pci-imx6.c
+++ b/drivers/pci/controller/dwc/pci-imx6.c
@@ -66,6 +66,7 @@ struct imx6_pcie {
#define PHY_PLL_LOCK_WAIT_USLEEP_MAX 200
/* PCIe Root Complex registers (memory-mapped) */
+#define PCIE_RC_IMX6_MSI_CAP 0x50
#define PCIE_RC_LCR 0x7c
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN1 0x1
#define PCIE_RC_LCR_MAX_LINK_SPEEDS_GEN2 0x2
@@ -682,6 +683,7 @@ static int imx6_pcie_probe(struct platform_device *pdev)
struct resource *dbi_base;
struct device_node *node = dev->of_node;
int ret;
+ u16 val;
imx6_pcie = devm_kzalloc(dev, sizeof(*imx6_pcie), GFP_KERNEL);
if (!imx6_pcie)
@@ -816,6 +818,14 @@ static int imx6_pcie_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
+ if (pci_msi_enabled()) {
+ val = dw_pcie_readw_dbi(pci, PCIE_RC_IMX6_MSI_CAP +
+ PCI_MSI_FLAGS);
+ val |= PCI_MSI_FLAGS_ENABLE;
+ dw_pcie_writew_dbi(pci, PCIE_RC_IMX6_MSI_CAP + PCI_MSI_FLAGS,
+ val);
+ }
+
return 0;
}
diff --git a/drivers/pci/controller/pci-msm-msi.c b/drivers/pci/controller/pci-msm-msi.c
index 1bf7328..4aabcfd 100644
--- a/drivers/pci/controller/pci-msm-msi.c
+++ b/drivers/pci/controller/pci-msm-msi.c
@@ -87,6 +87,8 @@ static void msm_msi_unmask_irq(struct irq_data *data)
static struct irq_chip msm_msi_irq_chip = {
.name = "msm_pci_msi",
+ .irq_enable = msm_msi_unmask_irq,
+ .irq_disable = msm_msi_mask_irq,
.irq_mask = msm_msi_mask_irq,
.irq_unmask = msm_msi_unmask_irq,
};
@@ -237,7 +239,6 @@ static int msm_msi_irq_domain_alloc(struct irq_domain *domain,
msi->irqs[pos].hwirq,
&msm_msi_bottom_irq_chip, client,
handle_simple_irq, NULL, NULL);
- irq_set_status_flags(msi->irqs[pos].virq, IRQ_DISABLE_UNLAZY);
client->nr_irqs++;
pos++;
}
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index afc4680..7eb1549 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -6113,7 +6113,8 @@ static int __init pci_setup(char *str)
} else if (!strncmp(str, "pcie_scan_all", 13)) {
pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
} else if (!strncmp(str, "disable_acs_redir=", 18)) {
- disable_acs_redir_param = str + 18;
+ disable_acs_redir_param =
+ kstrdup(str + 18, GFP_KERNEL);
} else {
printk(KERN_ERR "PCI: Unknown option `%s'\n",
str);
diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c
index 54a8b30..37d0c15 100644
--- a/drivers/pci/switch/switchtec.c
+++ b/drivers/pci/switch/switchtec.c
@@ -800,6 +800,7 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev,
{
int ret;
int nr_idxs;
+ unsigned int event_flags;
struct switchtec_ioctl_event_ctl ctl;
if (copy_from_user(&ctl, uctl, sizeof(ctl)))
@@ -821,7 +822,9 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev,
else
return -EINVAL;
+ event_flags = ctl.flags;
for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
+ ctl.flags = event_flags;
ret = event_ctl(stdev, &ctl);
if (ret < 0)
return ret;
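event_ctl() can rewrite ctl.flags on return, so without re-seeding it each pass only the first index would receive the user-requested flags. The snapshot/restore pattern in isolation (the struct and callback here are hypothetical stand-ins):

struct evt_ctl_sketch {
	int index;
	unsigned int flags;
};

static int apply_flags_to_all(struct evt_ctl_sketch *ctl, int nr_idxs,
			      int (*do_one)(struct evt_ctl_sketch *))
{
	unsigned int requested = ctl->flags;	/* snapshot before the loop */
	int ret;

	for (ctl->index = 0; ctl->index < nr_idxs; ctl->index++) {
		ctl->flags = requested;		/* restore the requested flags */
		ret = do_one(ctl);		/* may overwrite ctl->flags */
		if (ret < 0)
			return ret;
	}
	return 0;
}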
diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
index 54ec278..e1a77b2 100644
--- a/drivers/perf/arm_spe_pmu.c
+++ b/drivers/perf/arm_spe_pmu.c
@@ -927,6 +927,11 @@ static int arm_spe_pmu_perf_init(struct arm_spe_pmu *spe_pmu)
idx = atomic_inc_return(&pmu_idx);
name = devm_kasprintf(dev, GFP_KERNEL, "%s_%d", PMUNAME, idx);
+ if (!name) {
+ dev_err(dev, "failed to allocate name for pmu %d\n", idx);
+ return -ENOMEM;
+ }
+
return perf_pmu_register(&spe_pmu->pmu, name, -1);
}
diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
index d4dcd39..881078f 100644
--- a/drivers/phy/allwinner/phy-sun4i-usb.c
+++ b/drivers/phy/allwinner/phy-sun4i-usb.c
@@ -126,6 +126,7 @@ struct sun4i_usb_phy_cfg {
bool dedicated_clocks;
bool enable_pmu_unk1;
bool phy0_dual_route;
+ int missing_phys;
};
struct sun4i_usb_phy_data {
@@ -646,6 +647,9 @@ static struct phy *sun4i_usb_phy_xlate(struct device *dev,
if (args->args[0] >= data->cfg->num_phys)
return ERR_PTR(-ENODEV);
+ if (data->cfg->missing_phys & BIT(args->args[0]))
+ return ERR_PTR(-ENODEV);
+
return data->phys[args->args[0]].phy;
}
@@ -741,6 +745,9 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
struct sun4i_usb_phy *phy = data->phys + i;
char name[16];
+ if (data->cfg->missing_phys & BIT(i))
+ continue;
+
snprintf(name, sizeof(name), "usb%d_vbus", i);
phy->vbus = devm_regulator_get_optional(dev, name);
if (IS_ERR(phy->vbus)) {
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index fa53091..08925d2 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -90,7 +90,7 @@ struct bcm2835_pinctrl {
struct gpio_chip gpio_chip;
struct pinctrl_gpio_range gpio_range;
- spinlock_t irq_lock[BCM2835_NUM_BANKS];
+ raw_spinlock_t irq_lock[BCM2835_NUM_BANKS];
};
/* pins are just named GPIO0..GPIO53 */
@@ -461,10 +461,10 @@ static void bcm2835_gpio_irq_enable(struct irq_data *data)
unsigned bank = GPIO_REG_OFFSET(gpio);
unsigned long flags;
- spin_lock_irqsave(&pc->irq_lock[bank], flags);
+ raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
set_bit(offset, &pc->enabled_irq_map[bank]);
bcm2835_gpio_irq_config(pc, gpio, true);
- spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+ raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
}
static void bcm2835_gpio_irq_disable(struct irq_data *data)
@@ -476,12 +476,12 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
unsigned bank = GPIO_REG_OFFSET(gpio);
unsigned long flags;
- spin_lock_irqsave(&pc->irq_lock[bank], flags);
+ raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
bcm2835_gpio_irq_config(pc, gpio, false);
/* Clear events that were latched prior to clearing event sources */
bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
clear_bit(offset, &pc->enabled_irq_map[bank]);
- spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+ raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
}
static int __bcm2835_gpio_irq_set_type_disabled(struct bcm2835_pinctrl *pc,
@@ -584,7 +584,7 @@ static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type)
unsigned long flags;
int ret;
- spin_lock_irqsave(&pc->irq_lock[bank], flags);
+ raw_spin_lock_irqsave(&pc->irq_lock[bank], flags);
if (test_bit(offset, &pc->enabled_irq_map[bank]))
ret = __bcm2835_gpio_irq_set_type_enabled(pc, gpio, type);
@@ -596,7 +596,7 @@ static int bcm2835_gpio_irq_set_type(struct irq_data *data, unsigned int type)
else
irq_set_handler_locked(data, handle_level_irq);
- spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
+ raw_spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
return ret;
}
@@ -1047,7 +1047,7 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev)
for_each_set_bit(offset, &events, 32)
bcm2835_gpio_wr(pc, GPEDS0 + i * 4, BIT(offset));
- spin_lock_init(&pc->irq_lock[i]);
+ raw_spin_lock_init(&pc->irq_lock[i]);
}
err = gpiochip_add_data(&pc->gpio_chip, pc);
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index 6d31ad7..b7e272d 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1524,7 +1524,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
@@ -1532,7 +1532,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "HP"),
DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
@@ -1540,7 +1540,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{
@@ -1548,7 +1548,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = {
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
DMI_MATCH(DMI_PRODUCT_NAME, "Celes"),
- DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
},
},
{}
diff --git a/drivers/pinctrl/meson/pinctrl-meson8.c b/drivers/pinctrl/meson/pinctrl-meson8.c
index 8646617..e482672 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8.c
@@ -807,7 +807,9 @@ static const char * const gpio_groups[] = {
"BOOT_5", "BOOT_6", "BOOT_7", "BOOT_8", "BOOT_9",
"BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
"BOOT_15", "BOOT_16", "BOOT_17", "BOOT_18",
+};
+static const char * const gpio_aobus_groups[] = {
"GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
"GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
"GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
@@ -1030,6 +1032,7 @@ static struct meson_pmx_func meson8_cbus_functions[] = {
};
static struct meson_pmx_func meson8_aobus_functions[] = {
+ FUNCTION(gpio_aobus),
FUNCTION(uart_ao),
FUNCTION(remote),
FUNCTION(i2c_slave_ao),
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 647ad15..91cffc0 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -646,16 +646,18 @@ static const char * const gpio_groups[] = {
"BOOT_10", "BOOT_11", "BOOT_12", "BOOT_13", "BOOT_14",
"BOOT_15", "BOOT_16", "BOOT_17", "BOOT_18",
- "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
- "GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
- "GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
- "GPIOAO_12", "GPIOAO_13", "GPIO_BSD_EN", "GPIO_TEST_N",
-
"DIF_0_P", "DIF_0_N", "DIF_1_P", "DIF_1_N",
"DIF_2_P", "DIF_2_N", "DIF_3_P", "DIF_3_N",
"DIF_4_P", "DIF_4_N"
};
+static const char * const gpio_aobus_groups[] = {
+ "GPIOAO_0", "GPIOAO_1", "GPIOAO_2", "GPIOAO_3",
+ "GPIOAO_4", "GPIOAO_5", "GPIOAO_6", "GPIOAO_7",
+ "GPIOAO_8", "GPIOAO_9", "GPIOAO_10", "GPIOAO_11",
+ "GPIOAO_12", "GPIOAO_13", "GPIO_BSD_EN", "GPIO_TEST_N"
+};
+
static const char * const sd_a_groups[] = {
"sd_d0_a", "sd_d1_a", "sd_d2_a", "sd_d3_a", "sd_clk_a",
"sd_cmd_a"
@@ -871,6 +873,7 @@ static struct meson_pmx_func meson8b_cbus_functions[] = {
};
static struct meson_pmx_func meson8b_aobus_functions[] = {
+ FUNCTION(gpio_aobus),
FUNCTION(uart_ao),
FUNCTION(uart_ao_b),
FUNCTION(i2c_slave_ao),
diff --git a/drivers/pinctrl/pinctrl-max77620.c b/drivers/pinctrl/pinctrl-max77620.c
index a7f3706..3d05bc1 100644
--- a/drivers/pinctrl/pinctrl-max77620.c
+++ b/drivers/pinctrl/pinctrl-max77620.c
@@ -34,14 +34,12 @@ enum max77620_pin_ppdrv {
MAX77620_PIN_PP_DRV,
};
-enum max77620_pinconf_param {
- MAX77620_ACTIVE_FPS_SOURCE = PIN_CONFIG_END + 1,
- MAX77620_ACTIVE_FPS_POWER_ON_SLOTS,
- MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS,
- MAX77620_SUSPEND_FPS_SOURCE,
- MAX77620_SUSPEND_FPS_POWER_ON_SLOTS,
- MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS,
-};
+#define MAX77620_ACTIVE_FPS_SOURCE (PIN_CONFIG_END + 1)
+#define MAX77620_ACTIVE_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 2)
+#define MAX77620_ACTIVE_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 3)
+#define MAX77620_SUSPEND_FPS_SOURCE (PIN_CONFIG_END + 4)
+#define MAX77620_SUSPEND_FPS_POWER_ON_SLOTS (PIN_CONFIG_END + 5)
+#define MAX77620_SUSPEND_FPS_POWER_DOWN_SLOTS (PIN_CONFIG_END + 6)
struct max77620_pin_function {
const char *name;
diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c
index cbf58a1..4d87d75 100644
--- a/drivers/pinctrl/pinctrl-sx150x.c
+++ b/drivers/pinctrl/pinctrl-sx150x.c
@@ -1166,7 +1166,6 @@ static int sx150x_probe(struct i2c_client *client,
}
/* Register GPIO controller */
- pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL);
pctl->gpio.base = -1;
pctl->gpio.ngpio = pctl->data->npins;
pctl->gpio.get_direction = sx150x_gpio_get_direction;
@@ -1180,6 +1179,10 @@ static int sx150x_probe(struct i2c_client *client,
pctl->gpio.of_node = dev->of_node;
#endif
pctl->gpio.can_sleep = true;
+ pctl->gpio.label = devm_kstrdup(dev, client->name, GFP_KERNEL);
+ if (!pctl->gpio.label)
+ return -ENOMEM;
+
/*
* Setting multiple pins is not safe when all pins are not
* handled by the same regmap register. The oscio pin (present
@@ -1200,13 +1203,15 @@ static int sx150x_probe(struct i2c_client *client,
/* Add Interrupt support if an irq is specified */
if (client->irq > 0) {
- pctl->irq_chip.name = devm_kstrdup(dev, client->name,
- GFP_KERNEL);
pctl->irq_chip.irq_mask = sx150x_irq_mask;
pctl->irq_chip.irq_unmask = sx150x_irq_unmask;
pctl->irq_chip.irq_set_type = sx150x_irq_set_type;
pctl->irq_chip.irq_bus_lock = sx150x_irq_bus_lock;
pctl->irq_chip.irq_bus_sync_unlock = sx150x_irq_bus_sync_unlock;
+ pctl->irq_chip.name = devm_kstrdup(dev, client->name,
+ GFP_KERNEL);
+ if (!pctl->irq_chip.name)
+ return -ENOMEM;
pctl->irq.masked = ~0;
pctl->irq.sense = 0;
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
index aa8b581..ef4268c 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c
@@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 };
static const struct sunxi_pinctrl_desc h6_pinctrl_data = {
.pins = h6_pins,
.npins = ARRAY_SIZE(h6_pins),
- .irq_banks = 3,
+ .irq_banks = 4,
.irq_bank_map = h6_irq_bank_map,
.irq_read_needs_mux = true,
};
diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c
index b6fd483..e5d5b1a 100644
--- a/drivers/platform/chrome/cros_ec_proto.c
+++ b/drivers/platform/chrome/cros_ec_proto.c
@@ -575,6 +575,7 @@ static int get_keyboard_state_event(struct cros_ec_device *ec_dev)
int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event)
{
+ u8 event_type;
u32 host_event;
int ret;
@@ -594,11 +595,22 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event)
return ret;
if (wake_event) {
+ event_type = ec_dev->event_data.event_type;
host_event = cros_ec_get_host_event(ec_dev);
- /* Consider non-host_event as wake event */
- *wake_event = !host_event ||
- !!(host_event & ec_dev->host_event_wake_mask);
+ /*
+ * Sensor events need to be parsed by the sensor sub-device.
+ * Defer them, and don't report the wakeup here.
+ */
+ if (event_type == EC_MKBP_EVENT_SENSOR_FIFO)
+ *wake_event = false;
+ /* Masked host-events should not count as wake events. */
+ else if (host_event &&
+ !(host_event & ec_dev->host_event_wake_mask))
+ *wake_event = false;
+ /* Consider all other events as wake events. */
+ else
+ *wake_event = true;
}
return ret;
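The new branch ordering above can be read as a single predicate: sensor-FIFO events never wake, masked-out host events never wake, everything else does. Restated as a standalone helper (the helper itself is hypothetical; the constant's value is an assumption for illustration):

#include <stdbool.h>
#include <stdint.h>

#define EC_MKBP_EVENT_SENSOR_FIFO 2	/* assumed value, illustration only */

static bool ec_event_is_wake(uint8_t event_type, uint32_t host_event,
			     uint32_t wake_mask)
{
	/* sensor events are parsed by the sensor sub-device; defer them */
	if (event_type == EC_MKBP_EVENT_SENSOR_FIFO)
		return false;
	/* a host event wakes only if it is in the wake mask */
	if (host_event)
		return (host_event & wake_mask) != 0;
	/* non-host events always count as wake events */
	return true;
}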
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 8172449..539c1ab 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -86,6 +86,15 @@
Kernel and user-space processes can call the IPA driver
to configure IPA core.
+config IPA_DEBUG
+ bool "IPA DEBUG for non-perf build"
+ depends on IPA3
+ help
+ This option enables additional debug information for
+ non-perf builds. If you use a non-perf build and want
+ more debug information, enable this flag.
+ Enabling this flag on perf builds is not recommended.
+
config IPA_WDI_UNIFIED_API
bool "IPA WDI unified API support"
depends on IPA3
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 3520b67..74d0e15 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -4411,7 +4411,7 @@ static int ipa3_panic_notifier(struct notifier_block *this,
if (res)
IPAERR("uC panic handler failed %d\n", res);
- if (atomic_read(&ipa3_ctx->ipa3_active_clients.cnt) != 0) {
+ if (atomic_read(&ipa3_ctx->ipa_clk_vote)) {
ipahal_print_all_regs(false);
ipa_save_registers();
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index f3a113a..1a3da78 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -3036,7 +3036,9 @@ static struct sk_buff *handle_skb_completion(struct gsi_chan_xfer_notify
INIT_LIST_HEAD(&rx_pkt->link);
list_add_tail(&rx_pkt->link, head);
- if (notify->evt_id == GSI_CHAN_EVT_EOT) {
+ /* Check added to handle LAN consumer packets that arrive without the EOT flag */
+ if (notify->evt_id == GSI_CHAN_EVT_EOT ||
+ sys->ep->client == IPA_CLIENT_APPS_LAN_CONS) {
/* go over the list backward to save computations on updating length */
list_for_each_entry_safe_reverse(rx_pkt, tmp, head, link) {
rx_skb = rx_pkt->data.skb;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 0741492..d563427 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -2082,7 +2082,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
- { 9, 12, 8, 16, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 } },
+ { 9, 12, 8, 16, IPA_EE_AP, GSI_FREE_PRE_FETCH, 2 } },
[IPA_4_5][IPA_CLIENT_USB_PROD] = {
true, IPA_v4_5_GROUP_UL_DL_SRC,
true,
@@ -2124,7 +2124,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
QMB_MASTER_SELECT_DDR,
- { 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 8 } },
+ { 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
[IPA_4_5][IPA_CLIENT_Q6_CMD_PROD] = {
true, IPA_v4_5_GROUP_UL_DL_SRC,
false,
@@ -2305,7 +2305,7 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
QMB_MASTER_SELECT_DDR,
- { 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 8 } },
+ { 5, 0, 16, 28, IPA_EE_Q6, GSI_SMART_PRE_FETCH, 2 } },
[IPA_4_5_MHI][IPA_CLIENT_Q6_CMD_PROD] = {
true, IPA_v4_5_MHI_GROUP_PCIE,
false,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
index b8bc0b6..8e44841 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wigig_i.c
@@ -469,6 +469,7 @@ static int ipa3_wigig_config_gsi(bool Rx,
channel_props.use_db_eng = GSI_CHAN_DB_MODE;
channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
channel_props.prefetch_mode = ep_gsi->prefetch_mode;
+ channel_props.empty_lvl_threshold = ep_gsi->prefetch_threshold;
channel_props.low_weight = 1;
channel_props.err_cb = ipa_gsi_chan_err_cb;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c
index 5d33e0d..0ad0fd54 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_nat.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/debugfs.h>
@@ -10,7 +10,6 @@
#define IPA_64_LOW_32_MASK (0xFFFFFFFF)
#define IPA_64_HIGH_32_MASK (0xFFFFFFFF00000000ULL)
-#define IPAHAL_NAT_INVALID_PROTOCOL (0xFF)
static const char *ipahal_nat_type_to_str[IPA_NAT_MAX] = {
__stringify(IPAHAL_NAT_IPV4),
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index c54d573..b9043d5 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -467,12 +467,19 @@ static void ipa3_del_dflt_wan_rt_tables(void)
static void ipa3_copy_qmi_flt_rule_ex(
struct ipa_ioc_ext_intf_prop *q6_ul_flt_rule_ptr,
- struct ipa_filter_spec_ex_type_v01 *flt_spec_ptr)
+ void *flt_spec_ptr_void)
{
int j;
+ struct ipa_filter_spec_ex_type_v01 *flt_spec_ptr;
struct ipa_ipfltr_range_eq_16 *q6_ul_filter_nat_ptr;
struct ipa_ipfltr_range_eq_16_type_v01 *filter_spec_nat_ptr;
+ /*
+ * pure_ack and tos have the same size and type, so the tos field is
+ * treated as pure_ack on IPA v4.5
+ */
+ flt_spec_ptr = (struct ipa_filter_spec_ex_type_v01 *) flt_spec_ptr_void;
+
q6_ul_flt_rule_ptr->ip = flt_spec_ptr->ip_type;
q6_ul_flt_rule_ptr->action = flt_spec_ptr->filter_action;
if (flt_spec_ptr->is_routing_table_index_valid == true)
@@ -584,7 +591,6 @@ static void ipa3_copy_qmi_flt_rule_ex(
flt_spec_ptr->filter_rule.ipv4_frag_eq_present;
}
-
int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
*rule_req)
{
@@ -592,14 +598,25 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
/* prevent multi-threads accessing rmnet_ipa3_ctx->num_q6_rules */
mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock);
- if (rule_req->filter_spec_ex_list_valid == true) {
+ if (rule_req->filter_spec_ex_list_valid == true &&
+ rule_req->filter_spec_ex2_list_valid == false) {
rmnet_ipa3_ctx->num_q6_rules =
rule_req->filter_spec_ex_list_len;
- IPAWANDBG("Received (%d) install_flt_req\n",
+ IPAWANDBG("Received (%d) install_flt_req_ex_list\n",
+ rmnet_ipa3_ctx->num_q6_rules);
+ } else if (rule_req->filter_spec_ex2_list_valid == true &&
+ rule_req->filter_spec_ex_list_valid == false) {
+ rmnet_ipa3_ctx->num_q6_rules =
+ rule_req->filter_spec_ex2_list_len;
+ IPAWANDBG("Received (%d) install_flt_req_ex2_list\n",
rmnet_ipa3_ctx->num_q6_rules);
} else {
rmnet_ipa3_ctx->num_q6_rules = 0;
- IPAWANERR("got no UL rules from modem\n");
+ if (rule_req->filter_spec_ex2_list_valid == true)
+ IPAWANERR(
+ "both ex and ex2 flt rules are set to valid\n");
+ else
+ IPAWANERR("got no UL rules from modem\n");
mutex_unlock(
&rmnet_ipa3_ctx->add_mux_channel_lock);
return -EINVAL;
@@ -615,8 +632,14 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
rmnet_ipa3_ctx->num_q6_rules);
goto failure;
}
- ipa3_copy_qmi_flt_rule_ex(&ipa3_qmi_ctx->q6_ul_filter_rule[i],
- &rule_req->filter_spec_ex_list[i]);
+ if (rule_req->filter_spec_ex_list_valid == true)
+ ipa3_copy_qmi_flt_rule_ex(
+ &ipa3_qmi_ctx->q6_ul_filter_rule[i],
+ &rule_req->filter_spec_ex_list[i]);
+ else if (rule_req->filter_spec_ex2_list_valid == true)
+ ipa3_copy_qmi_flt_rule_ex(
+ &ipa3_qmi_ctx->q6_ul_filter_rule[i],
+ &rule_req->filter_spec_ex2_list[i]);
}
if (rule_req->xlat_filter_indices_list_valid) {
@@ -4072,6 +4095,15 @@ int rmnet_ipa3_send_lan_client_msg(
IPAWANERR("Can't allocate memory for tether_info\n");
return -ENOMEM;
}
+
+ if (data->client_event != IPA_PER_CLIENT_STATS_CONNECT_EVENT &&
+ data->client_event != IPA_PER_CLIENT_STATS_DISCONNECT_EVENT) {
+ IPAWANERR("Wrong event given. Event:- %d\n",
+ data->client_event);
+ kfree(lan_client);
+ return -EINVAL;
+ }
+ data->lan_client.lanIface[IPA_RESOURCE_NAME_MAX-1] = '\0';
memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
memcpy(lan_client, &data->lan_client,
sizeof(struct ipa_lan_client_msg));
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index 68f5a12..1802f16 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -33,7 +33,8 @@
#define NUM_LOG_PAGES 2
#define MAX_CLK_PERF_LEVEL 32
-static unsigned long default_bus_bw_set[] = {0, 19200000, 50000000, 100000000};
+static unsigned long default_bus_bw_set[] = {0, 19200000, 50000000,
+ 100000000, 150000000, 200000000, 236000000};
/**
* @struct geni_se_device - Data structure to represent the QUPv3 Core
@@ -290,6 +291,10 @@ static int geni_se_select_fifo_mode(void __iomem *base)
geni_write_reg(0xFFFFFFFF, base, SE_DMA_RX_IRQ_CLR);
geni_write_reg(0xFFFFFFFF, base, SE_IRQ_EN);
+ /* Clearing registers before reading */
+ geni_write_reg(0x00000000, base, SE_GENI_M_IRQ_EN);
+ geni_write_reg(0x00000000, base, SE_GENI_S_IRQ_EN);
+
common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
common_geni_s_irq_en = geni_read_reg(base, SE_GENI_S_IRQ_EN);
geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
@@ -309,9 +314,7 @@ static int geni_se_select_fifo_mode(void __iomem *base)
static int geni_se_select_dma_mode(void __iomem *base)
{
- int proto = get_se_proto(base);
unsigned int geni_dma_mode = 0;
- unsigned int common_geni_m_irq_en;
geni_write_reg(0, base, SE_GSI_EVENT_EN);
geni_write_reg(0xFFFFFFFF, base, SE_GENI_M_IRQ_CLEAR);
@@ -319,13 +322,9 @@ static int geni_se_select_dma_mode(void __iomem *base)
geni_write_reg(0xFFFFFFFF, base, SE_DMA_TX_IRQ_CLR);
geni_write_reg(0xFFFFFFFF, base, SE_DMA_RX_IRQ_CLR);
geni_write_reg(0xFFFFFFFF, base, SE_IRQ_EN);
+ geni_write_reg(0x00000000, base, SE_GENI_M_IRQ_EN);
+ geni_write_reg(0x00000000, base, SE_GENI_S_IRQ_EN);
- common_geni_m_irq_en = geni_read_reg(base, SE_GENI_M_IRQ_EN);
- if (proto != UART)
- common_geni_m_irq_en &=
- ~(M_TX_FIFO_WATERMARK_EN | M_RX_FIFO_WATERMARK_EN);
-
- geni_write_reg(common_geni_m_irq_en, base, SE_GENI_M_IRQ_EN);
geni_dma_mode = geni_read_reg(base, SE_GENI_DMA_MODE_EN);
geni_dma_mode |= GENI_DMA_MODE_EN;
geni_write_reg(geni_dma_mode, base, SE_GENI_DMA_MODE_EN);
@@ -622,8 +621,11 @@ static bool geni_se_check_bus_bw(struct geni_se_device *geni_se_dev)
int new_bus_bw_idx = geni_se_dev->bus_bw_set_size - 1;
unsigned long new_bus_bw;
bool bus_bw_update = false;
+ /* Convert agg ab into bytes per second */
+ unsigned long new_ab_in_hz = DEFAULT_BUS_WIDTH *
+ ((2*geni_se_dev->cur_ab)*10000);
- new_bus_bw = max(geni_se_dev->cur_ib, geni_se_dev->cur_ab) /
+ new_bus_bw = max(geni_se_dev->cur_ib, new_ab_in_hz) /
DEFAULT_BUS_WIDTH;
for (i = 0; i < geni_se_dev->bus_bw_set_size; i++) {
if (geni_se_dev->bus_bw_set[i] >= new_bus_bw) {
@@ -667,9 +669,9 @@ static int geni_se_rmv_ab_ib(struct geni_se_device *geni_se_dev,
geni_se_dev->cur_ab,
geni_se_dev->cur_ib);
GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
- "%s: %lu:%lu (%lu:%lu) %d\n", __func__,
- geni_se_dev->cur_ab, geni_se_dev->cur_ib,
- rsc->ab, rsc->ib, bus_bw_update);
+ "%s: %s: cur_ab_ib(%lu:%lu) req_ab_ib(%lu:%lu) %d\n",
+ __func__, dev_name(rsc->ctrl_dev), geni_se_dev->cur_ab,
+ geni_se_dev->cur_ib, rsc->ab, rsc->ib, bus_bw_update);
mutex_unlock(&geni_se_dev->geni_dev_lock);
return ret;
}
@@ -765,9 +767,9 @@ static int geni_se_add_ab_ib(struct geni_se_device *geni_se_dev,
geni_se_dev->cur_ab,
geni_se_dev->cur_ib);
GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
- "%s: %lu:%lu (%lu:%lu) %d\n", __func__,
- geni_se_dev->cur_ab, geni_se_dev->cur_ib,
- rsc->ab, rsc->ib, bus_bw_update);
+ "%s: %s: cur_ab_ib(%lu:%lu) req_ab_ib(%lu:%lu) %d\n",
+ __func__, dev_name(rsc->ctrl_dev), geni_se_dev->cur_ab,
+ geni_se_dev->cur_ib, rsc->ab, rsc->ib, bus_bw_update);
mutex_unlock(&geni_se_dev->geni_dev_lock);
return ret;
}
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index d89936c..78b4aa4 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -83,12 +83,12 @@
#define MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET 0xe7
#define MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET 0xe8
#define MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET 0xe9
-#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET 0xea
-#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET 0xeb
-#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET 0xec
-#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xed
-#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xee
-#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xef
+#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET 0xeb
+#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET 0xec
+#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET 0xed
+#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xee
+#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xef
+#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xf0
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index d685f81..705462d 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -596,6 +596,25 @@ static void pl_taper_work(struct work_struct *work)
goto done;
}
+ /*
+ * Due to the reduction of float voltage in a JEITA condition, taper
+ * charging can be initiated at a lower FV. On removal of the JEITA
+ * condition, FV readjusts itself. However, once taper charging is
+ * initiated, it doesn't exit until parallel charging is disabled,
+ * due to which FCC doesn't scale back to its original value,
+ * leading to slow charging thereafter.
+ * Check if FV has increased compared to the FV at which taper
+ * charging was initiated, and if so, exit taper charging.
+ */
+ if (get_effective_result(chip->fv_votable) >
+ chip->taper_entry_fv) {
+ pl_dbg(chip, PR_PARALLEL, "Float voltage increased. Exiting taper\n");
+ goto done;
+ } else {
+ chip->taper_entry_fv =
+ get_effective_result(chip->fv_votable);
+ }
+
rc = power_supply_get_property(chip->batt_psy,
POWER_SUPPLY_PROP_CHARGE_TYPE, &pval);
if (rc < 0) {
@@ -621,26 +640,9 @@ static void pl_taper_work(struct work_struct *work)
vote(chip->fcc_votable, TAPER_STEPPER_VOTER,
true, eff_fcc_ua);
} else {
- /*
- * Due to reduction of float voltage in JEITA condition
- * taper charging can be initiated at a lower FV. On
- * removal of JEITA condition, FV readjusts itself.
- * However, once taper charging is initiated, it doesn't
- * exits until parallel chaging is disabled due to which
- * FCC doesn't scale back to its original value, leading
- * to slow charging thereafter.
- * Check if FV increases in comparison to FV at which
- * taper charging was initiated, and if yes, exit taper
- * charging.
- */
- if (get_effective_result(chip->fv_votable) >
- chip->taper_entry_fv) {
- pl_dbg(chip, PR_PARALLEL, "Float voltage increased. Exiting taper\n");
- goto done;
- } else {
- pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
- }
+ pl_dbg(chip, PR_PARALLEL, "master is fast charging; waiting for next taper\n");
}
+
/* wait for the charger state to deglitch after FCC change */
msleep(PL_TAPER_WORK_DELAY_MS);
}
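The float-voltage check moved to the top of pl_taper_work() above latches the FV seen on taper entry and bails out of taper as soon as the effective FV rises above it (e.g. when a JEITA-reduced FV is restored), instead of only checking it in the non-taper branch. The latch in isolation (hypothetical helper):

#include <stdbool.h>

/* Returns true when taper should be exited because FV recovered upward. */
static bool fv_recovered(int effective_fv_uv, int *taper_entry_fv_uv)
{
	if (effective_fv_uv > *taper_entry_fv_uv)
		return true;			/* FV went back up: exit taper */

	*taper_entry_fv_uv = effective_fv_uv;	/* track the lowest FV seen */
	return false;
}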
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index 4eae836..9536937 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -54,7 +54,7 @@ static struct smb_params smb5_pmi632_params = {
},
.icl_stat = {
.name = "input current limit status",
- .reg = AICL_ICL_STATUS_REG,
+ .reg = ICL_STATUS_REG,
.min_u = 0,
.max_u = 3000000,
.step_u = 50000,
@@ -88,6 +88,22 @@ static struct smb_params smb5_pmi632_params = {
.step_u = 400,
.set_proc = smblib_set_chg_freq,
},
+ .aicl_5v_threshold = {
+ .name = "AICL 5V threshold",
+ .reg = USBIN_5V_AICL_THRESHOLD_REG,
+ .min_u = 4000,
+ .max_u = 4700,
+ .step_u = 100,
+ },
+ .aicl_cont_threshold = {
+ .name = "AICL CONT threshold",
+ .reg = USBIN_CONT_AICL_THRESHOLD_REG,
+ .min_u = 4000,
+ .max_u = 8800,
+ .step_u = 100,
+ .get_proc = smblib_get_aicl_cont_threshold,
+ .set_proc = smblib_set_aicl_cont_threshold,
+ },
};
static struct smb_params smb5_pm8150b_params = {
@@ -164,6 +180,22 @@ static struct smb_params smb5_pm8150b_params = {
.step_u = 400,
.set_proc = smblib_set_chg_freq,
},
+ .aicl_5v_threshold = {
+ .name = "AICL 5V threshold",
+ .reg = USBIN_5V_AICL_THRESHOLD_REG,
+ .min_u = 4000,
+ .max_u = 4700,
+ .step_u = 100,
+ },
+ .aicl_cont_threshold = {
+ .name = "AICL CONT threshold",
+ .reg = USBIN_CONT_AICL_THRESHOLD_REG,
+ .min_u = 4000,
+ .max_u = 11800,
+ .step_u = 100,
+ .get_proc = smblib_get_aicl_cont_threshold,
+ .set_proc = smblib_set_aicl_cont_threshold,
+ },
};
struct smb_dt_props {
@@ -251,6 +283,13 @@ static struct attribute *smb5_attrs[] = {
};
ATTRIBUTE_GROUPS(smb5);
+enum {
+ BAT_THERM = 0,
+ MISC_THERM,
+ CONN_THERM,
+ SMB_THERM,
+};
+
#define PMI632_MAX_ICL_UA 3000000
#define PM6150_MAX_FCC_UA 3000000
static int smb5_chg_config_init(struct smb5 *chip)
@@ -295,6 +334,7 @@ static int smb5_chg_config_init(struct smb5 *chip)
break;
case PMI632_SUBTYPE:
chip->chg.smb_version = PMI632_SUBTYPE;
+ chg->wa_flags |= WEAK_ADAPTER_WA | USBIN_OV_WA;
chg->param = smb5_pmi632_params;
chg->use_extcon = true;
chg->name = "pmi632_charger";
@@ -324,6 +364,45 @@ static int smb5_chg_config_init(struct smb5 *chip)
return rc;
}
+#define PULL_NO_PULL 0
+#define PULL_30K 30
+#define PULL_100K 100
+#define PULL_400K 400
+static int get_valid_pullup(int pull_up)
+{
+ /* pull-up can only be 0/30K/100K/400K */
+ switch (pull_up) {
+ case PULL_NO_PULL:
+ return INTERNAL_PULL_NO_PULL;
+ case PULL_30K:
+ return INTERNAL_PULL_30K_PULL;
+ case PULL_100K:
+ return INTERNAL_PULL_100K_PULL;
+ case PULL_400K:
+ return INTERNAL_PULL_400K_PULL;
+ default:
+ return INTERNAL_PULL_100K_PULL;
+ }
+}
+
+#define INTERNAL_PULL_UP_MASK 0x3
+static int smb5_configure_internal_pull(struct smb_charger *chg, int type,
+ int pull)
+{
+ int rc;
+ int shift = type * 2;
+ u8 mask = INTERNAL_PULL_UP_MASK << shift;
+ u8 val = pull << shift;
+
+ rc = smblib_masked_write(chg, BATIF_ADC_INTERNAL_PULL_UP_REG,
+ mask, val);
+ if (rc < 0)
+ dev_err(chg->dev,
+ "Couldn't configure ADC pull-up reg rc=%d\n", rc);
+
+ return rc;
+}
+
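smb5_configure_internal_pull() packs one 2-bit selector per thermistor into BATIF_ADC_INTERNAL_PULL_UP_REG: the shift is type * 2, so for the CONN_THERM index (2) a 100K selection lands in bits 5:4 under mask 0x30. A self-contained restatement of the packing; the selector encodings below are illustrative, the real INTERNAL_PULL_* values come from the smb5 headers:

enum pull_sel { SEL_NO_PULL, SEL_30K, SEL_100K, SEL_400K };	/* assumed order */
enum therm_idx { IDX_BAT_THERM, IDX_MISC_THERM, IDX_CONN_THERM, IDX_SMB_THERM };

static unsigned char pack_internal_pull(enum therm_idx type, enum pull_sel sel)
{
	int shift = type * 2;			/* 2 bits per thermistor */

	return (sel & 0x3) << shift;		/* IDX_CONN_THERM -> bits 5:4 */
}
/* pack_internal_pull(IDX_CONN_THERM, SEL_100K) == 0x2 << 4 == 0x20 */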
#define MICRO_1P5A 1500000
#define MICRO_P1A 100000
#define MICRO_1PA 1000000
@@ -354,6 +433,9 @@ static int smb5_parse_dt_misc(struct smb5 *chip, struct device_node *node)
chg->sw_jeita_enabled = of_property_read_bool(node,
"qcom,sw-jeita-enable");
+ chg->pd_not_supported = of_property_read_bool(node,
+ "qcom,usb-pd-disable");
+
rc = of_property_read_u32(node, "qcom,wd-bark-time-secs",
&chip->dt.wd_bark_time);
if (rc < 0 || chip->dt.wd_bark_time < MIN_WD_BARK_TIME)
@@ -431,6 +513,19 @@ static int smb5_parse_dt_misc(struct smb5 *chip, struct device_node *node)
of_property_read_bool(node,
"qcom,uusb-moisture-protection-enable");
+ chg->hw_die_temp_mitigation = of_property_read_bool(node,
+ "qcom,hw-die-temp-mitigation");
+
+ chg->hw_connector_mitigation = of_property_read_bool(node,
+ "qcom,hw-connector-mitigation");
+
+ chg->hw_skin_temp_mitigation = of_property_read_bool(node,
+ "qcom,hw-skin-temp-mitigation");
+
+ chg->connector_pull_up = -EINVAL;
+ of_property_read_u32(node, "qcom,connector-internal-pull-kohm",
+ &chg->connector_pull_up);
+
chip->dt.disable_suspend_on_collapse = of_property_read_bool(node,
"qcom,disable-suspend-on-collapse");
@@ -620,6 +715,8 @@ static enum power_supply_property smb5_usb_props[] = {
POWER_SUPPLY_PROP_SMB_EN_REASON,
POWER_SUPPLY_PROP_SCOPE,
POWER_SUPPLY_PROP_MOISTURE_DETECTED,
+ POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED,
+ POWER_SUPPLY_PROP_QC_OPTI_DISABLE,
};
static int smb5_usb_get_prop(struct power_supply *psy,
@@ -629,6 +726,7 @@ static int smb5_usb_get_prop(struct power_supply *psy,
struct smb5 *chip = power_supply_get_drvdata(psy);
struct smb_charger *chg = &chip->chg;
int rc = 0;
+ val->intval = 0;
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
@@ -730,6 +828,16 @@ static int smb5_usb_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_MOISTURE_DETECTED:
val->intval = chg->moisture_present;
break;
+ case POWER_SUPPLY_PROP_HVDCP_OPTI_ALLOWED:
+ val->intval = !chg->flash_active;
+ break;
+ case POWER_SUPPLY_PROP_QC_OPTI_DISABLE:
+ if (chg->hw_die_temp_mitigation)
+ val->intval = POWER_SUPPLY_QC_THERMAL_BALANCE_DISABLE
+ | POWER_SUPPLY_QC_INOV_THERMAL_DISABLE;
+ if (chg->hw_connector_mitigation)
+ val->intval |= POWER_SUPPLY_QC_CTM_DISABLE;
+ break;
default:
pr_err("get prop %d is not supported in usb\n", psp);
rc = -EINVAL;
@@ -1023,6 +1131,7 @@ static int smb5_usb_main_set_prop(struct power_supply *psy,
{
struct smb5 *chip = power_supply_get_drvdata(psy);
struct smb_charger *chg = &chip->chg;
+ union power_supply_propval pval = {0, };
int rc = 0;
switch (psp) {
@@ -1036,7 +1145,35 @@ static int smb5_usb_main_set_prop(struct power_supply *psy,
rc = smblib_set_icl_current(chg, val->intval);
break;
case POWER_SUPPLY_PROP_FLASH_ACTIVE:
- chg->flash_active = val->intval;
+ if ((chg->smb_version == PMI632_SUBTYPE)
+ && (chg->flash_active != val->intval)) {
+ chg->flash_active = val->intval;
+
+ rc = smblib_get_prop_usb_present(chg, &pval);
+ if (rc < 0)
+ pr_err("Failed to get USB preset status rc=%d\n",
+ rc);
+ if (pval.intval) {
+ rc = smblib_force_vbus_voltage(chg,
+ chg->flash_active ? FORCE_5V_BIT
+ : IDLE_BIT);
+ if (rc < 0)
+ pr_err("Failed to force 5V\n");
+ else
+ chg->pulse_cnt = 0;
+ } else {
+ /* USB absent & flash not-active - vote 100mA */
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER,
+ true, SDP_100_MA);
+ }
+
+ pr_debug("flash active VBUS 5V restriction %s\n",
+ chg->flash_active ? "applied" : "removed");
+
+ /* Update userspace */
+ if (chg->batt_psy)
+ power_supply_changed(chg->batt_psy);
+ }
break;
case POWER_SUPPLY_PROP_TOGGLE_STAT:
rc = smblib_toggle_smb_en(chg, val->intval);
@@ -1459,10 +1596,11 @@ static int smb5_batt_set_prop(struct power_supply *psy,
rc = smblib_set_prop_ship_mode(chg, val);
break;
case POWER_SUPPLY_PROP_RERUN_AICL:
- rc = smblib_rerun_aicl(chg);
+ rc = smblib_run_aicl(chg, RERUN_AICL);
break;
case POWER_SUPPLY_PROP_DP_DM:
- rc = smblib_dp_dm(chg, val->intval);
+ if (!chg->flash_active)
+ rc = smblib_dp_dm(chg, val->intval);
break;
case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
rc = smblib_set_prop_input_current_limited(chg, val);
@@ -1636,7 +1774,42 @@ static int smb5_init_vconn_regulator(struct smb5 *chip)
***************************/
static int smb5_configure_typec(struct smb_charger *chg)
{
+ union power_supply_propval pval = {0, };
int rc;
+ u8 val = 0;
+
+ rc = smblib_read(chg, LEGACY_CABLE_STATUS_REG, &val);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read Legacy status rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Across a reboot, standard typeC cables get detected as legacy cables
+ * due to VBUS attachment prior to CC attach/detach. To handle this,
+ * the "early_usb_attach" flag is used, which assumes that across a
+ * reboot the connected cable can be a standard typeC cable. However,
+ * this applies only to PD-capable designs. Hence, for non-PD designs,
+ * reset legacy cable detection by disabling and re-enabling typeC mode.
+ */
+ if (chg->pd_not_supported && (val & TYPEC_LEGACY_CABLE_STATUS_BIT)) {
+ pval.intval = POWER_SUPPLY_TYPEC_PR_NONE;
+ rc = smblib_set_prop_typec_power_role(chg, &pval);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't disable TYPEC rc=%d\n", rc);
+ return rc;
+ }
+
+ /* delay before enabling typeC */
+ msleep(50);
+
+ pval.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ rc = smblib_set_prop_typec_power_role(chg, &pval);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't enable TYPEC rc=%d\n", rc);
+ return rc;
+ }
+ }
smblib_apsd_enable(chg, true);
smblib_hvdcp_detect_enable(chg, false);
@@ -1764,30 +1937,37 @@ static int smb5_configure_micro_usb(struct smb_charger *chg)
return rc;
}
+#define RAW_ITERM(iterm_ma, max_range) \
+ div_s64((int64_t)iterm_ma * ADC_CHG_ITERM_MASK, max_range)
static int smb5_configure_iterm_thresholds_adc(struct smb5 *chip)
{
u8 *buf;
int rc = 0;
- s16 raw_hi_thresh, raw_lo_thresh;
+ s16 raw_hi_thresh, raw_lo_thresh, max_limit_ma;
struct smb_charger *chg = &chip->chg;
- if (chip->dt.term_current_thresh_hi_ma < -10000 ||
- chip->dt.term_current_thresh_hi_ma > 10000 ||
- chip->dt.term_current_thresh_lo_ma < -10000 ||
- chip->dt.term_current_thresh_lo_ma > 10000) {
+ if (chip->chg.smb_version == PMI632_SUBTYPE)
+ max_limit_ma = ITERM_LIMITS_PMI632_MA;
+ else
+ max_limit_ma = ITERM_LIMITS_PM8150B_MA;
+
+ if (chip->dt.term_current_thresh_hi_ma < (-1 * max_limit_ma)
+ || chip->dt.term_current_thresh_hi_ma > max_limit_ma
+ || chip->dt.term_current_thresh_lo_ma < (-1 * max_limit_ma)
+ || chip->dt.term_current_thresh_lo_ma > max_limit_ma) {
dev_err(chg->dev, "ITERM threshold out of range rc=%d\n", rc);
return -EINVAL;
}
/*
* Conversion:
- * raw (A) = (scaled_mA * ADC_CHG_TERM_MASK) / (10 * 1000)
+ * raw (A) = (term_current * ADC_CHG_ITERM_MASK) / max_limit_ma
* Note: raw needs to be converted to big-endian format.
*/
if (chip->dt.term_current_thresh_hi_ma) {
- raw_hi_thresh = ((chip->dt.term_current_thresh_hi_ma *
- ADC_CHG_TERM_MASK) / 10000);
+ raw_hi_thresh = RAW_ITERM(chip->dt.term_current_thresh_hi_ma,
+ max_limit_ma);
raw_hi_thresh = sign_extend32(raw_hi_thresh, 15);
buf = (u8 *)&raw_hi_thresh;
raw_hi_thresh = buf[1] | (buf[0] << 8);
@@ -1802,8 +1982,8 @@ static int smb5_configure_iterm_thresholds_adc(struct smb5 *chip)
}
if (chip->dt.term_current_thresh_lo_ma) {
- raw_lo_thresh = ((chip->dt.term_current_thresh_lo_ma *
- ADC_CHG_TERM_MASK) / 10000);
+ raw_lo_thresh = RAW_ITERM(chip->dt.term_current_thresh_lo_ma,
+ max_limit_ma);
raw_lo_thresh = sign_extend32(raw_lo_thresh, 15);
buf = (u8 *)&raw_lo_thresh;
raw_lo_thresh = buf[1] | (buf[0] << 8);
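With RAW_ITERM() the termination threshold now scales against the PMIC's full-scale range instead of a fixed 10 A. For illustration only (the real ADC_CHG_ITERM_MASK and ITERM_LIMITS_* values live in the smb5 headers), assuming a full-scale code of 32767 and a PM8150B limit of 10000 mA, a 500 mA threshold encodes as 500 * 32767 / 10000 = 1638 before the sign-extend and byte swap:

#include <stdint.h>
#include <stdio.h>

#define ADC_CHG_ITERM_MASK_ASSUMED	32767	/* illustrative full-scale code */
#define MAX_LIMIT_MA_ASSUMED		10000	/* illustrative PM8150B limit */

int main(void)
{
	int iterm_ma = 500;
	int16_t raw = (int64_t)iterm_ma * ADC_CHG_ITERM_MASK_ASSUMED /
		      MAX_LIMIT_MA_ASSUMED;

	/* raw == 1638; the driver then sign-extends and stores it big-endian */
	printf("raw = %d (0x%04x)\n", raw, (uint16_t)raw);
	return 0;
}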
@@ -1835,6 +2015,54 @@ static int smb5_configure_iterm_thresholds(struct smb5 *chip)
return rc;
}
+static int smb5_configure_mitigation(struct smb_charger *chg)
+{
+ int rc;
+ u8 chan = 0, src_cfg = 0;
+
+ if (!chg->hw_die_temp_mitigation && !chg->hw_connector_mitigation &&
+ !chg->hw_skin_temp_mitigation) {
+ src_cfg = THERMREG_SW_ICL_ADJUST_BIT;
+ } else {
+ if (chg->hw_die_temp_mitigation) {
+ chan = DIE_TEMP_CHANNEL_EN_BIT;
+ src_cfg = THERMREG_DIE_ADC_SRC_EN_BIT
+ | THERMREG_DIE_CMP_SRC_EN_BIT;
+ }
+
+ if (chg->hw_connector_mitigation) {
+ chan |= CONN_THM_CHANNEL_EN_BIT;
+ src_cfg |= THERMREG_CONNECTOR_ADC_SRC_EN_BIT;
+ }
+
+ if (chg->hw_skin_temp_mitigation) {
+ chan |= MISC_THM_CHANNEL_EN_BIT;
+ src_cfg |= THERMREG_SKIN_ADC_SRC_EN_BIT;
+ }
+
+ rc = smblib_masked_write(chg, BATIF_ADC_CHANNEL_EN_REG,
+ CONN_THM_CHANNEL_EN_BIT | DIE_TEMP_CHANNEL_EN_BIT |
+ MISC_THM_CHANNEL_EN_BIT, chan);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't enable ADC channel rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = smblib_masked_write(chg, MISC_THERMREG_SRC_CFG_REG,
+ THERMREG_SW_ICL_ADJUST_BIT | THERMREG_DIE_ADC_SRC_EN_BIT |
+ THERMREG_DIE_CMP_SRC_EN_BIT | THERMREG_SKIN_ADC_SRC_EN_BIT |
+ SKIN_ADC_CFG_BIT | THERMREG_CONNECTOR_ADC_SRC_EN_BIT, src_cfg);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure THERM_SRC reg rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
static int smb5_init_dc_peripheral(struct smb_charger *chg)
{
int rc = 0;
@@ -1882,6 +2110,12 @@ static int smb5_init_hw(struct smb5 *chip)
smblib_get_charge_param(chg, &chg->param.usb_icl,
&chg->default_icl_ua);
+ smblib_get_charge_param(chg, &chg->param.aicl_5v_threshold,
+ &chg->default_aicl_5v_threshold_mv);
+ chg->aicl_5v_threshold_mv = chg->default_aicl_5v_threshold_mv;
+ smblib_get_charge_param(chg, &chg->param.aicl_cont_threshold,
+ &chg->default_aicl_cont_threshold_mv);
+ chg->aicl_cont_threshold_mv = chg->default_aicl_cont_threshold_mv;
if (chg->charger_temp_max == -EINVAL) {
rc = smblib_get_thermal_threshold(chg,
@@ -1908,11 +2142,10 @@ static int smb5_init_hw(struct smb5 *chip)
return rc;
}
} else {
- /* Allows software thermal regulation only */
- rc = smblib_write(chg, MISC_THERMREG_SRC_CFG_REG,
- THERMREG_SW_ICL_ADJUST_BIT);
+ /* configure temperature mitigation */
+ rc = smb5_configure_mitigation(chg);
if (rc < 0) {
- dev_err(chg->dev, "Couldn't configure SMB thermal regulation rc=%d\n",
+ dev_err(chg->dev, "Couldn't configure mitigation rc=%d\n",
rc);
return rc;
}
@@ -2257,6 +2490,17 @@ static int smb5_init_hw(struct smb5 *chip)
return rc;
}
+ if (chg->connector_pull_up != -EINVAL) {
+ rc = smb5_configure_internal_pull(chg, CONN_THERM,
+ get_valid_pullup(chg->connector_pull_up));
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't configure CONN_THERM pull-up rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
return rc;
}
@@ -2428,10 +2672,12 @@ static struct smb_irq_info smb5_irqs[] = {
[USBIN_UV_IRQ] = {
.name = "usbin-uv",
.handler = usbin_uv_irq_handler,
+ .wake = true,
+ .storm_data = {true, 3000, 5},
},
[USBIN_OV_IRQ] = {
.name = "usbin-ov",
- .handler = default_irq_handler,
+ .handler = usbin_ov_irq_handler,
},
[USBIN_PLUGIN_IRQ] = {
.name = "usbin-plugin",
diff --git a/drivers/power/supply/qcom/schgm-flash.c b/drivers/power/supply/qcom/schgm-flash.c
index 7e8c70b..b92ace1 100644
--- a/drivers/power/supply/qcom/schgm-flash.c
+++ b/drivers/power/supply/qcom/schgm-flash.c
@@ -94,6 +94,11 @@ static void schgm_flash_parse_dt(struct smb_charger *chg)
}
}
+bool is_flash_active(struct smb_charger *chg)
+{
+ return chg->flash_active ? true : false;
+}
+
int schgm_flash_get_vreg_ok(struct smb_charger *chg, int *val)
{
int rc, vreg_state;
@@ -140,6 +145,31 @@ int schgm_flash_get_vreg_ok(struct smb_charger *chg, int *val)
return 0;
}
+void schgm_flash_torch_priority(struct smb_charger *chg, enum torch_mode mode)
+{
+ int rc;
+ u8 reg;
+
+ /*
+ * If torch is configured in default BOOST mode, skip any update in the
+ * mode configuration.
+ */
+ if (chg->headroom_mode == FIXED_MODE)
+ return;
+
+ if ((mode != TORCH_BOOST_MODE) && (mode != TORCH_BUCK_MODE))
+ return;
+
+ reg = mode;
+ rc = smblib_masked_write(chg, SCHGM_TORCH_PRIORITY_CONTROL_REG,
+ TORCH_PRIORITY_CONTROL_BIT, reg);
+ if (rc < 0)
+ pr_err("Couldn't configure Torch priority control rc=%d\n",
+ rc);
+
+ pr_debug("Torch priority changed to: %d\n", mode);
+}
+
int schgm_flash_init(struct smb_charger *chg)
{
int rc;
@@ -183,7 +213,7 @@ int schgm_flash_init(struct smb_charger *chg)
reg = (chg->headroom_mode == FIXED_MODE)
? TORCH_PRIORITY_CONTROL_BIT : 0;
- rc = smblib_write(chg, SCHGM_TORCH_PRIORITY_CONTROL, reg);
+ rc = smblib_write(chg, SCHGM_TORCH_PRIORITY_CONTROL_REG, reg);
if (rc < 0) {
pr_err("Couldn't force 5V boost in torch mode rc=%d\n",
rc);
diff --git a/drivers/power/supply/qcom/schgm-flash.h b/drivers/power/supply/qcom/schgm-flash.h
index 546e63a..1294467 100644
--- a/drivers/power/supply/qcom/schgm-flash.h
+++ b/drivers/power/supply/qcom/schgm-flash.h
@@ -30,7 +30,7 @@
#define SCHGM_FLASH_CONTROL_REG (SCHGM_FLASH_BASE + 0x60)
#define SOC_LOW_FOR_FLASH_EN_BIT BIT(7)
-#define SCHGM_TORCH_PRIORITY_CONTROL (SCHGM_FLASH_BASE + 0x63)
+#define SCHGM_TORCH_PRIORITY_CONTROL_REG (SCHGM_FLASH_BASE + 0x63)
#define TORCH_PRIORITY_CONTROL_BIT BIT(0)
#define SCHGM_SOC_BASED_FLASH_DERATE_TH_CFG_REG (SCHGM_FLASH_BASE + 0x67)
@@ -38,8 +38,15 @@
#define SCHGM_SOC_BASED_FLASH_DISABLE_TH_CFG_REG \
(SCHGM_FLASH_BASE + 0x68)
+enum torch_mode {
+ TORCH_BUCK_MODE = 0,
+ TORCH_BOOST_MODE,
+};
+
int schgm_flash_get_vreg_ok(struct smb_charger *chg, int *val);
+void schgm_flash_torch_priority(struct smb_charger *chg, enum torch_mode mode);
int schgm_flash_init(struct smb_charger *chg);
+bool is_flash_active(struct smb_charger *chg);
irqreturn_t schgm_flash_default_irq_handler(int irq, void *data);
irqreturn_t schgm_flash_ilim2_irq_handler(int irq, void *data);
diff --git a/drivers/power/supply/qcom/smb1390-charger-psy.c b/drivers/power/supply/qcom/smb1390-charger-psy.c
index 6d87589..ff8f484 100644
--- a/drivers/power/supply/qcom/smb1390-charger-psy.c
+++ b/drivers/power/supply/qcom/smb1390-charger-psy.c
@@ -635,6 +635,15 @@ static void smb1390_taper_work(struct work_struct *work)
goto out;
}
+ if (get_effective_result(chip->fv_votable) >
+ chip->taper_entry_fv) {
+ pr_debug("Float voltage increased. Exiting taper\n");
+ goto out;
+ } else {
+ chip->taper_entry_fv =
+ get_effective_result(chip->fv_votable);
+ }
+
if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
fcc_uA = get_effective_result(chip->fcc_votable)
- 100000;
@@ -646,10 +655,6 @@ static void smb1390_taper_work(struct work_struct *work)
true, 0);
goto out;
}
- } else if (get_effective_result(chip->fv_votable) >
- chip->taper_entry_fv) {
- pr_debug("Float voltage increased. Exiting taper\n");
- goto out;
} else {
pr_debug("In fast charging. Wait for next taper\n");
}
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 9bd05a6..48f0165 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -16,8 +16,10 @@
#include "smb5-lib.h"
#include "smb5-reg.h"
#include "battery.h"
+#include "schgm-flash.h"
#include "step-chg-jeita.h"
#include "storm-watch.h"
+#include "schgm-flash.h"
#define smblib_err(chg, fmt, ...) \
pr_err("%s: %s: " fmt, chg->name, \
@@ -587,6 +589,25 @@ static int smblib_is_input_present(struct smb_charger *chg,
return 0;
}
+#define AICL_RANGE2_MIN_MV 5600
+#define AICL_RANGE2_STEP_DELTA_MV 200
+#define AICL_RANGE2_OFFSET 16
+int smblib_get_aicl_cont_threshold(struct smb_chg_param *param, u8 val_raw)
+{
+ int base = param->min_u;
+ u8 reg = val_raw;
+ int step = param->step_u;
+
+ if (val_raw >= AICL_RANGE2_OFFSET) {
+ reg = val_raw - AICL_RANGE2_OFFSET;
+ base = AICL_RANGE2_MIN_MV;
+ step = AICL_RANGE2_STEP_DELTA_MV;
+ }
+
+ return base + (reg * step);
+}
+
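The continuous-AICL threshold uses two ranges: raw codes below AICL_RANGE2_OFFSET (16) decode linearly from min_u in step_u steps, and codes of 16 and above decode from 5600 mV in 200 mV steps. For example, with min_u = 4000 and step_u = 100, a raw value of 10 decodes to 5000 mV and 20 decodes to 6400 mV. A standalone restatement of the decode (constants mirror the ones defined above):

#define R2_MIN_MV	5600
#define R2_STEP_MV	200
#define R2_OFFSET	16

static int aicl_cont_decode_mv(int min_mv, int step_mv, unsigned char raw)
{
	if (raw >= R2_OFFSET)
		return R2_MIN_MV + (raw - R2_OFFSET) * R2_STEP_MV;

	return min_mv + raw * step_mv;
}
/* aicl_cont_decode_mv(4000, 100, 10) == 5000; (..., 20) == 6400 */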
/********************
* REGISTER SETTERS *
********************/
@@ -739,6 +760,23 @@ static int smblib_set_adapter_allowance(struct smb_charger *chg,
{
int rc = 0;
+ /* PMI632 only support max. 9V */
+ if (chg->smb_version == PMI632_SUBTYPE) {
+ switch (allowed_voltage) {
+ case USBIN_ADAPTER_ALLOW_12V:
+ case USBIN_ADAPTER_ALLOW_9V_TO_12V:
+ allowed_voltage = USBIN_ADAPTER_ALLOW_9V;
+ break;
+ case USBIN_ADAPTER_ALLOW_5V_OR_12V:
+ case USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V:
+ allowed_voltage = USBIN_ADAPTER_ALLOW_5V_OR_9V;
+ break;
+ case USBIN_ADAPTER_ALLOW_5V_TO_12V:
+ allowed_voltage = USBIN_ADAPTER_ALLOW_5V_TO_9V;
+ break;
+ }
+ }
+
rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG, allowed_voltage);
if (rc < 0) {
smblib_err(chg, "Couldn't write 0x%02x to USBIN_ADAPTER_ALLOW_CFG rc=%d\n",
@@ -808,6 +846,29 @@ static int smblib_set_usb_pd_allowed_voltage(struct smb_charger *chg,
return rc;
}
+int smblib_set_aicl_cont_threshold(struct smb_chg_param *param,
+ int val_u, u8 *val_raw)
+{
+ int base = param->min_u;
+ int offset = 0;
+ int step = param->step_u;
+
+ if (val_u > param->max_u)
+ val_u = param->max_u;
+ if (val_u < param->min_u)
+ val_u = param->min_u;
+
+ if (val_u >= AICL_RANGE2_MIN_MV) {
+ base = AICL_RANGE2_MIN_MV;
+ step = AICL_RANGE2_STEP_DELTA_MV;
+ offset = AICL_RANGE2_OFFSET;
+ }
+
+ *val_raw = ((val_u - base) / step) + offset;
+
+ return 0;
+}
+
/********************
* HELPER FUNCTIONS *
********************/
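For reference, the two helpers above implement a piecewise-linear mapping: raw codes below AICL_RANGE2_OFFSET use the parameter's own base and step, while codes at or above it switch to a 5600 mV base with 200 mV steps. Below is a minimal standalone sketch of the round trip; the range-1 values (4000 mV base, 100 mV step, 11800 mV max) are assumed placeholders, not taken from this patch.

#include <stdio.h>

#define AICL_RANGE2_MIN_MV        5600
#define AICL_RANGE2_STEP_DELTA_MV 200
#define AICL_RANGE2_OFFSET        16

struct chg_param {
    int min_u;
    int max_u;
    int step_u;
};

/* mirrors smblib_get_aicl_cont_threshold(): raw code -> millivolts */
static int aicl_decode(const struct chg_param *p, int raw)
{
    int base = p->min_u, step = p->step_u, reg = raw;

    if (raw >= AICL_RANGE2_OFFSET) {
        reg = raw - AICL_RANGE2_OFFSET;
        base = AICL_RANGE2_MIN_MV;
        step = AICL_RANGE2_STEP_DELTA_MV;
    }
    return base + reg * step;
}

/* mirrors smblib_set_aicl_cont_threshold(): millivolts -> raw code */
static int aicl_encode(const struct chg_param *p, int mv)
{
    int base = p->min_u, step = p->step_u, offset = 0;

    if (mv > p->max_u)
        mv = p->max_u;
    if (mv < p->min_u)
        mv = p->min_u;
    if (mv >= AICL_RANGE2_MIN_MV) {
        base = AICL_RANGE2_MIN_MV;
        step = AICL_RANGE2_STEP_DELTA_MV;
        offset = AICL_RANGE2_OFFSET;
    }
    return (mv - base) / step + offset;
}

int main(void)
{
    /* assumed range-1 parameters, not taken from the patch */
    struct chg_param p = { .min_u = 4000, .max_u = 11800, .step_u = 100 };
    int mv;

    for (mv = 4600; mv <= 6000; mv += 1400)
        printf("%d mV -> raw %d -> %d mV\n", mv, aicl_encode(&p, mv),
               aicl_decode(&p, aicl_encode(&p, mv)));
    return 0;
}

With these assumed parameters, 6000 mV encodes to raw 18 and decodes back to 6000 mV, while 4600 mV stays in range 1 as raw 6.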
@@ -1004,7 +1065,6 @@ int smblib_mapping_cc_delta_from_field_value(struct smb_chg_param *param,
return 0;
}
-#define SDP_100_MA 100000
static void smblib_uusb_removal(struct smb_charger *chg)
{
int rc;
@@ -1035,7 +1095,8 @@ static void smblib_uusb_removal(struct smb_charger *chg)
/* reset both usbin current and voltage votes */
vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
- vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true, SDP_100_MA);
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+ is_flash_active(chg) ? SDP_CURRENT_UA : SDP_100_MA);
vote(chg->usb_icl_votable, SW_QC3_VOTER, false, 0);
vote(chg->usb_icl_votable, HVDCP2_ICL_VOTER, false, 0);
@@ -1054,6 +1115,11 @@ static void smblib_uusb_removal(struct smb_charger *chg)
smblib_err(chg, "Couldn't set USBIN_ADAPTER_ALLOW_5V_OR_9V_TO_12V rc=%d\n",
rc);
+ /* reset USBOV votes and cancel work */
+ cancel_delayed_work_sync(&chg->usbov_dbc_work);
+ vote(chg->awake_votable, USBOV_DBC_VOTER, false, 0);
+ chg->dbc_usbov = false;
+
chg->voltage_min_uv = MICRO_5V;
chg->voltage_max_uv = MICRO_5V;
chg->usb_icl_delta_ua = 0;
@@ -1288,8 +1354,7 @@ int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
/* Re-run AICL */
if (icl_override != SW_OVERRIDE_HC_MODE)
- rc = smblib_rerun_aicl(chg);
-
+ rc = smblib_run_aicl(chg, RERUN_AICL);
out:
return rc;
}
@@ -1678,7 +1743,32 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
union power_supply_propval pval = {0, };
bool usb_online, dc_online;
u8 stat;
- int rc;
+ int rc, suspend = 0;
+
+ if (chg->dbc_usbov) {
+ rc = smblib_get_prop_usb_present(chg, &pval);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't get usb present prop rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = smblib_get_usb_suspend(chg, &suspend);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't get usb suspend rc=%d\n", rc);
+ return rc;
+ }
+
+ /*
+ * Report charging as long as USBOV is not debounced and
+ * charging path is un-suspended.
+ */
+ if (pval.intval && !suspend) {
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ return 0;
+ }
+ }
rc = smblib_get_prop_usb_online(chg, &pval);
if (rc < 0) {
@@ -1921,7 +2011,14 @@ int smblib_get_prop_batt_iterm(struct smb_charger *chg,
temp = buf[1] | (buf[0] << 8);
temp = sign_extend32(temp, 15);
- temp = DIV_ROUND_CLOSEST(temp * 10000, ADC_CHG_TERM_MASK);
+
+ if (chg->smb_version == PMI632_SUBTYPE)
+ temp = DIV_ROUND_CLOSEST(temp * ITERM_LIMITS_PMI632_MA,
+ ADC_CHG_ITERM_MASK);
+ else
+ temp = DIV_ROUND_CLOSEST(temp * ITERM_LIMITS_PM8150B_MA,
+ ADC_CHG_ITERM_MASK);
+
val->intval = temp;
return rc;
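The termination-current conversion above scales a sign-extended 16-bit ADC code so that full scale (32767) corresponds to 5000 mA on PMI632 and 10000 mA on PM8150B. A standalone sketch of that arithmetic; the sample register bytes are made up for illustration.

#include <stdint.h>
#include <stdio.h>

#define ADC_CHG_ITERM_MASK      32767
#define ITERM_LIMITS_PMI632_MA  5000
#define ITERM_LIMITS_PM8150B_MA 10000

/* rounded division for the sign-extended code (divisor is positive) */
static int div_round_closest(int n, int d)
{
    return n < 0 ? -((-n + d / 2) / d) : (n + d / 2) / d;
}

/* buf[0] is the MSB, buf[1] the LSB, as in smblib_get_prop_batt_iterm() */
static int iterm_raw_to_ma(uint8_t msb, uint8_t lsb, int full_scale_ma)
{
    int16_t raw = (int16_t)((msb << 8) | lsb); /* sign-extended code */

    return div_round_closest(raw * full_scale_ma, ADC_CHG_ITERM_MASK);
}

int main(void)
{
    /* hypothetical register bytes: 0x0CCD = 3277 counts (~10% of scale) */
    printf("PMI632:  %d mA\n",
           iterm_raw_to_ma(0x0C, 0xCD, ITERM_LIMITS_PMI632_MA));
    printf("PM8150B: %d mA\n",
           iterm_raw_to_ma(0x0C, 0xCD, ITERM_LIMITS_PM8150B_MA));
    return 0;
}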
@@ -2063,7 +2160,7 @@ int smblib_set_prop_rechg_soc_thresh(struct smb_charger *chg,
return rc;
}
-int smblib_rerun_aicl(struct smb_charger *chg)
+int smblib_run_aicl(struct smb_charger *chg, int type)
{
int rc;
u8 stat;
@@ -2081,8 +2178,8 @@ int smblib_rerun_aicl(struct smb_charger *chg)
smblib_dbg(chg, PR_MISC, "re-running AICL\n");
- rc = smblib_masked_write(chg, AICL_CMD_REG, RERUN_AICL_BIT,
- RERUN_AICL_BIT);
+ stat = (type == RERUN_AICL) ? RERUN_AICL_BIT : RESTART_AICL_BIT;
+ rc = smblib_masked_write(chg, AICL_CMD_REG, stat, stat);
if (rc < 0)
smblib_err(chg, "Couldn't write to AICL_CMD_REG rc=%d\n",
rc);
@@ -2117,7 +2214,7 @@ static int smblib_dm_pulse(struct smb_charger *chg)
return rc;
}
-static int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val)
+int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val)
{
int rc;
@@ -3238,30 +3335,44 @@ int smblib_get_prop_typec_select_rp(struct smb_charger *chg,
int smblib_get_prop_usb_current_now(struct smb_charger *chg,
union power_supply_propval *val)
{
+ union power_supply_propval pval = {0, };
int rc = 0;
if (chg->iio.usbin_i_chan) {
rc = iio_read_channel_processed(chg->iio.usbin_i_chan,
&val->intval);
+ if (rc < 0) {
+ pr_err("Error in reading USBIN_I channel, rc=%d\n", rc);
+ return rc;
+ }
/*
* For PM8150B, scaling factor = reciprocal of
* 0.2V/A in Buck mode, 0.4V/A in Boost mode.
*/
- if (smblib_get_prop_ufp_mode(chg) != POWER_SUPPLY_TYPEC_NONE) {
- val->intval *= 5;
- return rc;
- }
-
- if (smblib_get_prop_dfp_mode(chg) != POWER_SUPPLY_TYPEC_NONE) {
+ if (chg->otg_present || smblib_get_prop_dfp_mode(chg) !=
+ POWER_SUPPLY_TYPEC_NONE) {
val->intval = DIV_ROUND_CLOSEST(val->intval * 100, 40);
return rc;
}
+
+ rc = smblib_get_prop_usb_present(chg, &pval);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get usb present status, rc=%d\n",
+ rc);
+ return -ENODATA;
+ }
+
+ /* If USB is not present, return 0 */
+ if (!pval.intval)
+ val->intval = 0;
+ else
+ val->intval *= 5;
} else {
+ val->intval = 0;
rc = -ENODATA;
}
- val->intval = 0;
return rc;
}
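The scaling above follows the 0.2 V/A (buck) and 0.4 V/A (boost) factors noted in the comment: multiply by 5 when sinking, by 100/40 when sourcing. A small sketch, assuming the processed ADC value is reported in microvolts (an assumption, not stated in the patch).

#include <stdio.h>

/*
 * adc_uv: processed USBIN_I channel reading (assumed microvolts);
 * sourcing: nonzero when OTG/DFP, i.e. the 0.4 V/A boost factor applies.
 */
static long usbin_current_ua(long adc_uv, int sourcing)
{
    if (sourcing)
        return (adc_uv * 100 + 20) / 40; /* 1 / 0.4 V/A, rounded */
    return adc_uv * 5;                   /* 1 / 0.2 V/A */
}

int main(void)
{
    long adc_uv = 200000; /* hypothetical 0.2 V reading */

    printf("sinking (buck):   %ld uA\n", usbin_current_ua(adc_uv, 0));
    printf("sourcing (boost): %ld uA\n", usbin_current_ua(adc_uv, 1));
    return 0;
}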
@@ -3452,13 +3563,6 @@ int smblib_get_prop_connector_health(struct smb_charger *chg)
return POWER_SUPPLY_HEALTH_COOL;
}
-#define SDP_CURRENT_UA 500000
-#define CDP_CURRENT_UA 1500000
-#define DCP_CURRENT_UA 1500000
-#define HVDCP_CURRENT_UA 3000000
-#define TYPEC_DEFAULT_CURRENT_UA 900000
-#define TYPEC_MEDIUM_CURRENT_UA 1500000
-#define TYPEC_HIGH_CURRENT_UA 3000000
static int get_rp_based_dcp_current(struct smb_charger *chg, int typec_mode)
{
int rp_ua;
@@ -3498,6 +3602,7 @@ static int smblib_handle_usb_current(struct smb_charger *chg,
int usb_current)
{
int rc = 0, rp_ua, typec_mode;
+ union power_supply_propval val = {0, };
if (chg->real_charger_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
if (usb_current == -ETIMEDOUT) {
@@ -3552,8 +3657,16 @@ static int smblib_handle_usb_current(struct smb_charger *chg,
return rc;
}
} else {
- rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
- true, usb_current);
+ rc = smblib_get_prop_usb_present(chg, &val);
+ if (!rc && !val.intval)
+ return 0;
+
+ /* if flash is active force 500mA */
+ if ((usb_current < SDP_CURRENT_UA) && is_flash_active(chg))
+ usb_current = SDP_CURRENT_UA;
+
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, true,
+ usb_current);
if (rc < 0) {
pr_err("Couldn't vote ICL USB_PSY_VOTER rc=%d\n", rc);
return rc;
@@ -4155,6 +4268,8 @@ irqreturn_t batt_psy_changed_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
+#define AICL_STEP_MV 200
+#define MAX_AICL_THRESHOLD_MV 4800
irqreturn_t usbin_uv_irq_handler(int irq, void *data)
{
struct smb_irq_data *irq_data = data;
@@ -4165,6 +4280,70 @@ irqreturn_t usbin_uv_irq_handler(int irq, void *data)
u8 stat = 0, max_pulses = 0;
smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+ if ((chg->wa_flags & WEAK_ADAPTER_WA)
+ && is_storming(&irq_data->storm_data)) {
+
+ if (chg->aicl_max_reached) {
+ smblib_dbg(chg, PR_MISC,
+ "USBIN_UV storm at max AICL threshold\n");
+ return IRQ_HANDLED;
+ }
+
+ smblib_dbg(chg, PR_MISC, "USBIN_UV storm at threshold %d\n",
+ chg->aicl_5v_threshold_mv);
+
+ /* suspend USBIN before updating AICL threshold */
+ vote(chg->usb_icl_votable, AICL_THRESHOLD_VOTER, true, 0);
+
+ /* delay for VASHDN deglitch */
+ msleep(20);
+
+ if (chg->aicl_5v_threshold_mv > MAX_AICL_THRESHOLD_MV) {
+ /* reached max AICL threshold */
+ chg->aicl_max_reached = true;
+ goto unsuspend_input;
+ }
+
+ /* Increase AICL threshold by 200mV */
+ rc = smblib_set_charge_param(chg, &chg->param.aicl_5v_threshold,
+ chg->aicl_5v_threshold_mv + AICL_STEP_MV);
+ if (rc < 0)
+ dev_err(chg->dev,
+ "Error in setting AICL threshold rc=%d\n", rc);
+ else
+ chg->aicl_5v_threshold_mv += AICL_STEP_MV;
+
+ rc = smblib_set_charge_param(chg,
+ &chg->param.aicl_cont_threshold,
+ chg->aicl_cont_threshold_mv + AICL_STEP_MV);
+ if (rc < 0)
+ dev_err(chg->dev,
+ "Error in setting AICL threshold rc=%d\n", rc);
+ else
+ chg->aicl_cont_threshold_mv += AICL_STEP_MV;
+
+unsuspend_input:
+ /* Force torch in boost mode to ensure it works with low ICL */
+ if (chg->smb_version == PMI632_SUBTYPE)
+ schgm_flash_torch_priority(chg, TORCH_BOOST_MODE);
+
+ if (chg->aicl_max_reached) {
+ smblib_dbg(chg, PR_MISC,
+ "Reached max AICL threshold restricting ICL to 100mA\n");
+ vote(chg->usb_icl_votable, AICL_THRESHOLD_VOTER,
+ true, USBIN_100MA);
+ smblib_run_aicl(chg, RESTART_AICL);
+ } else {
+ smblib_run_aicl(chg, RESTART_AICL);
+ vote(chg->usb_icl_votable, AICL_THRESHOLD_VOTER,
+ false, 0);
+ }
+
+ wdata = &chg->irq_info[USBIN_UV_IRQ].irq_data->storm_data;
+ reset_storm_count(wdata);
+ }
+
if (!chg->irq_info[SWITCHER_POWER_OK_IRQ].irq_data)
return IRQ_HANDLED;
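The weak-adapter workaround above raises both AICL thresholds by 200 mV on every debounced USBIN_UV storm until the 5V threshold exceeds 4800 mV, after which ICL is pinned at 100 mA. A standalone sketch of that ramp, using an assumed 4000 mV default threshold.

#include <stdbool.h>
#include <stdio.h>

#define AICL_STEP_MV          200
#define MAX_AICL_THRESHOLD_MV 4800

int main(void)
{
    int threshold_mv = 4000; /* assumed default 5V AICL threshold */
    bool max_reached = false;
    int storm;

    for (storm = 1; !max_reached; storm++) {
        if (threshold_mv > MAX_AICL_THRESHOLD_MV) {
            /* pin ICL to 100 mA and stop ramping */
            max_reached = true;
            printf("storm %d: max threshold reached, ICL -> 100 mA\n",
                   storm);
            break;
        }
        threshold_mv += AICL_STEP_MV;
        printf("storm %d: AICL threshold raised to %d mV\n",
               storm, threshold_mv);
    }
    return 0;
}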
@@ -4233,6 +4412,24 @@ irqreturn_t icl_change_irq_handler(int irq, void *data)
struct smb_charger *chg = irq_data->parent_data;
if (chg->mode == PARALLEL_MASTER) {
+ /*
+ * Ignore if change in ICL is due to DIE temp mitigation.
+ * This is to prevent any further ICL split.
+ */
+ if (chg->hw_die_temp_mitigation) {
+ rc = smblib_read(chg, DIE_TEMP_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg,
+ "Couldn't read DIE_TEMP rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+ if (stat & (DIE_TEMP_UB_BIT | DIE_TEMP_LB_BIT)) {
+ smblib_dbg(chg, PR_PARALLEL,
+ "skip ICL change DIE_TEMP %x\n", stat);
+ return IRQ_HANDLED;
+ }
+ }
+
rc = smblib_read(chg, AICL_STATUS_REG, &stat);
if (rc < 0) {
smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n",
@@ -4376,6 +4573,33 @@ void smblib_usb_plugin_locked(struct smb_charger *chg)
vote(chg->fcc_votable, FCC_STEPPER_VOTER,
true, 1500000);
+ if (chg->wa_flags & WEAK_ADAPTER_WA) {
+ chg->aicl_5v_threshold_mv =
+ chg->default_aicl_5v_threshold_mv;
+ chg->aicl_cont_threshold_mv =
+ chg->default_aicl_cont_threshold_mv;
+
+ smblib_set_charge_param(chg,
+ &chg->param.aicl_5v_threshold,
+ chg->aicl_5v_threshold_mv);
+ smblib_set_charge_param(chg,
+ &chg->param.aicl_cont_threshold,
+ chg->aicl_cont_threshold_mv);
+ chg->aicl_max_reached = false;
+
+ if (chg->smb_version == PMI632_SUBTYPE)
+ schgm_flash_torch_priority(chg,
+ TORCH_BUCK_MODE);
+
+ data = chg->irq_info[USBIN_UV_IRQ].irq_data;
+ if (data) {
+ wdata = &data->storm_data;
+ reset_storm_count(wdata);
+ }
+ vote(chg->usb_icl_votable, AICL_THRESHOLD_VOTER,
+ false, 0);
+ }
+
rc = smblib_request_dpdm(chg, false);
if (rc < 0)
smblib_err(chg, "Couldn't disable DPDM rc=%d\n", rc);
@@ -4508,9 +4732,12 @@ static void update_sw_icl_max(struct smb_charger *chg, int pst)
* enumeration is done.
*/
if (!is_client_vote_enabled(chg->usb_icl_votable,
- USB_PSY_VOTER))
+ USB_PSY_VOTER)) {
+ /* if flash is active force 500mA */
vote(chg->usb_icl_votable, USB_PSY_VOTER, true,
- SDP_100_MA);
+ is_flash_active(chg) ?
+ SDP_CURRENT_UA : SDP_100_MA);
+ }
vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, false, 0);
break;
case POWER_SUPPLY_TYPE_USB_CDP:
@@ -4801,7 +5028,8 @@ static void typec_src_removal(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->pl_enable_work);
/* reset input current limit voters */
- vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true, SDP_100_MA);
+ vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER, true,
+ is_flash_active(chg) ? SDP_CURRENT_UA : SDP_100_MA);
vote(chg->usb_icl_votable, PD_VOTER, false, 0);
vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
@@ -4829,6 +5057,11 @@ static void typec_src_removal(struct smb_charger *chg)
vote(chg->cp_disable_votable, SW_THERM_REGULATION_VOTER,
false, 0);
+ /* reset USBOV votes and cancel work */
+ cancel_delayed_work_sync(&chg->usbov_dbc_work);
+ vote(chg->awake_votable, USBOV_DBC_VOTER, false, 0);
+ chg->dbc_usbov = false;
+
chg->pulse_cnt = 0;
chg->usb_icl_delta_ua = 0;
chg->voltage_min_uv = MICRO_5V;
@@ -5277,6 +5510,57 @@ irqreturn_t temp_change_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
+static void smblib_usbov_dbc_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ usbov_dbc_work.work);
+
+ smblib_dbg(chg, PR_MISC, "Resetting USBOV debounce\n");
+ chg->dbc_usbov = false;
+ vote(chg->awake_votable, USBOV_DBC_VOTER, false, 0);
+}
+
+#define USB_OV_DBC_PERIOD_MS 1000
+irqreturn_t usbin_ov_irq_handler(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ u8 stat;
+ int rc;
+
+ smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s\n", irq_data->name);
+
+ if (!(chg->wa_flags & USBIN_OV_WA))
+ return IRQ_HANDLED;
+
+ rc = smblib_read(chg, USBIN_BASE + INT_RT_STS_OFFSET, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read USB_INT_RT_STS rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ /*
+ * On specific PMICs, the OV IRQ can trigger for a very short duration
+ * during interim periods, affecting how the charging status is reported.
+ * To differentiate an OV IRQ glitch from a real OV event, add a debounce
+ * period for evaluation.
+ */
+ if (stat & USBIN_OV_RT_STS_BIT) {
+ chg->dbc_usbov = true;
+ vote(chg->awake_votable, USBOV_DBC_VOTER, true, 0);
+ schedule_delayed_work(&chg->usbov_dbc_work,
+ msecs_to_jiffies(USB_OV_DBC_PERIOD_MS));
+ } else {
+ cancel_delayed_work_sync(&chg->usbov_dbc_work);
+ chg->dbc_usbov = false;
+ vote(chg->awake_votable, USBOV_DBC_VOTER, false, 0);
+ }
+
+ smblib_dbg(chg, PR_MISC, "USBOV debounce status %d\n",
+ chg->dbc_usbov);
+ return IRQ_HANDLED;
+}
+
/**************
* Additional USB PSY getters/setters
* that call interrupt functions
@@ -5944,6 +6228,7 @@ int smblib_init(struct smb_charger *chg)
INIT_DELAYED_WORK(&chg->lpd_detach_work, smblib_lpd_detach_work);
INIT_DELAYED_WORK(&chg->thermal_regulation_work,
smblib_thermal_regulation_work);
+ INIT_DELAYED_WORK(&chg->usbov_dbc_work, smblib_usbov_dbc_work);
if (chg->uusb_moisture_protection_enabled) {
INIT_WORK(&chg->moisture_protection_work,
@@ -6065,6 +6350,7 @@ int smblib_deinit(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->lpd_ra_open_work);
cancel_delayed_work_sync(&chg->lpd_detach_work);
cancel_delayed_work_sync(&chg->thermal_regulation_work);
+ cancel_delayed_work_sync(&chg->usbov_dbc_work);
power_supply_unreg_notifier(&chg->nb);
smblib_destroy_votables(chg);
qcom_step_chg_deinit();
diff --git a/drivers/power/supply/qcom/smb5-lib.h b/drivers/power/supply/qcom/smb5-lib.h
index 375c2bc..b7dd7bb 100644
--- a/drivers/power/supply/qcom/smb5-lib.h
+++ b/drivers/power/supply/qcom/smb5-lib.h
@@ -63,13 +63,26 @@ enum print_reason {
#define JEITA_ARB_VOTER "JEITA_ARB_VOTER"
#define MOISTURE_VOTER "MOISTURE_VOTER"
#define HVDCP2_ICL_VOTER "HVDCP2_ICL_VOTER"
+#define AICL_THRESHOLD_VOTER "AICL_THRESHOLD_VOTER"
+#define USBOV_DBC_VOTER "USBOV_DBC_VOTER"
#define BOOST_BACK_STORM_COUNT 3
#define WEAK_CHG_STORM_COUNT 8
#define VBAT_TO_VRAW_ADC(v) div_u64((u64)v * 1000000UL, 194637UL)
-#define ADC_CHG_TERM_MASK 32767
+#define ITERM_LIMITS_PMI632_MA 5000
+#define ITERM_LIMITS_PM8150B_MA 10000
+#define ADC_CHG_ITERM_MASK 32767
+
+#define SDP_100_MA 100000
+#define SDP_CURRENT_UA 500000
+#define CDP_CURRENT_UA 1500000
+#define DCP_CURRENT_UA 1500000
+#define HVDCP_CURRENT_UA 3000000
+#define TYPEC_DEFAULT_CURRENT_UA 900000
+#define TYPEC_MEDIUM_CURRENT_UA 1500000
+#define TYPEC_HIGH_CURRENT_UA 3000000
enum smb_mode {
PARALLEL_MASTER = 0,
@@ -92,6 +105,8 @@ enum qc2_non_comp_voltage {
enum {
BOOST_BACK_WA = BIT(0),
SW_THERM_REGULATION_WA = BIT(1),
+ WEAK_ADAPTER_WA = BIT(2),
+ USBIN_OV_WA = BIT(3),
};
enum jeita_cfg_stat {
@@ -100,6 +115,11 @@ enum jeita_cfg_stat {
JEITA_CFG_COMPLETE,
};
+enum {
+ RERUN_AICL = 0,
+ RESTART_AICL,
+};
+
enum smb_irq_index {
/* CHGR */
CHGR_ERROR_IRQ = 0,
@@ -293,6 +313,8 @@ struct smb_params {
struct smb_chg_param jeita_cc_comp_hot;
struct smb_chg_param jeita_cc_comp_cold;
struct smb_chg_param freq_switcher;
+ struct smb_chg_param aicl_5v_threshold;
+ struct smb_chg_param aicl_cont_threshold;
};
struct parallel_params {
@@ -382,6 +404,7 @@ struct smb_charger {
struct delayed_work lpd_ra_open_work;
struct delayed_work lpd_detach_work;
struct delayed_work thermal_regulation_work;
+ struct delayed_work usbov_dbc_work;
struct alarm lpd_recheck_timer;
struct alarm moisture_protection_alarm;
@@ -455,12 +478,22 @@ struct smb_charger {
bool moisture_present;
bool uusb_moisture_protection_capable;
bool uusb_moisture_protection_enabled;
+ bool hw_die_temp_mitigation;
+ bool hw_connector_mitigation;
+ bool hw_skin_temp_mitigation;
+ int connector_pull_up;
+ int aicl_5v_threshold_mv;
+ int default_aicl_5v_threshold_mv;
+ int aicl_cont_threshold_mv;
+ int default_aicl_cont_threshold_mv;
+ bool aicl_max_reached;
/* workaround flag */
u32 wa_flags;
int boost_current_ua;
int qc2_max_pulses;
enum qc2_non_comp_voltage qc2_unsupported_voltage;
+ bool dbc_usbov;
/* extcon for VBUS / ID notification to USB for uUSB */
struct extcon_dev *extcon;
@@ -495,7 +528,7 @@ int smblib_batch_read(struct smb_charger *chg, u16 addr, u8 *val, int count);
int smblib_get_charge_param(struct smb_charger *chg,
struct smb_chg_param *param, int *val_u);
int smblib_get_usb_suspend(struct smb_charger *chg, int *suspend);
-
+int smblib_get_aicl_cont_threshold(struct smb_chg_param *param, u8 val_raw);
int smblib_enable_charging(struct smb_charger *chg, bool enable);
int smblib_set_charge_param(struct smb_charger *chg,
struct smb_chg_param *param, int val_u);
@@ -512,6 +545,8 @@ int smblib_set_chg_freq(struct smb_chg_param *param,
int val_u, u8 *val_raw);
int smblib_set_prop_boost_current(struct smb_charger *chg,
const union power_supply_propval *val);
+int smblib_set_aicl_cont_threshold(struct smb_chg_param *param,
+ int val_u, u8 *val_raw);
int smblib_vbus_regulator_enable(struct regulator_dev *rdev);
int smblib_vbus_regulator_disable(struct regulator_dev *rdev);
int smblib_vbus_regulator_is_enabled(struct regulator_dev *rdev);
@@ -537,6 +572,7 @@ irqreturn_t wdog_snarl_irq_handler(int irq, void *data);
irqreturn_t wdog_bark_irq_handler(int irq, void *data);
irqreturn_t typec_or_rid_detection_change_irq_handler(int irq, void *data);
irqreturn_t temp_change_irq_handler(int irq, void *data);
+irqreturn_t usbin_ov_irq_handler(int irq, void *data);
int smblib_get_prop_input_suspend(struct smb_charger *chg,
union power_supply_propval *val);
@@ -654,7 +690,7 @@ int smblib_get_prop_fcc_delta(struct smb_charger *chg,
int smblib_get_thermal_threshold(struct smb_charger *chg, u16 addr, int *val);
int smblib_dp_dm(struct smb_charger *chg, int val);
int smblib_disable_hw_jeita(struct smb_charger *chg, bool disable);
-int smblib_rerun_aicl(struct smb_charger *chg);
+int smblib_run_aicl(struct smb_charger *chg, int type);
int smblib_set_icl_current(struct smb_charger *chg, int icl_ua);
int smblib_get_icl_current(struct smb_charger *chg, int *icl_ua);
int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua);
@@ -676,6 +712,7 @@ enum alarmtimer_restart smblib_lpd_recheck_timer(struct alarm *alarm,
int smblib_toggle_smb_en(struct smb_charger *chg, int toggle);
void smblib_hvdcp_detect_enable(struct smb_charger *chg, bool enable);
void smblib_apsd_enable(struct smb_charger *chg, bool enable);
+int smblib_force_vbus_voltage(struct smb_charger *chg, u8 val);
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb5-reg.h b/drivers/power/supply/qcom/smb5-reg.h
index 838c527..aa9a000 100644
--- a/drivers/power/supply/qcom/smb5-reg.h
+++ b/drivers/power/supply/qcom/smb5-reg.h
@@ -119,7 +119,7 @@ enum {
* DCDC Peripheral Registers *
********************************/
#define ICL_MAX_STATUS_REG (DCDC_BASE + 0x06)
-
+#define ICL_STATUS_REG (DCDC_BASE + 0x07)
#define AICL_ICL_STATUS_REG (DCDC_BASE + 0x08)
#define AICL_STATUS_REG (DCDC_BASE + 0x0A)
@@ -171,6 +171,19 @@ enum {
#define SHIP_MODE_REG (BATIF_BASE + 0x40)
#define SHIP_MODE_EN_BIT BIT(0)
+#define BATIF_ADC_CHANNEL_EN_REG (BATIF_BASE + 0x82)
+#define CONN_THM_CHANNEL_EN_BIT BIT(4)
+#define DIE_TEMP_CHANNEL_EN_BIT BIT(2)
+#define MISC_THM_CHANNEL_EN_BIT BIT(1)
+
+#define BATIF_ADC_INTERNAL_PULL_UP_REG (BATIF_BASE + 0x86)
+#define INTERNAL_PULL_UP_CONN_THM_MASK GENMASK(5, 4)
+#define CONN_THM_SHIFT 4
+#define INTERNAL_PULL_NO_PULL 0x00
+#define INTERNAL_PULL_30K_PULL 0x01
+#define INTERNAL_PULL_100K_PULL 0x02
+#define INTERNAL_PULL_400K_PULL 0x03
+
/********************************
* USBIN Peripheral Registers *
********************************/
@@ -224,6 +237,7 @@ enum {
#define FORCE_12V_BIT BIT(5)
#define FORCE_9V_BIT BIT(4)
#define FORCE_5V_BIT BIT(3)
+#define IDLE_BIT BIT(2)
#define SINGLE_DECREMENT_BIT BIT(1)
#define SINGLE_INCREMENT_BIT BIT(0)
@@ -288,6 +302,8 @@ enum {
#define USB_ENG_SSUPPLY_USB2_REG (USBIN_BASE + 0xC0)
#define ENG_SSUPPLY_12V_OV_OPT_BIT BIT(1)
+#define USBIN_5V_AICL_THRESHOLD_REG (USBIN_BASE + 0x81)
+#define USBIN_CONT_AICL_THRESHOLD_REG (USBIN_BASE + 0x84)
/********************************
* DCIN Peripheral Registers *
********************************/
@@ -449,6 +465,7 @@ enum {
#define BARK_BITE_WDOG_PET_BIT BIT(0)
#define AICL_CMD_REG (MISC_BASE + 0x44)
+#define RESTART_AICL_BIT BIT(1)
#define RERUN_AICL_BIT BIT(0)
#define MISC_SMB_EN_CMD_REG (MISC_BASE + 0x48)
@@ -480,7 +497,12 @@ enum {
#define MISC_THERMREG_SRC_CFG_REG (MISC_BASE + 0x70)
#define THERMREG_SW_ICL_ADJUST_BIT BIT(7)
+#define DIE_ADC_SEL_BIT BIT(6)
#define THERMREG_SMB_ADC_SRC_EN_BIT BIT(5)
+#define THERMREG_CONNECTOR_ADC_SRC_EN_BIT BIT(4)
+#define SKIN_ADC_CFG_BIT BIT(3)
+#define THERMREG_SKIN_ADC_SRC_EN_BIT BIT(2)
+#define THERMREG_DIE_ADC_SRC_EN_BIT BIT(1)
#define THERMREG_DIE_CMP_SRC_EN_BIT BIT(0)
#define MISC_SMB_CFG_REG (MISC_BASE + 0x90)
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 2012551..796eeff 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -228,7 +228,9 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
pct++;
- ptp->info->gettime64(ptp->info, &ts);
+ err = ptp->info->gettime64(ptp->info, &ts);
+ if (err)
+ goto out;
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
pct++;
@@ -281,6 +283,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
break;
}
+out:
kfree(sysoff);
return err;
}
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 7eacc1c..c64903a 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -253,8 +253,10 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid,
ptp, ptp->pin_attr_groups,
"ptp%d", ptp->index);
- if (IS_ERR(ptp->dev))
+ if (IS_ERR(ptp->dev)) {
+ err = PTR_ERR(ptp->dev);
goto no_device;
+ }
/* Register a new PPS source. */
if (info->pps) {
@@ -265,6 +267,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
pps.owner = info->owner;
ptp->pps_source = pps_register_source(&pps, PTP_PPS_DEFAULTS);
if (!ptp->pps_source) {
+ err = -EINVAL;
pr_err("failed to register pps source\n");
goto no_pps;
}
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c
index 770596e..d859315 100644
--- a/drivers/rpmsg/qcom_glink_native.c
+++ b/drivers/rpmsg/qcom_glink_native.c
@@ -22,6 +22,7 @@
#include <linux/kthread.h>
#include <linux/mailbox_client.h>
#include <linux/ipc_logging.h>
+#include <soc/qcom/subsystem_notif.h>
#include "rpmsg_internal.h"
#include "qcom_glink_native.h"
@@ -284,13 +285,22 @@ static void qcom_glink_channel_release(struct kref *ref)
{
struct glink_channel *channel = container_of(ref, struct glink_channel,
refcount);
+ struct glink_core_rx_intent *tmp;
unsigned long flags;
+ int iid;
CH_INFO(channel, "\n");
wake_up(&channel->intent_req_event);
spin_lock_irqsave(&channel->intent_lock, flags);
+ idr_for_each_entry(&channel->liids, tmp, iid) {
+ kfree(tmp->data);
+ kfree(tmp);
+ }
idr_destroy(&channel->liids);
+
+ idr_for_each_entry(&channel->riids, tmp, iid)
+ kfree(tmp);
idr_destroy(&channel->riids);
spin_unlock_irqrestore(&channel->intent_lock, flags);
@@ -347,6 +357,9 @@ static int qcom_glink_tx(struct qcom_glink *glink,
if (tlen >= glink->tx_pipe->length)
return -EINVAL;
+ if (atomic_read(&glink->in_reset))
+ return -ECONNRESET;
+
spin_lock_irqsave(&glink->tx_lock, flags);
while (qcom_glink_tx_avail(glink) < tlen) {
@@ -813,9 +826,11 @@ static void qcom_glink_handle_rx_done(struct qcom_glink *glink,
static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
u32 cid, size_t size)
{
- struct glink_core_rx_intent *intent;
+ struct glink_core_rx_intent *intent = NULL;
+ struct glink_core_rx_intent *tmp;
struct glink_channel *channel;
unsigned long flags;
+ int iid;
spin_lock_irqsave(&glink->idr_lock, flags);
channel = idr_find(&glink->rcids, cid);
@@ -826,6 +841,19 @@ static void qcom_glink_handle_intent_req(struct qcom_glink *glink,
return;
}
+ spin_lock_irqsave(&channel->intent_lock, flags);
+ idr_for_each_entry(&channel->liids, tmp, iid) {
+ if (tmp->size >= size && tmp->reuse) {
+ intent = tmp;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&channel->intent_lock, flags);
+ if (intent) {
+ qcom_glink_send_intent_req_ack(glink, channel, !!intent);
+ return;
+ }
+
intent = qcom_glink_alloc_intent(glink, channel, size, false);
if (intent)
qcom_glink_advertise_intent(glink, channel, intent);
@@ -1362,9 +1390,6 @@ static void qcom_glink_destroy_ept(struct rpmsg_endpoint *ept)
channel->ept.cb = NULL;
spin_unlock_irqrestore(&channel->recv_lock, flags);
- /* Decouple the potential rpdev from the channel */
- channel->rpdev = NULL;
-
qcom_glink_send_close_req(glink, channel);
}
@@ -1426,6 +1451,8 @@ static int __qcom_glink_send(struct glink_channel *channel,
} __packed req;
int ret;
unsigned long flags;
+ int chunk_size = len;
+ int left_size = 0;
if (!glink->intentless) {
while (!intent) {
@@ -1448,6 +1475,9 @@ static int __qcom_glink_send(struct glink_channel *channel,
if (intent)
break;
+ if (atomic_read(&glink->in_reset))
+ return -ECONNRESET;
+
if (!wait)
return -EBUSY;
@@ -1459,18 +1489,46 @@ static int __qcom_glink_send(struct glink_channel *channel,
iid = intent->id;
}
+ if (wait && (chunk_size > SZ_8K)) {
+ chunk_size = SZ_8K;
+ left_size = len - chunk_size;
+ }
req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA);
req.msg.param1 = cpu_to_le16(channel->lcid);
req.msg.param2 = cpu_to_le32(iid);
- req.chunk_size = cpu_to_le32(len);
- req.left_size = cpu_to_le32(0);
+ req.chunk_size = cpu_to_le32(chunk_size);
+ req.left_size = cpu_to_le32(left_size);
- ret = qcom_glink_tx(glink, &req, sizeof(req), data, len, wait);
+ ret = qcom_glink_tx(glink, &req, sizeof(req), data, chunk_size, wait);
/* Mark intent available if we failed */
- if (ret && intent)
+ if (ret && intent) {
intent->in_use = false;
+ return ret;
+ }
+ while (left_size > 0) {
+ data = (void *)((char *)data + chunk_size);
+ chunk_size = left_size;
+ if (chunk_size > SZ_8K)
+ chunk_size = SZ_8K;
+ left_size -= chunk_size;
+
+ req.msg.cmd = cpu_to_le16(RPM_CMD_TX_DATA_CONT);
+ req.msg.param1 = cpu_to_le16(channel->lcid);
+ req.msg.param2 = cpu_to_le32(iid);
+ req.chunk_size = cpu_to_le32(chunk_size);
+ req.left_size = cpu_to_le32(left_size);
+
+ ret = qcom_glink_tx(glink, &req, sizeof(req), data,
+ chunk_size, wait);
+
+ /* Mark intent available if we failed */
+ if (ret && intent) {
+ intent->in_use = false;
+ break;
+ }
+ }
return ret;
}
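The blocking-send path above now splits payloads larger than 8 KiB into an initial TX_DATA fragment followed by TX_DATA_CONT fragments, each carrying its chunk size and the number of bytes still outstanding. A standalone sketch of that bookkeeping; send_fragment() is a hypothetical stand-in for qcom_glink_tx().

#include <stddef.h>
#include <stdio.h>

#define SZ_8K 8192

/* hypothetical stand-in for qcom_glink_tx(): just logs the fragment */
static void send_fragment(int cont, size_t chunk, size_t left)
{
    printf("%s: chunk_size=%zu left_size=%zu\n",
           cont ? "TX_DATA_CONT" : "TX_DATA", chunk, left);
}

static void send_chunked(size_t len)
{
    size_t chunk = len, left = 0;

    if (chunk > SZ_8K) {
        chunk = SZ_8K;
        left = len - chunk;
    }
    send_fragment(0, chunk, left);

    while (left > 0) {
        chunk = left > SZ_8K ? SZ_8K : left;
        left -= chunk;
        send_fragment(1, chunk, left);
    }
}

int main(void)
{
    send_chunked(20000); /* 8192 + 8192 + 3616 */
    return 0;
}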
@@ -1614,7 +1672,7 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
ret = rpmsg_register_device(rpdev);
if (ret)
- goto free_rpdev;
+ goto rcid_remove;
channel->rpdev = rpdev;
}
@@ -1622,9 +1680,6 @@ static int qcom_glink_rx_open(struct qcom_glink *glink, unsigned int rcid,
return 0;
-free_rpdev:
- CH_INFO(channel, "free_rpdev\n");
- kfree(rpdev);
rcid_remove:
CH_INFO(channel, "rcid_remove\n");
spin_lock_irqsave(&glink->idr_lock, flags);
@@ -1663,6 +1718,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
rpmsg_unregister_device(glink->dev, &chinfo);
}
+ channel->rpdev = NULL;
qcom_glink_send_close_ack(glink, channel->rcid);
@@ -1676,6 +1732,7 @@ static void qcom_glink_rx_close(struct qcom_glink *glink, unsigned int rcid)
static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
{
+ struct rpmsg_channel_info chinfo;
struct glink_channel *channel;
unsigned long flags;
@@ -1691,6 +1748,16 @@ static void qcom_glink_rx_close_ack(struct qcom_glink *glink, unsigned int lcid)
channel->lcid = 0;
spin_unlock_irqrestore(&glink->idr_lock, flags);
+ /* Decouple the potential rpdev from the channel */
+ if (channel->rpdev) {
+ strlcpy(chinfo.name, channel->name, sizeof(chinfo.name));
+ chinfo.src = RPMSG_ADDR_ANY;
+ chinfo.dst = RPMSG_ADDR_ANY;
+
+ rpmsg_unregister_device(glink->dev, &chinfo);
+ }
+ channel->rpdev = NULL;
+
kref_put(&channel->refcount, qcom_glink_channel_release);
}
@@ -1816,6 +1883,23 @@ static void qcom_glink_set_affinity(struct qcom_glink *glink, u32 *arr,
dev_err(glink->dev, "failed to set task affinity\n");
}
+static void qcom_glink_notif_reset(void *data)
+{
+ struct qcom_glink *glink = data;
+ struct glink_channel *channel;
+ unsigned long flags;
+ int cid;
+
+ if (!glink)
+ return;
+ atomic_inc(&glink->in_reset);
+
+ spin_lock_irqsave(&glink->idr_lock, flags);
+ idr_for_each_entry(&glink->lcids, channel, cid) {
+ wake_up(&channel->intent_req_event);
+ }
+ spin_unlock_irqrestore(&glink->idr_lock, flags);
+}
struct qcom_glink *qcom_glink_native_probe(struct device *dev,
unsigned long features,
@@ -1874,6 +1958,11 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
return ERR_CAST(glink->task);
}
+ ret = subsys_register_early_notifier(glink->name, XPORT_LAYER_NOTIF,
+ qcom_glink_notif_reset, glink);
+ if (ret)
+ dev_err(dev, "failed to register early notif %d\n", ret);
+
irq = of_irq_get(dev->of_node, 0);
ret = devm_request_irq(dev, irq,
qcom_glink_native_intr,
@@ -1881,7 +1970,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
"glink-native", glink);
if (ret) {
dev_err(dev, "failed to request IRQ\n");
- return ERR_PTR(ret);
+ goto unregister;
}
glink->irq = irq;
@@ -1889,8 +1978,10 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
size = of_property_count_u32_elems(dev->of_node, "cpu-affinity");
if (size > 0) {
arr = kmalloc_array(size, sizeof(u32), GFP_KERNEL);
- if (!arr)
- return ERR_PTR(-ENOMEM);
+ if (!arr) {
+ ret = -ENOMEM;
+ goto unregister;
+ }
ret = of_property_read_u32_array(dev->of_node, "cpu-affinity",
arr, size);
if (!ret)
@@ -1901,7 +1992,7 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
ret = qcom_glink_send_version(glink);
if (ret) {
dev_err(dev, "failed to send version %d\n", ret);
- return ERR_PTR(ret);
+ goto unregister;
}
ret = qcom_glink_create_chrdev(glink);
@@ -1911,6 +2002,10 @@ struct qcom_glink *qcom_glink_native_probe(struct device *dev,
glink->ilc = ipc_log_context_create(GLINK_LOG_PAGE_CNT, glink->name, 0);
return glink;
+
+unregister:
+ subsys_unregister_early_notifier(glink->name, XPORT_LAYER_NOTIF);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(qcom_glink_native_probe);
@@ -1928,17 +2023,11 @@ void qcom_glink_native_remove(struct qcom_glink *glink)
int ret;
unsigned long flags;
- atomic_inc(&glink->in_reset);
+ subsys_unregister_early_notifier(glink->name, XPORT_LAYER_NOTIF);
+ qcom_glink_notif_reset(glink);
disable_irq(glink->irq);
cancel_work_sync(&glink->rx_work);
- /* Signal all threads to cancel tx */
- spin_lock_irqsave(&glink->idr_lock, flags);
- idr_for_each_entry(&glink->lcids, channel, cid) {
- wake_up(&channel->intent_req_event);
- }
- spin_unlock_irqrestore(&glink->idr_lock, flags);
-
ret = device_for_each_child(glink->dev, NULL, qcom_glink_remove_device);
if (ret)
dev_warn(glink->dev, "Can't remove GLINK devices: %d\n", ret);
diff --git a/drivers/rpmsg/qcom_glink_rpm.c b/drivers/rpmsg/qcom_glink_rpm.c
index f64f45d..baadf7d 100644
--- a/drivers/rpmsg/qcom_glink_rpm.c
+++ b/drivers/rpmsg/qcom_glink_rpm.c
@@ -338,7 +338,7 @@ static int __init glink_rpm_init(void)
{
return platform_driver_register(&glink_rpm_driver);
}
-subsys_initcall(glink_rpm_init);
+postcore_initcall(glink_rpm_init);
static void __exit glink_rpm_exit(void)
{
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f039266..a57b969 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -249,7 +249,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr)
static inline int ap_test_config_card_id(unsigned int id)
{
if (!ap_configuration) /* QCI not supported */
- return 1;
+ /* only ids 0...3F may be probed */
+ return id < 0x40 ? 1 : 0;
return ap_test_config(ap_configuration->apm, id);
}
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index 6f7ebc1..2e1a27b 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -52,6 +52,7 @@ struct error_hdr {
#define REP82_ERROR_FORMAT_FIELD 0x29
#define REP82_ERROR_INVALID_COMMAND 0x30
#define REP82_ERROR_MALFORMED_MSG 0x40
+#define REP82_ERROR_INVALID_SPECIAL_CMD 0x41
#define REP82_ERROR_INVALID_DOMAIN_PRECHECK 0x42
#define REP82_ERROR_RESERVED_FIELDO 0x50 /* old value */
#define REP82_ERROR_WORD_ALIGNMENT 0x60
@@ -90,6 +91,7 @@ static inline int convert_error(struct zcrypt_queue *zq,
case REP88_ERROR_MESSAGE_MALFORMD:
case REP82_ERROR_INVALID_DOMAIN_PRECHECK:
case REP82_ERROR_INVALID_DOMAIN_PENDING:
+ case REP82_ERROR_INVALID_SPECIAL_CMD:
// REP88_ERROR_INVALID_KEY // '82' CEX2A
// REP88_ERROR_OPERAND // '84' CEX2A
// REP88_ERROR_OPERAND_EVEN_MOD // '85' CEX2A
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 1391e5f..702da90 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -281,7 +281,7 @@ static ssize_t asd_show_dev_rev(struct device *dev,
return snprintf(buf, PAGE_SIZE, "%s\n",
asd_dev_rev[asd_ha->revision_id]);
}
-static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL);
+static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL);
static ssize_t asd_show_dev_bios_build(struct device *dev,
struct device_attribute *attr,char *buf)
@@ -478,7 +478,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
{
int err;
- err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+ err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
if (err)
return err;
@@ -500,13 +500,13 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
err_biosb:
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
err_rev:
- device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
return err;
}
static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
{
- device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision);
+ device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision);
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build);
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn);
device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios);
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index bf07735..0fc382c 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -1144,7 +1144,7 @@ static void ddp_clear_map(struct cxgbi_device *cdev, struct cxgbi_ppm *ppm,
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
- unsigned int tid, int pg_idx, bool reply)
+ unsigned int tid, int pg_idx)
{
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
GFP_KERNEL);
@@ -1160,7 +1160,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
req = (struct cpl_set_tcb_field *)skb->head;
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply = V_NO_REPLY(reply ? 0 : 1);
+ req->reply = V_NO_REPLY(1);
req->cpu_idx = 0;
req->word = htons(31);
req->mask = cpu_to_be64(0xF0000000);
@@ -1177,11 +1177,10 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk,
* @tid: connection id
* @hcrc: header digest enabled
* @dcrc: data digest enabled
- * @reply: request reply from h/w
* set up the iscsi digest settings for a connection identified by tid
*/
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
- int hcrc, int dcrc, int reply)
+ int hcrc, int dcrc)
{
struct sk_buff *skb = alloc_wr(sizeof(struct cpl_set_tcb_field), 0,
GFP_KERNEL);
@@ -1197,7 +1196,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply = V_NO_REPLY(reply ? 0 : 1);
+ req->reply = V_NO_REPLY(1);
req->cpu_idx = 0;
req->word = htons(31);
req->mask = cpu_to_be64(0x0F000000);
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 211da1d..689d6c8 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1517,16 +1517,22 @@ static void do_set_tcb_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
struct cxgbi_sock *csk;
csk = lookup_tid(t, tid);
- if (!csk)
+ if (!csk) {
pr_err("can't find conn. for tid %u.\n", tid);
+ return;
+ }
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p,%u,%lx,%u, status 0x%x.\n",
csk, csk->state, csk->flags, csk->tid, rpl->status);
- if (rpl->status != CPL_ERR_NONE)
+ if (rpl->status != CPL_ERR_NONE) {
pr_err("csk 0x%p,%u, SET_TCB_RPL status %u.\n",
csk, tid, rpl->status);
+ csk->err = -EINVAL;
+ }
+
+ complete(&csk->cmpl);
__kfree_skb(skb);
}
@@ -1903,7 +1909,7 @@ static int ddp_set_map(struct cxgbi_ppm *ppm, struct cxgbi_sock *csk,
}
static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
- int pg_idx, bool reply)
+ int pg_idx)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
@@ -1919,7 +1925,7 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, csk->tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, csk->tid));
- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 8);
req->val = cpu_to_be64(pg_idx << 8);
@@ -1928,12 +1934,15 @@ static int ddp_setup_conn_pgidx(struct cxgbi_sock *csk, unsigned int tid,
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, pg_idx %u.\n", csk, csk->tid, pg_idx);
+ reinit_completion(&csk->cmpl);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
- return 0;
+ wait_for_completion(&csk->cmpl);
+
+ return csk->err;
}
static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
- int hcrc, int dcrc, int reply)
+ int hcrc, int dcrc)
{
struct sk_buff *skb;
struct cpl_set_tcb_field *req;
@@ -1951,7 +1960,7 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
req = (struct cpl_set_tcb_field *)skb->head;
INIT_TP_WR(req, tid);
OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
- req->reply_ctrl = htons(NO_REPLY_V(reply) | QUEUENO_V(csk->rss_qid));
+ req->reply_ctrl = htons(NO_REPLY_V(0) | QUEUENO_V(csk->rss_qid));
req->word_cookie = htons(0);
req->mask = cpu_to_be64(0x3 << 4);
req->val = cpu_to_be64(((hcrc ? ULP_CRC_HEADER : 0) |
@@ -1961,8 +1970,11 @@ static int ddp_setup_conn_digest(struct cxgbi_sock *csk, unsigned int tid,
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
"csk 0x%p, tid 0x%x, crc %d,%d.\n", csk, csk->tid, hcrc, dcrc);
+ reinit_completion(&csk->cmpl);
cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
- return 0;
+ wait_for_completion(&csk->cmpl);
+
+ return csk->err;
}
static struct cxgbi_ppm *cdev2ppm(struct cxgbi_device *cdev)
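The cxgb4i changes above turn the SET_TCB_FIELD setup into a synchronous handshake: the caller re-initializes a completion, sends the request with NO_REPLY_V(0), and blocks until do_set_tcb_rpl() records the status and completes. A userspace sketch of the same pattern using pthreads; the names and the simulated latency are illustrative, not the driver's API.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct fake_csk {
    pthread_mutex_t lock;
    pthread_cond_t  done;
    int             completed;
    int             err;
};

/* stands in for do_set_tcb_rpl(): record status, then complete */
static void *reply_handler(void *arg)
{
    struct fake_csk *csk = arg;

    usleep(10000); /* simulated hardware latency */
    pthread_mutex_lock(&csk->lock);
    csk->err = 0;  /* CPL_ERR_NONE */
    csk->completed = 1;
    pthread_cond_signal(&csk->done);
    pthread_mutex_unlock(&csk->lock);
    return NULL;
}

/* stands in for ddp_setup_conn_pgidx(): send, then wait for the reply */
static int setup_conn(struct fake_csk *csk)
{
    pthread_t rpl;

    csk->completed = 0; /* reinit_completion() */
    pthread_create(&rpl, NULL, reply_handler, csk); /* "send" the request */

    pthread_mutex_lock(&csk->lock); /* wait_for_completion() */
    while (!csk->completed)
        pthread_cond_wait(&csk->done, &csk->lock);
    pthread_mutex_unlock(&csk->lock);

    pthread_join(rpl, NULL);
    return csk->err;
}

int main(void)
{
    struct fake_csk csk = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .done = PTHREAD_COND_INITIALIZER,
    };

    printf("setup returned %d\n", setup_conn(&csk));
    return 0;
}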
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 3f3af5e..f2c561c 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -573,6 +573,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
skb_queue_head_init(&csk->receive_queue);
skb_queue_head_init(&csk->write_queue);
timer_setup(&csk->retry_timer, NULL, 0);
+ init_completion(&csk->cmpl);
rwlock_init(&csk->callback_lock);
csk->cdev = cdev;
csk->flags = 0;
@@ -2252,14 +2253,14 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
if (!err && conn->hdrdgst_en)
err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
conn->hdrdgst_en,
- conn->datadgst_en, 0);
+ conn->datadgst_en);
break;
case ISCSI_PARAM_DATADGST_EN:
err = iscsi_set_param(cls_conn, param, buf, buflen);
if (!err && conn->datadgst_en)
err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
conn->hdrdgst_en,
- conn->datadgst_en, 0);
+ conn->datadgst_en);
break;
case ISCSI_PARAM_MAX_R2T:
return iscsi_tcp_set_max_r2t(conn, buf);
@@ -2385,7 +2386,7 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
ppm = csk->cdev->cdev2ppm(csk->cdev);
err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
- ppm->tformat.pgsz_idx_dflt, 0);
+ ppm->tformat.pgsz_idx_dflt);
if (err < 0)
return err;
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index dcb190e..3bf7414 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -146,6 +146,7 @@ struct cxgbi_sock {
struct sk_buff_head receive_queue;
struct sk_buff_head write_queue;
struct timer_list retry_timer;
+ struct completion cmpl;
int err;
rwlock_t callback_lock;
void *user_data;
@@ -487,9 +488,9 @@ struct cxgbi_device {
struct cxgbi_ppm *,
struct cxgbi_task_tag_info *);
int (*csk_ddp_setup_digest)(struct cxgbi_sock *,
- unsigned int, int, int, int);
+ unsigned int, int, int);
int (*csk_ddp_setup_pgidx)(struct cxgbi_sock *,
- unsigned int, int, bool);
+ unsigned int, int);
void (*csk_release_offload_resources)(struct cxgbi_sock *);
int (*csk_rx_pdu_ready)(struct cxgbi_sock *, struct sk_buff *);
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index 6637116..f987c40 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -3694,6 +3694,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;
cfg = shost_priv(host);
+ cfg->state = STATE_PROBING;
cfg->host = host;
rc = alloc_mem(cfg);
if (rc) {
@@ -3782,6 +3783,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
return rc;
out_remove:
+ cfg->state = STATE_PROBED;
cxlflash_remove(pdev);
goto out;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 687ff61..3922b17 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -492,7 +492,7 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_phy_write32(hisi_hba, i, PHYCTRL_OOB_RESTART_MSK, 0x1);
hisi_sas_phy_write32(hisi_hba, i, STP_LINK_TIMER, 0x7f7a120);
hisi_sas_phy_write32(hisi_hba, i, CON_CFG_DRIVER, 0x2a0a01);
-
+ hisi_sas_phy_write32(hisi_hba, i, SAS_SSP_CON_TIMER_CFG, 0x32);
/* used for 12G negotiate */
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
}
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 08c7b1e..dde84f7 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -588,6 +588,13 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
shost->max_lun = ~0;
shost->max_cmd_len = MAX_COMMAND_SIZE;
+ /* turn on DIF support */
+ scsi_host_set_prot(shost,
+ SHOST_DIF_TYPE1_PROTECTION |
+ SHOST_DIF_TYPE2_PROTECTION |
+ SHOST_DIF_TYPE3_PROTECTION);
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
err = scsi_add_host(shost, &pdev->dev);
if (err)
goto err_shost;
@@ -675,13 +682,6 @@ static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto err_host_alloc;
}
pci_info->hosts[i] = h;
-
- /* turn on DIF support */
- scsi_host_set_prot(to_shost(h),
- SHOST_DIF_TYPE1_PROTECTION |
- SHOST_DIF_TYPE2_PROTECTION |
- SHOST_DIF_TYPE3_PROTECTION);
- scsi_host_set_guard(to_shost(h), SHOST_DIX_GUARD_CRC);
}
err = isci_setup_interrupts(pdev);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 4dda969..0d214e6 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -242,6 +242,8 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
icmd->ulpCommand = CMD_ELS_REQUEST64_CR;
if (elscmd == ELS_CMD_FLOGI)
icmd->ulpTimeout = FF_DEF_RATOV * 2;
+ else if (elscmd == ELS_CMD_LOGO)
+ icmd->ulpTimeout = phba->fc_ratov;
else
icmd->ulpTimeout = phba->fc_ratov * 2;
} else {
@@ -2682,16 +2684,15 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
goto out;
}
+ /* The LOGO will not be retried on failure. A LOGO was
+ * issued to the remote rport and an ACC or RJT or no Answer are
+ * all acceptable. Note the failure and move forward with
+ * discovery. The PLOGI will retry.
+ */
if (irsp->ulpStatus) {
- /* Check for retry */
- if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
- /* ELS command is being retried */
- skip_recovery = 1;
- goto out;
- }
/* LOGO failed */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
- "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
+ "2756 LOGO failure, No Retry DID:%06X Status:x%x/x%x\n",
ndlp->nlp_DID, irsp->ulpStatus,
irsp->un.ulpWord[4]);
/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
@@ -2737,7 +2738,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* For any other port type, the rpi is unregistered as an implicit
* LOGO.
*/
- if ((ndlp->nlp_type & NLP_FCP_TARGET) && (skip_recovery == 0)) {
+ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) &&
+ skip_recovery == 0) {
lpfc_cancel_retry_delay_tmo(vport, ndlp);
spin_lock_irqsave(shost->host_lock, flags);
ndlp->nlp_flag |= NLP_NPR_2B_DISC;
@@ -2770,6 +2772,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* will be stored into the context1 field of the IOCB for the completion
* callback function to the LOGO ELS command.
*
+ * Callers of this routine are expected to unregister the RPI first.
+ *
* Return code
* 0 - successfully issued logo
* 1 - failed to issue logo
@@ -2811,22 +2815,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"Issue LOGO: did:x%x",
ndlp->nlp_DID, 0, 0);
- /*
- * If we are issuing a LOGO, we may try to recover the remote NPort
- * by issuing a PLOGI later. Even though we issue ELS cmds by the
- * VPI, if we have a valid RPI, and that RPI gets unreg'ed while
- * that ELS command is in-flight, the HBA returns a IOERR_INVALID_RPI
- * for that ELS cmd. To avoid this situation, lets get rid of the
- * RPI right now, before any ELS cmds are sent.
- */
- spin_lock_irq(shost->host_lock);
- ndlp->nlp_flag |= NLP_ISSUE_LOGO;
- spin_unlock_irq(shost->host_lock);
- if (lpfc_unreg_rpi(vport, ndlp)) {
- lpfc_els_free_iocb(phba, elsiocb);
- return 0;
- }
-
phba->fc_stat.elsXmitLOGO++;
elsiocb->iocb_cmpl = lpfc_cmpl_els_logo;
spin_lock_irq(shost->host_lock);
@@ -2834,7 +2822,6 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_flag &= ~NLP_ISSUE_LOGO;
spin_unlock_irq(shost->host_lock);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
-
if (rc == IOCB_ERROR) {
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
@@ -2842,6 +2829,11 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_els_free_iocb(phba, elsiocb);
return 1;
}
+
+ spin_lock_irq(shost->host_lock);
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ spin_unlock_irq(shost->host_lock);
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
return 0;
}
@@ -5701,6 +5693,9 @@ lpfc_els_lcb_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
stat = (struct ls_rjt *)(pcmd + sizeof(uint32_t));
stat->un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+ if (shdr_add_status == ADD_STATUS_OPERATION_ALREADY_ACTIVE)
+ stat->un.b.lsRjtRsnCodeExp = LSEXP_CMD_IN_PROGRESS;
+
elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
phba->fc_stat.elsXmitLSRJT++;
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
@@ -9502,7 +9497,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
"rport in state 0x%x\n", ndlp->nlp_state);
return;
}
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR,
+ LOG_ELS | LOG_FCP_ERROR | LOG_NVME_IOERR,
"3094 Start rport recovery on shost id 0x%x "
"fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
"flags 0x%x\n",
@@ -9515,8 +9511,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
*/
spin_lock_irqsave(shost->host_lock, flags);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_flag |= NLP_ISSUE_LOGO;
spin_unlock_irqrestore(shost->host_lock, flags);
- lpfc_issue_els_logo(vport, ndlp, 0);
- lpfc_nlp_set_state(vport, ndlp, NLP_STE_LOGO_ISSUE);
+ lpfc_unreg_rpi(vport, ndlp);
}
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index bd9bce9..a6619fd 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -836,7 +836,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
return 0;
}
@@ -851,7 +853,10 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
return 1;
}
}
+
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag &= ~NLP_NPR_ADISC;
+ spin_unlock_irq(shost->host_lock);
lpfc_unreg_rpi(vport, ndlp);
return 0;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 53133cf..622832e 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -9809,6 +9809,7 @@ static void scsih_remove(struct pci_dev *pdev)
/* release all the volumes */
_scsih_ir_shutdown(ioc);
+ sas_remove_host(shost);
list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
list) {
if (raid_device->starget) {
@@ -9851,7 +9852,6 @@ static void scsih_remove(struct pci_dev *pdev)
ioc->sas_hba.num_phys = 0;
}
- sas_remove_host(shost);
mpt3sas_base_detach(ioc);
spin_lock(&gioc_lock);
list_del(&ioc->list);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index f8cc267..20d3606 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -834,10 +834,13 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
mpt3sas_port->remote_identify.sas_address,
mpt3sas_phy->phy_id);
mpt3sas_phy->phy_belongs_to_port = 0;
- sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ if (!ioc->remove_host)
+ sas_port_delete_phy(mpt3sas_port->port,
+ mpt3sas_phy->phy);
list_del(&mpt3sas_phy->port_siblings);
}
- sas_port_delete(mpt3sas_port->port);
+ if (!ioc->remove_host)
+ sas_port_delete(mpt3sas_port->port);
kfree(mpt3sas_port);
}
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index 2f0a4f2..d4821b9 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -954,6 +954,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
qedi_ep = ep->dd_data;
if (qedi_ep->state == EP_STATE_IDLE ||
+ qedi_ep->state == EP_STATE_OFLDCONN_NONE ||
qedi_ep->state == EP_STATE_OFLDCONN_FAILED)
return -1;
@@ -1036,6 +1037,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
switch (qedi_ep->state) {
case EP_STATE_OFLDCONN_START:
+ case EP_STATE_OFLDCONN_NONE:
goto ep_release_conn;
case EP_STATE_OFLDCONN_FAILED:
break;
@@ -1226,6 +1228,7 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data)
if (!is_valid_ether_addr(&path_data->mac_addr[0])) {
QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n");
+ qedi_ep->state = EP_STATE_OFLDCONN_NONE;
ret = -EIO;
goto set_path_exit;
}
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index 1126077..892d70d 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -59,6 +59,7 @@ enum {
EP_STATE_OFLDCONN_FAILED = 0x2000,
EP_STATE_CONNECT_FAILED = 0x4000,
EP_STATE_DISCONN_TIMEDOUT = 0x8000,
+ EP_STATE_OFLDCONN_NONE = 0x10000,
};
struct qedi_conn;
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 0e13349..575445c 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -7237,6 +7237,8 @@ static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha,
rc = qla4xxx_copy_from_fwddb_param(fnode_sess, fnode_conn,
fw_ddb_entry);
+ if (rc)
+ goto free_sess;
ql4_printk(KERN_INFO, ha, "%s: sysfs entry %s created\n",
__func__, fnode_sess->dev.kobj.name);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index e229f2f..ffeac4b 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2244,6 +2244,8 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
if (!shost->use_clustering)
q->limits.cluster = 0;
+ if (shost->inlinecrypt_support)
+ queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
/*
* Set a reasonable default alignment: The larger of 32-byte (dword),
* which is a common minimum for HBAs, and the minimum DMA alignment,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 46216e45..a73cbd8 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2881,9 +2881,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
if (rot == 1) {
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
- } else {
- blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
- blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
}
if (sdkp->device->type == TYPE_ZBC) {
@@ -3020,6 +3017,15 @@ static int sd_revalidate_disk(struct gendisk *disk)
if (sdkp->media_present) {
sd_read_capacity(sdkp, buffer);
+ /*
+ * Set the default to rotational. All non-rotational devices
+ * support the block characteristics VPD page, which will
+ * cause this to be updated correctly; any device which
+ * doesn't support it should be treated as rotational.
+ */
+ blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
+ blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);
+
if (scsi_device_supports_vpd(sdp)) {
sd_read_block_provisioning(sdkp);
sd_read_block_limits(sdkp);
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index 8c1a232..3781e81 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -653,6 +653,7 @@ struct bmic_host_wellness_driver_version {
u8 driver_version_tag[2];
__le16 driver_version_length;
char driver_version[32];
+ u8 dont_write_tag[2];
u8 end_tag[2];
};
@@ -682,6 +683,8 @@ static int pqi_write_driver_version_to_host_wellness(
strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
sizeof(buffer->driver_version) - 1);
buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
+ buffer->dont_write_tag[0] = 'D';
+ buffer->dont_write_tag[1] = 'W';
buffer->end_tag[0] = 'Z';
buffer->end_tag[1] = 'Z';
@@ -1181,6 +1184,9 @@ static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
if (rc)
goto out;
+ if (vpd->page_code != CISS_VPD_LV_STATUS)
+ goto out;
+
page_length = offsetof(struct ciss_vpd_logical_volume_status,
volume_status) + vpd->page_length;
if (page_length < sizeof(*vpd))
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index 5141bd4..ca7dfb3 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -59,7 +59,7 @@
#define SIS_CTRL_KERNEL_UP 0x80
#define SIS_CTRL_KERNEL_PANIC 0x100
-#define SIS_CTRL_READY_TIMEOUT_SECS 30
+#define SIS_CTRL_READY_TIMEOUT_SECS 180
#define SIS_CTRL_READY_RESUME_TIMEOUT_SECS 90
#define SIS_CTRL_READY_POLL_INTERVAL_MSECS 10
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index d2b042e..95c212c 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -938,8 +938,9 @@ static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
else
return 0;
- /* Use request LBA as the DUN value */
+ /* Use request LBA or given dun as the DUN value */
if (req->bio) {
+#ifdef CONFIG_PFK
if (bio_dun(req->bio)) {
/* dun @bio can be split, so we have to adjust offset */
*dun = bio_dun(req->bio);
@@ -947,8 +948,11 @@ static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
*dun = req->bio->bi_iter.bi_sector;
*dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
}
+#else
+ *dun = req->bio->bi_iter.bi_sector;
+ *dun >>= UFS_QCOM_ICE_TR_DATA_UNIT_4_KB;
+#endif
}
-
ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
return ret;
@@ -2191,6 +2195,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
__func__, err);
goto out_variant_clear;
+ } else {
+ hba->host->inlinecrypt_support = 1;
}
host->generic_phy = devm_phy_get(dev, "ufsphy");
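Note on the DUN computation above: the non-PFK branch derives the DUN directly from the request's starting sector (in 512-byte units) and right-shifts it into 4 KB ICE data units. A minimal out-of-tree sketch of that arithmetic, assuming UFS_QCOM_ICE_TR_DATA_UNIT_4_KB equals 3 (4096 / 512 = 8 = 1 << 3):

#include <stdint.h>
#include <stdio.h>

/* Assumed shift: one 4096-byte data unit spans 8 = 1 << 3 sectors of 512 bytes. */
#define DATA_UNIT_4KB_SHIFT 3

/* Convert a 512-byte sector number into a 4 KB data unit number (DUN). */
static uint64_t sector_to_dun(uint64_t sector)
{
	return sector >> DATA_UNIT_4KB_SHIFT;
}

int main(void)
{
	/* sector 4096 (byte offset 2 MiB) falls in data unit 512 */
	printf("dun = %llu\n", (unsigned long long)sector_to_dun(4096));
	return 0;
}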
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 001b719..d790923 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -141,7 +141,7 @@ enum ufs_desc_def_size {
QUERY_DESC_CONFIGURATION_DEF_SIZE = 0x90,
QUERY_DESC_UNIT_DEF_SIZE = 0x23,
QUERY_DESC_INTERCONNECT_DEF_SIZE = 0x06,
- QUERY_DESC_GEOMETRY_DEF_SIZE = 0x44,
+ QUERY_DESC_GEOMETRY_DEF_SIZE = 0x48,
QUERY_DESC_POWER_DEF_SIZE = 0x62,
QUERY_DESC_HEALTH_DEF_SIZE = 0x25,
};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index dcbcb24..d739dc8 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -10461,6 +10461,8 @@ int ufshcd_system_resume(struct ufs_hba *hba)
trace_ufshcd_system_resume(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
hba->curr_dev_pwr_mode, hba->uic_link_state);
+ if (!ret)
+ hba->is_sys_suspended = false;
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
diff --git a/drivers/soc/bcm/brcmstb/common.c b/drivers/soc/bcm/brcmstb/common.c
index 1418545..bf9123f 100644
--- a/drivers/soc/bcm/brcmstb/common.c
+++ b/drivers/soc/bcm/brcmstb/common.c
@@ -31,13 +31,17 @@ static const struct of_device_id brcmstb_machine_match[] = {
bool soc_is_brcmstb(void)
{
+ const struct of_device_id *match;
struct device_node *root;
root = of_find_node_by_path("/");
if (!root)
return false;
- return of_match_node(brcmstb_machine_match, root) != NULL;
+ match = of_match_node(brcmstb_machine_match, root);
+ of_node_put(root);
+
+ return match != NULL;
}
u32 brcmstb_get_family_id(void)
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
index c59d1d6..833f047 100644
--- a/drivers/soc/qcom/llcc-slice.c
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -322,13 +322,17 @@ static int qcom_llcc_cfg_program(struct platform_device *pdev)
if (cap_based_alloc_and_pwr_collapse) {
cad |= llcc_table[i].dis_cap_alloc <<
llcc_table[i].slice_id;
- regmap_write(drv_data->bcast_regmap,
+ ret = regmap_write(drv_data->bcast_regmap,
LLCC_TRP_SCID_DIS_CAP_ALLOC, cad);
+ if (ret)
+ return ret;
pcb |= llcc_table[i].retain_on_pc <<
llcc_table[i].slice_id;
- regmap_write(drv_data->bcast_regmap,
- LLCC_TRP_PCB_ACT, pcb);
+ ret = regmap_write(drv_data->bcast_regmap,
+ LLCC_TRP_PCB_ACT, pcb);
+ if (ret)
+ return ret;
}
if (llcc_table[i].activate_on_init) {
@@ -414,8 +418,10 @@ int qcom_llcc_probe(struct platform_device *pdev,
platform_set_drvdata(pdev, drv_data);
ret = qcom_llcc_cfg_program(pdev);
- if (ret)
+ if (ret) {
+ pr_err("llcc configuration failed!!\n");
return ret;
+ }
drv_data->ecc_irq = platform_get_irq(pdev, 0);
llcc_edac = platform_device_register_data(&pdev->dev,
diff --git a/drivers/soc/tegra/common.c b/drivers/soc/tegra/common.c
index cd8f413..7bfb154 100644
--- a/drivers/soc/tegra/common.c
+++ b/drivers/soc/tegra/common.c
@@ -22,11 +22,15 @@ static const struct of_device_id tegra_machine_match[] = {
bool soc_is_tegra(void)
{
+ const struct of_device_id *match;
struct device_node *root;
root = of_find_node_by_path("/");
if (!root)
return false;
- return of_match_node(tegra_machine_match, root) != NULL;
+ match = of_match_node(tegra_machine_match, root);
+ of_node_put(root);
+
+ return match != NULL;
}
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 424354f..36435f7 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -1378,6 +1378,7 @@ static int spi_geni_probe(struct platform_device *pdev)
goto spi_geni_probe_err;
}
+ geni_mas->spi_rsc.ctrl_dev = geni_mas->dev;
rsc->geni_pinctrl = devm_pinctrl_get(&pdev->dev);
if (IS_ERR_OR_NULL(rsc->geni_pinctrl)) {
dev_err(&pdev->dev, "No pinctrl config specified!\n");
diff --git a/drivers/staging/erofs/data.c b/drivers/staging/erofs/data.c
index ac263a1..894e60e 100644
--- a/drivers/staging/erofs/data.c
+++ b/drivers/staging/erofs/data.c
@@ -25,7 +25,7 @@ static inline void read_endio(struct bio *bio)
struct page *page = bvec->bv_page;
/* page is already locked */
- BUG_ON(PageUptodate(page));
+ DBG_BUGON(PageUptodate(page));
if (unlikely(err))
SetPageError(page);
@@ -91,12 +91,12 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
struct erofs_map_blocks *map,
int flags)
{
+ int err = 0;
erofs_blk_t nblocks, lastblk;
u64 offset = map->m_la;
struct erofs_vnode *vi = EROFS_V(inode);
trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
- BUG_ON(is_inode_layout_compression(inode));
nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
lastblk = nblocks - is_inode_layout_inline(inode);
@@ -123,18 +123,27 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
map->m_plen = inode->i_size - offset;
/* inline data should be located in one meta block */
- BUG_ON(erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE);
+ if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
+ DBG_BUGON(1);
+ err = -EIO;
+ goto err_out;
+ }
+
map->m_flags |= EROFS_MAP_META;
} else {
errln("internal error @ nid: %llu (size %llu), m_la 0x%llx",
vi->nid, inode->i_size, map->m_la);
- BUG();
+ DBG_BUGON(1);
+ err = -EIO;
+ goto err_out;
}
out:
map->m_llen = map->m_plen;
+
+err_out:
trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
- return 0;
+ return err;
}
#ifdef CONFIG_EROFS_FS_ZIP
@@ -190,7 +199,7 @@ static inline struct bio *erofs_read_raw_page(
erofs_off_t current_block = (erofs_off_t)page->index;
int err;
- BUG_ON(!nblocks);
+ DBG_BUGON(!nblocks);
if (PageUptodate(page)) {
err = 0;
@@ -233,7 +242,7 @@ static inline struct bio *erofs_read_raw_page(
}
/* for RAW access mode, m_plen must be equal to m_llen */
- BUG_ON(map.m_plen != map.m_llen);
+ DBG_BUGON(map.m_plen != map.m_llen);
blknr = erofs_blknr(map.m_pa);
blkoff = erofs_blkoff(map.m_pa);
@@ -243,7 +252,7 @@ static inline struct bio *erofs_read_raw_page(
void *vsrc, *vto;
struct page *ipage;
- BUG_ON(map.m_plen > PAGE_SIZE);
+ DBG_BUGON(map.m_plen > PAGE_SIZE);
ipage = erofs_get_meta_page(inode->i_sb, blknr, 0);
@@ -270,7 +279,7 @@ static inline struct bio *erofs_read_raw_page(
}
/* pa must be block-aligned for raw reading */
- BUG_ON(erofs_blkoff(map.m_pa) != 0);
+ DBG_BUGON(erofs_blkoff(map.m_pa));
/* max # of continuous pages */
if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
@@ -331,7 +340,7 @@ static int erofs_raw_access_readpage(struct file *file, struct page *page)
if (IS_ERR(bio))
return PTR_ERR(bio);
- BUG_ON(bio != NULL); /* since we have only one bio -- must be NULL */
+ DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
return 0;
}
@@ -369,7 +378,7 @@ static int erofs_raw_access_readpages(struct file *filp,
/* pages could still be locked */
put_page(page);
}
- BUG_ON(!list_empty(pages));
+ DBG_BUGON(!list_empty(pages));
/* the rare case (end in gaps) */
if (unlikely(bio != NULL))
diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
index be6ae3b..04b84ff 100644
--- a/drivers/staging/erofs/dir.c
+++ b/drivers/staging/erofs/dir.c
@@ -53,8 +53,11 @@ static int erofs_fill_dentries(struct dir_context *ctx,
strnlen(de_name, maxsize - nameoff) :
le16_to_cpu(de[1].nameoff) - nameoff;
- /* the corrupted directory found */
- BUG_ON(de_namelen < 0);
+ /* a corrupted entry is found */
+ if (unlikely(de_namelen < 0)) {
+ DBG_BUGON(1);
+ return -EIO;
+ }
#ifdef CONFIG_EROFS_FS_DEBUG
dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
index fbf6ff2..9e7815f 100644
--- a/drivers/staging/erofs/inode.c
+++ b/drivers/staging/erofs/inode.c
@@ -132,7 +132,13 @@ static int fill_inline_data(struct inode *inode, void *data, unsigned m_pofs)
return -ENOMEM;
m_pofs += vi->inode_isize + vi->xattr_isize;
- BUG_ON(m_pofs + inode->i_size > PAGE_SIZE);
+
+ /* inline symlink data shouldn't cross the page boundary either */
+ if (unlikely(m_pofs + inode->i_size > PAGE_SIZE)) {
+ DBG_BUGON(1);
+ kfree(lnk);
+ return -EIO;
+ }
/* get in-page inline data */
memcpy(lnk, data + m_pofs, inode->i_size);
@@ -170,7 +176,7 @@ static int fill_inode(struct inode *inode, int isdir)
return PTR_ERR(page);
}
- BUG_ON(!PageUptodate(page));
+ DBG_BUGON(!PageUptodate(page));
data = page_address(page);
err = read_inode(inode, data + ofs);
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index 367b39f..9f44ed8 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -39,7 +39,7 @@
#define debugln(x, ...) ((void)0)
#define dbg_might_sleep() ((void)0)
-#define DBG_BUGON(...) ((void)0)
+#define DBG_BUGON(x) ((void)(x))
#endif
#ifdef CONFIG_EROFS_FAULT_INJECTION
@@ -184,50 +184,70 @@ struct erofs_workgroup {
#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
-static inline bool erofs_workgroup_try_to_freeze(
- struct erofs_workgroup *grp, int v)
+#if defined(CONFIG_SMP)
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+ int val)
{
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
- if (v != atomic_cmpxchg(&grp->refcount,
- v, EROFS_LOCKED_MAGIC))
- return false;
preempt_disable();
-#else
- preempt_disable();
- if (atomic_read(&grp->refcount) != v) {
+ if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
preempt_enable();
return false;
}
-#endif
return true;
}
-static inline void erofs_workgroup_unfreeze(
- struct erofs_workgroup *grp, int v)
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+ int orig_val)
{
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
- atomic_set(&grp->refcount, v);
-#endif
+ /*
+ * other observers should notice all modifications
+ * in the freezing period.
+ */
+ smp_mb();
+ atomic_set(&grp->refcount, orig_val);
preempt_enable();
}
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+ return atomic_cond_read_relaxed(&grp->refcount,
+ VAL != EROFS_LOCKED_MAGIC);
+}
+#else
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+ int val)
+{
+ preempt_disable();
+ /* no need to spin on UP platforms, let's just disable preemption. */
+ if (val != atomic_read(&grp->refcount)) {
+ preempt_enable();
+ return false;
+ }
+ return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+ int orig_val)
+{
+ preempt_enable();
+}
+
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+ int v = atomic_read(&grp->refcount);
+
+ /* a workgroup is never frozen on uniprocessor systems */
+ DBG_BUGON(v == EROFS_LOCKED_MAGIC);
+ return v;
+}
+#endif
+
static inline bool erofs_workgroup_get(struct erofs_workgroup *grp, int *ocnt)
{
- const int locked = (int)EROFS_LOCKED_MAGIC;
int o;
repeat:
- o = atomic_read(&grp->refcount);
-
- /* spin if it is temporarily locked at the reclaim path */
- if (unlikely(o == locked)) {
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
- do
- cpu_relax();
- while (atomic_read(&grp->refcount) == locked);
-#endif
- goto repeat;
- }
+ o = erofs_wait_on_workgroup_freezed(grp);
if (unlikely(o <= 0))
return -1;
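The SMP freeze/unfreeze helpers above lock a workgroup by swapping its refcount with a magic value under disabled preemption, and waiters spin until the count is restored. A minimal userspace sketch of the same cmpxchg-based freeze pattern using C11 atomics; the names and the sentinel value are illustrative, not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define LOCKED_MAGIC (-12345)	/* illustrative sentinel, not the kernel value */

struct group {
	atomic_int refcount;
};

/* Freeze succeeds only if refcount still holds the value we sampled. */
static bool try_to_freeze(struct group *g, int expected)
{
	return atomic_compare_exchange_strong(&g->refcount, &expected,
					      LOCKED_MAGIC);
}

/* Restore the original count; release ordering publishes our changes. */
static void unfreeze(struct group *g, int orig)
{
	atomic_store_explicit(&g->refcount, orig, memory_order_release);
}

/* Spin until the group is not frozen and return the live refcount. */
static int wait_on_frozen(struct group *g)
{
	int v;

	while ((v = atomic_load(&g->refcount)) == LOCKED_MAGIC)
		;
	return v;
}

int main(void)
{
	struct group g = { .refcount = 1 };

	if (try_to_freeze(&g, 1)) {
		/* ... safely mutate the group here ... */
		unfreeze(&g, 1);
	}
	printf("refcount = %d\n", wait_on_frozen(&g));
	return 0;
}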
diff --git a/drivers/staging/erofs/super.c b/drivers/staging/erofs/super.c
index 2df9768..b0583cd 100644
--- a/drivers/staging/erofs/super.c
+++ b/drivers/staging/erofs/super.c
@@ -40,7 +40,6 @@ static int erofs_init_inode_cache(void)
static void erofs_exit_inode_cache(void)
{
- BUG_ON(erofs_inode_cachep == NULL);
kmem_cache_destroy(erofs_inode_cachep);
}
@@ -265,8 +264,8 @@ static int managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
int ret = 1; /* 0 - busy */
struct address_space *const mapping = page->mapping;
- BUG_ON(!PageLocked(page));
- BUG_ON(mapping->a_ops != &managed_cache_aops);
+ DBG_BUGON(!PageLocked(page));
+ DBG_BUGON(mapping->a_ops != &managed_cache_aops);
if (PagePrivate(page))
ret = erofs_try_to_free_cached_page(mapping, page);
@@ -279,10 +278,10 @@ static void managed_cache_invalidatepage(struct page *page,
{
const unsigned int stop = length + offset;
- BUG_ON(!PageLocked(page));
+ DBG_BUGON(!PageLocked(page));
- /* Check for overflow */
- BUG_ON(stop > PAGE_SIZE || stop < length);
+ /* Check for potential overflow in debug mode */
+ DBG_BUGON(stop > PAGE_SIZE || stop < length);
if (offset == 0 && stop == PAGE_SIZE)
while (!managed_cache_releasepage(page, GFP_NOFS))
@@ -404,12 +403,6 @@ static int erofs_read_super(struct super_block *sb,
erofs_register_super(sb);
- /*
- * We already have a positive dentry, which was instantiated
- * by d_make_root. Just need to d_rehash it.
- */
- d_rehash(sb->s_root);
-
if (!silent)
infoln("mounted on %s with opts: %s.", dev_name,
(char *)data);
@@ -625,7 +618,7 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
static int erofs_remount(struct super_block *sb, int *flags, char *data)
{
- BUG_ON(!sb_rdonly(sb));
+ DBG_BUGON(!sb_rdonly(sb));
*flags |= SB_RDONLY;
return 0;
diff --git a/drivers/staging/erofs/unzip_pagevec.h b/drivers/staging/erofs/unzip_pagevec.h
index 0956615..23856ba 100644
--- a/drivers/staging/erofs/unzip_pagevec.h
+++ b/drivers/staging/erofs/unzip_pagevec.h
@@ -150,7 +150,7 @@ z_erofs_pagevec_ctor_dequeue(struct z_erofs_pagevec_ctor *ctor,
erofs_vtptr_t t;
if (unlikely(ctor->index >= ctor->nr)) {
- BUG_ON(ctor->next == NULL);
+ DBG_BUGON(!ctor->next);
z_erofs_pagevec_ctor_pagedown(ctor, true);
}
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 0346630..1279241 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -18,9 +18,6 @@ static struct kmem_cache *z_erofs_workgroup_cachep __read_mostly;
void z_erofs_exit_zip_subsystem(void)
{
- BUG_ON(z_erofs_workqueue == NULL);
- BUG_ON(z_erofs_workgroup_cachep == NULL);
-
destroy_workqueue(z_erofs_workqueue);
kmem_cache_destroy(z_erofs_workgroup_cachep);
}
@@ -293,12 +290,9 @@ z_erofs_vle_work_lookup(struct super_block *sb,
*grp_ret = grp = container_of(egrp,
struct z_erofs_vle_workgroup, obj);
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
work = z_erofs_vle_grab_work(grp, pageofs);
+ /* if multiref is disabled, `primary' is always true */
primary = true;
-#else
- BUG();
-#endif
DBG_BUGON(work->pageofs != pageofs);
@@ -365,12 +359,12 @@ z_erofs_vle_work_register(struct super_block *sb,
struct z_erofs_vle_workgroup *grp = *grp_ret;
struct z_erofs_vle_work *work;
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
- BUG_ON(grp != NULL);
-#else
- if (grp != NULL)
- goto skip;
-#endif
+ /* if multiref is disabled, grp should never be nullptr */
+ if (unlikely(grp)) {
+ DBG_BUGON(1);
+ return ERR_PTR(-EINVAL);
+ }
+
/* no available workgroup, let's allocate one */
grp = kmem_cache_zalloc(z_erofs_workgroup_cachep, GFP_NOFS);
if (unlikely(grp == NULL))
@@ -393,13 +387,7 @@ z_erofs_vle_work_register(struct super_block *sb,
*hosted = true;
newgrp = true;
-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
-skip:
- /* currently unimplemented */
- BUG();
-#else
work = z_erofs_vle_grab_primary_work(grp);
-#endif
work->pageofs = pageofs;
mutex_init(&work->lock);
@@ -606,7 +594,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
enum z_erofs_page_type page_type;
unsigned cur, end, spiltted, index;
- int err;
+ int err = 0;
/* register locked file pages as online pages in pack */
z_erofs_onlinepage_init(page);
@@ -624,7 +612,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
/* go ahead the next map_blocks */
debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
- if (!z_erofs_vle_work_iter_end(builder))
+ if (z_erofs_vle_work_iter_end(builder))
fe->initial = false;
map->m_la = offset + cur;
@@ -633,12 +621,11 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
if (unlikely(err))
goto err_out;
- /* deal with hole (FIXME! broken now) */
if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
goto hitted;
DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
- BUG_ON(erofs_blkoff(map->m_pa));
+ DBG_BUGON(erofs_blkoff(map->m_pa));
err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
if (unlikely(err))
@@ -683,7 +670,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
err = z_erofs_vle_work_add_page(builder,
newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
- if (!err)
+ if (likely(!err))
goto retry;
}
@@ -694,9 +681,10 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
/* FIXME! avoid the last redundant fixup & endio */
z_erofs_onlinepage_fixup(page, index, true);
- ++spiltted;
- /* also update nr_pages and increase queued_pages */
+ /* bump up the number of split parts of a page */
+ ++spiltted;
+ /* also update nr_pages */
work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
next_part:
/* can be used for verification */
@@ -706,16 +694,18 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
if (end > 0)
goto repeat;
+out:
/* FIXME! avoid the last redundant fixup & endio */
z_erofs_onlinepage_endio(page);
debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
__func__, page, spiltted, map->m_llen);
- return 0;
-
-err_out:
- /* TODO: the missing error handing cases */
return err;
+
+ /* if some error occurred while processing this page */
+err_out:
+ SetPageError(page);
+ goto out;
}
static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
@@ -752,7 +742,7 @@ static inline void z_erofs_vle_read_endio(struct bio *bio)
bool cachemngd = false;
DBG_BUGON(PageUptodate(page));
- BUG_ON(page->mapping == NULL);
+ DBG_BUGON(!page->mapping);
#ifdef EROFS_FS_HAS_MANAGED_CACHE
if (unlikely(mngda == NULL && !z_erofs_is_stagingpage(page))) {
@@ -796,10 +786,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
const unsigned clusterpages = erofs_clusterpages(sbi);
struct z_erofs_pagevec_ctor ctor;
- unsigned nr_pages;
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
- unsigned sparsemem_pages = 0;
-#endif
+ unsigned int nr_pages;
+ unsigned int sparsemem_pages = 0;
struct page *pages_onstack[Z_EROFS_VLE_VMAP_ONSTACK_PAGES];
struct page **pages, **compressed_pages, *page;
unsigned i, llen;
@@ -811,12 +799,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
int err;
might_sleep();
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
work = z_erofs_vle_grab_primary_work(grp);
-#else
- BUG();
-#endif
- BUG_ON(!READ_ONCE(work->nr_pages));
+ DBG_BUGON(!READ_ONCE(work->nr_pages));
mutex_lock(&work->lock);
nr_pages = work->nr_pages;
@@ -865,14 +849,12 @@ static int z_erofs_vle_unzip(struct super_block *sb,
else
pagenr = z_erofs_onlinepage_index(page);
- BUG_ON(pagenr >= nr_pages);
+ DBG_BUGON(pagenr >= nr_pages);
+ DBG_BUGON(pages[pagenr]);
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
- BUG_ON(pages[pagenr] != NULL);
- ++sparsemem_pages;
-#endif
pages[pagenr] = page;
}
+ sparsemem_pages = i;
z_erofs_pagevec_ctor_exit(&ctor, true);
@@ -891,9 +873,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
if (z_erofs_is_stagingpage(page))
continue;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
- else if (page->mapping == mngda) {
- BUG_ON(PageLocked(page));
- BUG_ON(!PageUptodate(page));
+ if (page->mapping == mngda) {
+ DBG_BUGON(!PageUptodate(page));
continue;
}
#endif
@@ -901,11 +882,9 @@ static int z_erofs_vle_unzip(struct super_block *sb,
/* only non-head page could be reused as a compressed page */
pagenr = z_erofs_onlinepage_index(page);
- BUG_ON(pagenr >= nr_pages);
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
- BUG_ON(pages[pagenr] != NULL);
+ DBG_BUGON(pagenr >= nr_pages);
+ DBG_BUGON(pages[pagenr]);
++sparsemem_pages;
-#endif
pages[pagenr] = page;
overlapped = true;
@@ -914,9 +893,6 @@ static int z_erofs_vle_unzip(struct super_block *sb,
llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
- /* FIXME! this should be fixed in the future */
- BUG_ON(grp->llen != llen);
-
err = z_erofs_vle_plain_copy(compressed_pages, clusterpages,
pages, nr_pages, work->pageofs);
goto out;
@@ -931,12 +907,8 @@ static int z_erofs_vle_unzip(struct super_block *sb,
if (err != -ENOTSUPP)
goto out_percpu;
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
- if (sparsemem_pages >= nr_pages) {
- BUG_ON(sparsemem_pages > nr_pages);
+ if (sparsemem_pages >= nr_pages)
goto skip_allocpage;
- }
-#endif
for (i = 0; i < nr_pages; ++i) {
if (pages[i] != NULL)
@@ -945,9 +917,7 @@ static int z_erofs_vle_unzip(struct super_block *sb,
pages[i] = __stagingpage_alloc(page_pool, GFP_NOFS);
}
-#ifndef CONFIG_EROFS_FS_ZIP_MULTIREF
skip_allocpage:
-#endif
vout = erofs_vmap(pages, nr_pages);
err = z_erofs_vle_unzip_vmap(compressed_pages,
@@ -1031,7 +1001,7 @@ static void z_erofs_vle_unzip_wq(struct work_struct *work)
struct z_erofs_vle_unzip_io_sb, io.u.work);
LIST_HEAD(page_pool);
- BUG_ON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
+ DBG_BUGON(iosb->io.head == Z_EROFS_VLE_WORKGRP_TAIL_CLOSED);
z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &page_pool);
put_pages_list(&page_pool);
@@ -1360,7 +1330,6 @@ static inline int __z_erofs_vle_normalaccess_readpages(
continue;
}
- BUG_ON(PagePrivate(page));
set_page_private(page, (unsigned long)head);
head = page;
}
diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
index 3939985..3316bc3 100644
--- a/drivers/staging/erofs/unzip_vle.h
+++ b/drivers/staging/erofs/unzip_vle.h
@@ -47,13 +47,6 @@ static inline bool z_erofs_gather_if_stagingpage(struct list_head *page_pool,
#define Z_EROFS_VLE_INLINE_PAGEVECS 3
struct z_erofs_vle_work {
- /* struct z_erofs_vle_work *left, *right; */
-
-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
- struct list_head list;
-
- atomic_t refcount;
-#endif
struct mutex lock;
/* I: decompression offset in page */
@@ -107,10 +100,8 @@ static inline void z_erofs_vle_set_workgrp_fmt(
grp->flags = fmt | (grp->flags & ~Z_EROFS_VLE_WORKGRP_FMT_MASK);
}
-#ifdef CONFIG_EROFS_FS_ZIP_MULTIREF
-#error multiref decompression is unimplemented yet
-#else
+/* definitions if multiref is disabled */
#define z_erofs_vle_grab_primary_work(grp) (&(grp)->work)
#define z_erofs_vle_grab_work(grp, pageofs) (&(grp)->work)
#define z_erofs_vle_work_workgroup(wrk, primary) \
@@ -118,7 +109,6 @@ static inline void z_erofs_vle_set_workgrp_fmt(
struct z_erofs_vle_workgroup, work) : \
({ BUG(); (void *)NULL; }))
-#endif
#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_vle_workgroup)
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index f5b665f..9cb35cd 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -57,7 +57,7 @@ int z_erofs_vle_plain_copy(struct page **compressed_pages,
if (compressed_pages[j] != page)
continue;
- BUG_ON(mirrored[j]);
+ DBG_BUGON(mirrored[j]);
memcpy(percpu_data + j * PAGE_SIZE, dst, PAGE_SIZE);
mirrored[j] = true;
break;
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
index 595cf90..dd2ac9d 100644
--- a/drivers/staging/erofs/utils.c
+++ b/drivers/staging/erofs/utils.c
@@ -23,9 +23,6 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
list_del(&page->lru);
} else {
page = alloc_pages(gfp | __GFP_NOFAIL, 0);
-
- BUG_ON(page == NULL);
- BUG_ON(page->mapping != NULL);
}
return page;
}
@@ -60,7 +57,7 @@ struct erofs_workgroup *erofs_find_workgroup(
/* decrease refcount added by erofs_workgroup_put */
if (unlikely(oldcount == 1))
atomic_long_dec(&erofs_global_shrink_cnt);
- BUG_ON(index != grp->index);
+ DBG_BUGON(index != grp->index);
}
rcu_read_unlock();
return grp;
@@ -73,8 +70,11 @@ int erofs_register_workgroup(struct super_block *sb,
struct erofs_sb_info *sbi;
int err;
- /* grp->refcount should not < 1 */
- BUG_ON(!atomic_read(&grp->refcount));
+ /* grp shouldn't be broken or already in use */
+ if (unlikely(atomic_read(&grp->refcount) != 1)) {
+ DBG_BUGON(1);
+ return -EINVAL;
+ }
err = radix_tree_preload(GFP_NOFS);
if (err)
diff --git a/drivers/staging/fsl-dpaa2/rtc/rtc.c b/drivers/staging/fsl-dpaa2/rtc/rtc.c
index 0d52cb8..318a33c 100644
--- a/drivers/staging/fsl-dpaa2/rtc/rtc.c
+++ b/drivers/staging/fsl-dpaa2/rtc/rtc.c
@@ -142,7 +142,10 @@ static int rtc_probe(struct fsl_mc_device *mc_dev)
err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io);
if (err) {
- dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
goto err_exit;
}
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index b736275..6a48ad0 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -256,7 +256,9 @@ static int ad7280_read(struct ad7280_state *st, unsigned int devaddr,
if (ret)
return ret;
- __ad7280_read32(st, &tmp);
+ ret = __ad7280_read32(st, &tmp);
+ if (ret)
+ return ret;
if (ad7280_check_crc(st, tmp))
return -EIO;
@@ -294,7 +296,9 @@ static int ad7280_read_channel(struct ad7280_state *st, unsigned int devaddr,
ad7280_delay(st);
- __ad7280_read32(st, &tmp);
+ ret = __ad7280_read32(st, &tmp);
+ if (ret)
+ return ret;
if (ad7280_check_crc(st, tmp))
return -EIO;
@@ -327,7 +331,9 @@ static int ad7280_read_all_channels(struct ad7280_state *st, unsigned int cnt,
ad7280_delay(st);
for (i = 0; i < cnt; i++) {
- __ad7280_read32(st, &tmp);
+ ret = __ad7280_read32(st, &tmp);
+ if (ret)
+ return ret;
if (ad7280_check_crc(st, tmp))
return -EIO;
@@ -370,7 +376,10 @@ static int ad7280_chain_setup(struct ad7280_state *st)
return ret;
for (n = 0; n <= AD7280A_MAX_CHAIN; n++) {
- __ad7280_read32(st, &val);
+ ret = __ad7280_read32(st, &val);
+ if (ret)
+ return ret;
+
if (val == 0)
return n - 1;
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index 16d7207..8bcb5d5 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -87,12 +87,16 @@ static int ad7780_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad7780_state *st = iio_priv(indio_dev);
+ int voltage_uv;
switch (m) {
case IIO_CHAN_INFO_RAW:
return ad_sigma_delta_single_conversion(indio_dev, chan, val);
case IIO_CHAN_INFO_SCALE:
- *val = st->int_vref_mv * st->gain;
+ voltage_uv = regulator_get_voltage(st->reg);
+ if (voltage_uv < 0)
+ return voltage_uv;
+ *val = (voltage_uv / 1000) * st->gain;
*val2 = chan->scan_type.realbits - 1;
return IIO_VAL_FRACTIONAL_LOG2;
case IIO_CHAN_INFO_OFFSET:
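For reference, IIO_VAL_FRACTIONAL_LOG2 tells the IIO core to report val / 2^val2, so the scale returned above works out to (regulator voltage in mV * gain) / 2^(realbits - 1). A standalone sketch of that interpretation; all numbers below are invented for illustration:

#include <stdio.h>

int main(void)
{
	int voltage_uv = 3300000;	/* example regulator voltage: 3.3 V */
	int gain = 1;			/* example PGA gain */
	int realbits = 24;		/* example ADC resolution */

	int val = (voltage_uv / 1000) * gain;	/* 3300 */
	int val2 = realbits - 1;		/* 23 */

	/* IIO_VAL_FRACTIONAL_LOG2: reported scale = val / 2^val2 */
	double scale_mv_per_lsb = (double)val / (double)(1 << val2);

	printf("scale = %.9f mV/LSB\n", scale_mv_per_lsb);
	return 0;
}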
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
index 59586947..51cda91 100644
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -85,7 +85,12 @@ static int ad2s90_probe(struct spi_device *spi)
/* need 600ns between CS and the first falling edge of SCLK */
spi->max_speed_hz = 830000;
spi->mode = SPI_MODE_3;
- spi_setup(spi);
+ ret = spi_setup(spi);
+
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi_setup failed!\n");
+ return ret;
+ }
return 0;
}
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index c85a805..a497ec1 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -1255,6 +1255,10 @@ static int pi433_probe(struct spi_device *spi)
/* create cdev */
device->cdev = cdev_alloc();
+ if (!device->cdev) {
+ dev_dbg(device->dev, "allocation of cdev failed");
+ goto cdev_failed;
+ }
device->cdev->owner = THIS_MODULE;
cdev_init(device->cdev, &pi433_fops);
retval = cdev_add(device->cdev, device->devt, 1);
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c
index eac63aa..93742db 100644
--- a/drivers/staging/speakup/spk_ttyio.c
+++ b/drivers/staging/speakup/spk_ttyio.c
@@ -265,7 +265,8 @@ static void spk_ttyio_send_xchar(char ch)
return;
}
- speakup_tty->ops->send_xchar(speakup_tty, ch);
+ if (speakup_tty->ops->send_xchar)
+ speakup_tty->ops->send_xchar(speakup_tty, ch);
mutex_unlock(&speakup_tty_mutex);
}
@@ -277,7 +278,8 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear)
return;
}
- speakup_tty->ops->tiocmset(speakup_tty, set, clear);
+ if (speakup_tty->ops->tiocmset)
+ speakup_tty->ops->tiocmset(speakup_tty, set, clear);
mutex_unlock(&speakup_tty_mutex);
}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 3f7aad4..f1b730b 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -266,7 +266,7 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
}
ret = transport_init_session(se_sess);
if (ret < 0) {
- kfree(se_sess);
+ kmem_cache_free(se_sess_cache, se_sess);
return ERR_PTR(ret);
}
se_sess->sup_prot_ops = sup_prot_ops;
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9cd404a..ac76201 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -148,7 +148,7 @@ struct tcmu_dev {
size_t ring_size;
struct mutex cmdr_lock;
- struct list_head cmdr_queue;
+ struct list_head qfull_queue;
uint32_t dbi_max;
uint32_t dbi_thresh;
@@ -159,6 +159,7 @@ struct tcmu_dev {
struct timer_list cmd_timer;
unsigned int cmd_time_out;
+ struct list_head inflight_queue;
struct timer_list qfull_timer;
int qfull_time_out;
@@ -179,7 +180,7 @@ struct tcmu_dev {
struct tcmu_cmd {
struct se_cmd *se_cmd;
struct tcmu_dev *tcmu_dev;
- struct list_head cmdr_queue_entry;
+ struct list_head queue_entry;
uint16_t cmd_id;
@@ -192,6 +193,7 @@ struct tcmu_cmd {
unsigned long deadline;
#define TCMU_CMD_BIT_EXPIRED 0
+#define TCMU_CMD_BIT_INFLIGHT 1
unsigned long flags;
};
/*
@@ -586,7 +588,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
if (!tcmu_cmd)
return NULL;
- INIT_LIST_HEAD(&tcmu_cmd->cmdr_queue_entry);
+ INIT_LIST_HEAD(&tcmu_cmd->queue_entry);
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
@@ -915,11 +917,13 @@ static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd, unsigned int tmo,
return 0;
tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
- mod_timer(timer, tcmu_cmd->deadline);
+ if (!timer_pending(timer))
+ mod_timer(timer, tcmu_cmd->deadline);
+
return 0;
}
-static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
+static int add_to_qfull_queue(struct tcmu_cmd *tcmu_cmd)
{
struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
unsigned int tmo;
@@ -942,7 +946,7 @@ static int add_to_cmdr_queue(struct tcmu_cmd *tcmu_cmd)
if (ret)
return ret;
- list_add_tail(&tcmu_cmd->cmdr_queue_entry, &udev->cmdr_queue);
+ list_add_tail(&tcmu_cmd->queue_entry, &udev->qfull_queue);
pr_debug("adding cmd %u on dev %s to ring space wait queue\n",
tcmu_cmd->cmd_id, udev->name);
return 0;
@@ -999,7 +1003,7 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
- if (!list_empty(&udev->cmdr_queue))
+ if (!list_empty(&udev->qfull_queue))
goto queue;
mb = udev->mb_addr;
@@ -1096,13 +1100,16 @@ static sense_reason_t queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, int *scsi_err)
UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
tcmu_flush_dcache_range(mb, sizeof(*mb));
+ list_add_tail(&tcmu_cmd->queue_entry, &udev->inflight_queue);
+ set_bit(TCMU_CMD_BIT_INFLIGHT, &tcmu_cmd->flags);
+
/* TODO: only if FLUSH and FUA? */
uio_event_notify(&udev->uio_info);
return 0;
queue:
- if (add_to_cmdr_queue(tcmu_cmd)) {
+ if (add_to_qfull_queue(tcmu_cmd)) {
*scsi_err = TCM_OUT_OF_RESOURCES;
return -1;
}
@@ -1145,6 +1152,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
goto out;
+ list_del_init(&cmd->queue_entry);
+
tcmu_cmd_reset_dbi_cur(cmd);
if (entry->hdr.uflags & TCMU_UFLAG_UNKNOWN_OP) {
@@ -1194,9 +1203,29 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
tcmu_free_cmd(cmd);
}
+static void tcmu_set_next_deadline(struct list_head *queue,
+ struct timer_list *timer)
+{
+ struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
+ unsigned long deadline = 0;
+
+ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, queue, queue_entry) {
+ if (!time_after(jiffies, tcmu_cmd->deadline)) {
+ deadline = tcmu_cmd->deadline;
+ break;
+ }
+ }
+
+ if (deadline)
+ mod_timer(timer, deadline);
+ else
+ del_timer(timer);
+}
+
static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
{
struct tcmu_mailbox *mb;
+ struct tcmu_cmd *cmd;
int handled = 0;
if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
@@ -1210,7 +1239,6 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) {
struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
- struct tcmu_cmd *cmd;
tcmu_flush_dcache_range(entry, sizeof(*entry));
@@ -1243,7 +1271,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
/* no more pending commands */
del_timer(&udev->cmd_timer);
- if (list_empty(&udev->cmdr_queue)) {
+ if (list_empty(&udev->qfull_queue)) {
/*
* no more pending or waiting commands so try to
* reclaim blocks if needed.
@@ -1252,6 +1280,8 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
tcmu_global_max_blocks)
schedule_delayed_work(&tcmu_unmap_work, 0);
}
+ } else if (udev->cmd_time_out) {
+ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
}
return handled;
@@ -1271,7 +1301,7 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
if (!time_after(jiffies, cmd->deadline))
return 0;
- is_running = list_empty(&cmd->cmdr_queue_entry);
+ is_running = test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags);
se_cmd = cmd->se_cmd;
if (is_running) {
@@ -1288,12 +1318,11 @@ static int tcmu_check_expired_cmd(int id, void *p, void *data)
*/
scsi_status = SAM_STAT_CHECK_CONDITION;
} else {
- list_del_init(&cmd->cmdr_queue_entry);
-
idr_remove(&udev->commands, id);
tcmu_free_cmd(cmd);
scsi_status = SAM_STAT_TASK_SET_FULL;
}
+ list_del_init(&cmd->queue_entry);
pr_debug("Timing out cmd %u on dev %s that is %s.\n",
id, udev->name, is_running ? "inflight" : "queued");
@@ -1372,7 +1401,8 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
INIT_LIST_HEAD(&udev->node);
INIT_LIST_HEAD(&udev->timedout_entry);
- INIT_LIST_HEAD(&udev->cmdr_queue);
+ INIT_LIST_HEAD(&udev->qfull_queue);
+ INIT_LIST_HEAD(&udev->inflight_queue);
idr_init(&udev->commands);
timer_setup(&udev->qfull_timer, tcmu_qfull_timedout, 0);
@@ -1383,7 +1413,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
return &udev->se_dev;
}
-static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
+static bool run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
struct tcmu_cmd *tcmu_cmd, *tmp_cmd;
LIST_HEAD(cmds);
@@ -1391,15 +1421,15 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
sense_reason_t scsi_ret;
int ret;
- if (list_empty(&udev->cmdr_queue))
+ if (list_empty(&udev->qfull_queue))
return true;
pr_debug("running %s's cmdr queue forcefail %d\n", udev->name, fail);
- list_splice_init(&udev->cmdr_queue, &cmds);
+ list_splice_init(&udev->qfull_queue, &cmds);
- list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, cmdr_queue_entry) {
- list_del_init(&tcmu_cmd->cmdr_queue_entry);
+ list_for_each_entry_safe(tcmu_cmd, tmp_cmd, &cmds, queue_entry) {
+ list_del_init(&tcmu_cmd->queue_entry);
pr_debug("removing cmd %u on dev %s from queue\n",
tcmu_cmd->cmd_id, udev->name);
@@ -1437,14 +1467,13 @@ static bool run_cmdr_queue(struct tcmu_dev *udev, bool fail)
* cmd was requeued, so just put all cmds back in
* the queue
*/
- list_splice_tail(&cmds, &udev->cmdr_queue);
+ list_splice_tail(&cmds, &udev->qfull_queue);
drained = false;
- goto done;
+ break;
}
}
- if (list_empty(&udev->cmdr_queue))
- del_timer(&udev->qfull_timer);
-done:
+
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
return drained;
}
@@ -1454,7 +1483,7 @@ static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
mutex_lock(&udev->cmdr_lock);
tcmu_handle_completions(udev);
- run_cmdr_queue(udev, false);
+ run_qfull_queue(udev, false);
mutex_unlock(&udev->cmdr_lock);
return 0;
@@ -1982,7 +2011,7 @@ static void tcmu_block_dev(struct tcmu_dev *udev)
/* complete IO that has executed successfully */
tcmu_handle_completions(udev);
/* fail IO waiting to be queued */
- run_cmdr_queue(udev, true);
+ run_qfull_queue(udev, true);
unlock:
mutex_unlock(&udev->cmdr_lock);
@@ -1997,7 +2026,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
mutex_lock(&udev->cmdr_lock);
idr_for_each_entry(&udev->commands, cmd, i) {
- if (!list_empty(&cmd->cmdr_queue_entry))
+ if (!test_bit(TCMU_CMD_BIT_INFLIGHT, &cmd->flags))
continue;
pr_debug("removing cmd %u on dev %s from ring (is expired %d)\n",
@@ -2006,6 +2035,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level)
idr_remove(&udev->commands, i);
if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ list_del_init(&cmd->queue_entry);
if (err_level == 1) {
/*
* Userspace was not able to start the
@@ -2666,6 +2696,10 @@ static void check_timedout_devices(void)
mutex_lock(&udev->cmdr_lock);
idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
+
+ tcmu_set_next_deadline(&udev->inflight_queue, &udev->cmd_timer);
+ tcmu_set_next_deadline(&udev->qfull_queue, &udev->qfull_timer);
+
mutex_unlock(&udev->cmdr_lock);
spin_lock_bh(&timed_out_udevs_lock);
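tcmu_set_next_deadline() above scans a queue in submission order for the first deadline that has not yet expired and re-arms the timer on it, or cancels the timer when nothing is pending. A minimal userspace sketch of that "earliest pending deadline" scan; the types and helper are stand-ins, not the kernel API:

#include <stdio.h>

struct cmd {
	unsigned long deadline;	/* absolute time, same units as "now" */
	struct cmd *next;
};

/*
 * Return the first not-yet-expired deadline in submission order, or 0 if
 * everything queued has already timed out (the caller then cancels the
 * timer instead of re-arming it).
 */
static unsigned long next_deadline(const struct cmd *head, unsigned long now)
{
	const struct cmd *c;

	for (c = head; c; c = c->next) {
		if (now <= c->deadline)	/* i.e. !time_after(now, deadline) */
			return c->deadline;
	}
	return 0;
}

int main(void)
{
	struct cmd c2 = { .deadline = 300, .next = NULL };
	struct cmd c1 = { .deadline = 100, .next = &c2 };

	/* at time 200, c1 has expired, so the timer is re-armed for 300 */
	printf("next deadline: %lu\n", next_deadline(&c1, 200));
	return 0;
}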
diff --git a/drivers/tee/optee/supp.c b/drivers/tee/optee/supp.c
index df35fc0..43626e1 100644
--- a/drivers/tee/optee/supp.c
+++ b/drivers/tee/optee/supp.c
@@ -19,7 +19,7 @@
struct optee_supp_req {
struct list_head link;
- bool busy;
+ bool in_queue;
u32 func;
u32 ret;
size_t num_params;
@@ -54,7 +54,6 @@ void optee_supp_release(struct optee_supp *supp)
/* Abort all request retrieved by supplicant */
idr_for_each_entry(&supp->idr, req, id) {
- req->busy = false;
idr_remove(&supp->idr, id);
req->ret = TEEC_ERROR_COMMUNICATION;
complete(&req->c);
@@ -63,6 +62,7 @@ void optee_supp_release(struct optee_supp *supp)
/* Abort all queued requests */
list_for_each_entry_safe(req, req_tmp, &supp->reqs, link) {
list_del(&req->link);
+ req->in_queue = false;
req->ret = TEEC_ERROR_COMMUNICATION;
complete(&req->c);
}
@@ -103,6 +103,7 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
/* Insert the request in the request list */
mutex_lock(&supp->mutex);
list_add_tail(&req->link, &supp->reqs);
+ req->in_queue = true;
mutex_unlock(&supp->mutex);
/* Tell an eventual waiter there's a new request */
@@ -130,9 +131,10 @@ u32 optee_supp_thrd_req(struct tee_context *ctx, u32 func, size_t num_params,
* will serve all requests in a timely manner and
* interrupting then wouldn't make sense.
*/
- interruptable = !req->busy;
- if (!req->busy)
+ if (req->in_queue) {
list_del(&req->link);
+ req->in_queue = false;
+ }
}
mutex_unlock(&supp->mutex);
@@ -176,7 +178,7 @@ static struct optee_supp_req *supp_pop_entry(struct optee_supp *supp,
return ERR_PTR(-ENOMEM);
list_del(&req->link);
- req->busy = true;
+ req->in_queue = false;
return req;
}
@@ -318,7 +320,6 @@ static struct optee_supp_req *supp_pop_req(struct optee_supp *supp,
if ((num_params - nm) != req->num_params)
return ERR_PTR(-EINVAL);
- req->busy = false;
idr_remove(&supp->idr, id);
supp->req_id = -1;
*num_meta = nm;
diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c
index 23ad4f9..24b006a 100644
--- a/drivers/thermal/broadcom/bcm2835_thermal.c
+++ b/drivers/thermal/broadcom/bcm2835_thermal.c
@@ -27,6 +27,8 @@
#include <linux/platform_device.h>
#include <linux/thermal.h>
+#include "../thermal_hwmon.h"
+
#define BCM2835_TS_TSENSCTL 0x00
#define BCM2835_TS_TSENSSTAT 0x04
@@ -275,6 +277,15 @@ static int bcm2835_thermal_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, tz);
+ /*
+ * The thermal zone doesn't enable hwmon by default,
+ * so enable it here.
+ */
+ tz->tzp->no_hwmon = false;
+ err = thermal_add_hwmon_sysfs(tz);
+ if (err)
+ goto err_tz;
+
bcm2835_thermal_debugfs(pdev);
return 0;
diff --git a/drivers/thermal/thermal-generic-adc.c b/drivers/thermal/thermal-generic-adc.c
index bf1c628..e22fc60 100644
--- a/drivers/thermal/thermal-generic-adc.c
+++ b/drivers/thermal/thermal-generic-adc.c
@@ -26,7 +26,7 @@ struct gadc_thermal_info {
static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
{
- int temp, adc_hi, adc_lo;
+ int temp, temp_hi, temp_lo, adc_hi, adc_lo;
int i;
for (i = 0; i < gti->nlookup_table; i++) {
@@ -36,13 +36,17 @@ static int gadc_thermal_adc_to_temp(struct gadc_thermal_info *gti, int val)
if (i == 0) {
temp = gti->lookup_table[0];
- } else if (i >= (gti->nlookup_table - 1)) {
+ } else if (i >= gti->nlookup_table) {
temp = gti->lookup_table[2 * (gti->nlookup_table - 1)];
} else {
adc_hi = gti->lookup_table[2 * i - 1];
adc_lo = gti->lookup_table[2 * i + 1];
- temp = gti->lookup_table[2 * i];
- temp -= ((val - adc_lo) * 1000) / (adc_hi - adc_lo);
+
+ temp_hi = gti->lookup_table[2 * i - 2];
+ temp_lo = gti->lookup_table[2 * i];
+
+ temp = temp_hi + mult_frac(temp_lo - temp_hi, val - adc_hi,
+ adc_lo - adc_hi);
}
return temp;
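The reworked conversion above interpolates linearly between the two lookup-table points bracketing the measured ADC code, using mult_frac() to evaluate x * n / d without intermediate overflow. A standalone sketch with invented table values; the local helper is a simplified stand-in for the kernel's mult_frac():

#include <stdio.h>

/* simplified stand-in for the kernel's mult_frac(x, n, d) = x * n / d */
static long mult_frac(long x, long n, long d)
{
	return x * n / d;
}

int main(void)
{
	/* invented lookup-table points: (temperature in millidegrees C, ADC code) */
	long temp_hi = 25000, adc_hi = 600;
	long temp_lo = 30000, adc_lo = 500;
	long val = 550;		/* measured ADC code, halfway between the points */

	long temp = temp_hi + mult_frac(temp_lo - temp_hi, val - adc_hi,
					adc_lo - adc_hi);

	printf("temp = %ld millidegrees C\n", temp);	/* prints 27500 */
	return 0;
}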
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 6120a82..de12162 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -470,16 +470,20 @@ static void update_temperature(struct thermal_zone_device *tz)
store_temperature(tz, temp);
}
-static void thermal_zone_device_reset(struct thermal_zone_device *tz)
+static void thermal_zone_device_init(struct thermal_zone_device *tz)
{
struct thermal_instance *pos;
-
tz->temperature = THERMAL_TEMP_INVALID;
- tz->passive = 0;
list_for_each_entry(pos, &tz->thermal_instances, tz_node)
pos->initialized = false;
}
+static void thermal_zone_device_reset(struct thermal_zone_device *tz)
+{
+ tz->passive = 0;
+ thermal_zone_device_init(tz);
+}
+
void thermal_zone_device_update_temp(struct thermal_zone_device *tz,
enum thermal_notify_event event, int temp)
{
@@ -1596,7 +1600,7 @@ static int thermal_pm_notify(struct notifier_block *nb,
case PM_POST_SUSPEND:
atomic_set(&in_suspend, 0);
list_for_each_entry(tz, &thermal_tz_list, node) {
- thermal_zone_device_reset(tz);
+ thermal_zone_device_init(tz);
thermal_zone_device_update(tz,
THERMAL_EVENT_UNSPECIFIED);
}
diff --git a/drivers/thermal/thermal_hwmon.h b/drivers/thermal/thermal_hwmon.h
index 019f6f8..a160b9d 100644
--- a/drivers/thermal/thermal_hwmon.h
+++ b/drivers/thermal/thermal_hwmon.h
@@ -19,13 +19,13 @@
int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz);
void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz);
#else
-static int
+static inline int
thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
{
return 0;
}
-static void
+static inline void
thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
{
}
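Marking the !CONFIG_THERMAL_HWMON stubs static inline matters because a plain static function defined in a header triggers a "defined but not used" warning (and a dead copy) in every file that includes the header without calling it, whereas static inline stubs compile away cleanly. A minimal sketch of the pattern around a hypothetical feature macro:

/* example_feature.h -- illustrative header, not part of the kernel */
#ifndef EXAMPLE_FEATURE_H
#define EXAMPLE_FEATURE_H

#ifdef CONFIG_EXAMPLE_FEATURE
int example_feature_setup(void *ctx);
void example_feature_teardown(void *ctx);
#else
/* static inline stubs are discarded silently by every includer */
static inline int example_feature_setup(void *ctx)
{
	return 0;
}

static inline void example_feature_teardown(void *ctx)
{
}
#endif

#endif /* EXAMPLE_FEATURE_H */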
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index f80a300..48bd694 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -3420,6 +3420,11 @@ static int
serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board)
{
int num_iomem, num_port, first_port = -1, i;
+ int rc;
+
+ rc = serial_pci_is_class_communication(dev);
+ if (rc)
+ return rc;
/*
* Should we try to make guesses for multiport serial devices later?
@@ -3647,10 +3652,6 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent)
board = &pci_boards[ent->driver_data];
- rc = serial_pci_is_class_communication(dev);
- if (rc)
- return rc;
-
rc = serial_pci_is_blacklisted(dev);
if (rc)
return rc;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 3f8d127..7d030c2 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -1477,6 +1477,8 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
else
cr1 &= ~UARTCR1_PT;
}
+ } else {
+ cr1 &= ~UARTCR1_PE;
}
/* ask the core to calculate the divisor */
@@ -1688,6 +1690,8 @@ lpuart32_set_termios(struct uart_port *port, struct ktermios *termios,
else
ctrl &= ~UARTCTRL_PT;
}
+ } else {
+ ctrl &= ~UARTCTRL_PE;
}
/* ask the core to calculate the divisor */
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 04fec08..1a74da9 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -2407,6 +2407,8 @@ static int msm_geni_serial_probe(struct platform_device *pdev)
if (ret)
goto exit_geni_serial_probe;
+ dev_port->serial_rsc.ctrl_dev = &pdev->dev;
+
if (of_property_read_u32(pdev->dev.of_node, "qcom,wakeup-byte",
&wake_char)) {
dev_dbg(&pdev->dev, "No Wakeup byte specified\n");
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 2f8fa18..c6058b5 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1365,11 +1365,14 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
wr_regl(port, S3C2410_ULCON, ulcon);
wr_regl(port, S3C2410_UBRDIV, quot);
+ port->status &= ~UPSTAT_AUTOCTS;
+
umcon = rd_regl(port, S3C2410_UMCON);
if (termios->c_cflag & CRTSCTS) {
umcon |= S3C2410_UMCOM_AFC;
/* Disable RTS when RX FIFO contains 63 bytes */
umcon &= ~S3C2412_UMCON_AFC_8;
+ port->status = UPSTAT_AUTOCTS;
} else {
umcon &= ~S3C2410_UMCOM_AFC;
}
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 7fe6794..f0b354b 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -130,6 +130,9 @@ static void uart_start(struct tty_struct *tty)
struct uart_port *port;
unsigned long flags;
+ if (!state)
+ return;
+
port = uart_port_lock(state, flags);
__uart_start(tty);
uart_port_unlock(port, flags);
@@ -727,6 +730,9 @@ static void uart_unthrottle(struct tty_struct *tty)
upstat_t mask = UPSTAT_SYNC_FIFO;
struct uart_port *port;
+ if (!state)
+ return;
+
port = uart_port_ref(state);
if (!port)
return;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index effba6c..859b173 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1331,7 +1331,7 @@ static void sci_tx_dma_release(struct sci_port *s)
dma_release_channel(chan);
}
-static void sci_submit_rx(struct sci_port *s)
+static int sci_submit_rx(struct sci_port *s, bool port_lock_held)
{
struct dma_chan *chan = s->chan_rx;
struct uart_port *port = &s->port;
@@ -1359,19 +1359,22 @@ static void sci_submit_rx(struct sci_port *s)
s->active_rx = s->cookie_rx[0];
dma_async_issue_pending(chan);
- return;
+ return 0;
fail:
+ /* Switch to PIO */
+ if (!port_lock_held)
+ spin_lock_irqsave(&port->lock, flags);
if (i)
dmaengine_terminate_async(chan);
for (i = 0; i < 2; i++)
s->cookie_rx[i] = -EINVAL;
s->active_rx = -EINVAL;
- /* Switch to PIO */
- spin_lock_irqsave(&port->lock, flags);
s->chan_rx = NULL;
sci_start_rx(port);
- spin_unlock_irqrestore(&port->lock, flags);
+ if (!port_lock_held)
+ spin_unlock_irqrestore(&port->lock, flags);
+ return -EAGAIN;
}
static void work_fn_tx(struct work_struct *work)
@@ -1491,7 +1494,7 @@ static enum hrtimer_restart rx_timer_fn(struct hrtimer *t)
}
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
- sci_submit_rx(s);
+ sci_submit_rx(s, true);
/* Direct new serial port interrupts back to CPU */
scr = serial_port_in(port, SCSCR);
@@ -1617,7 +1620,7 @@ static void sci_request_dma(struct uart_port *port)
s->chan_rx_saved = s->chan_rx = chan;
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
- sci_submit_rx(s);
+ sci_submit_rx(s, false);
}
}
@@ -1666,8 +1669,10 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
disable_irq_nosync(irq);
scr |= SCSCR_RDRQE;
} else {
+ if (sci_submit_rx(s, false) < 0)
+ goto handle_pio;
+
scr &= ~SCSCR_RIE;
- sci_submit_rx(s);
}
serial_port_out(port, SCSCR, scr);
/* Clear current interrupt */
@@ -1679,6 +1684,8 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
return IRQ_HANDLED;
}
+
+handle_pio:
#endif
if (s->rx_trigger > 1 && s->rx_fifo_timeout > 0) {
@@ -1914,7 +1921,7 @@ static int sci_request_irq(struct sci_port *port)
static void sci_free_irq(struct sci_port *port)
{
- int i;
+ int i, j;
/*
* Intentionally in reverse order so we iterate over the muxed
@@ -1930,6 +1937,13 @@ static void sci_free_irq(struct sci_port *port)
if (unlikely(irq < 0))
continue;
+ /* Check if already freed (irq was muxed) */
+ for (j = 0; j < i; j++)
+ if (port->irqs[j] == irq)
+ j = i + 1;
+ if (j > i)
+ continue;
+
free_irq(port->irqs[i], port);
kfree(port->irqstr[i]);
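The added loop above prevents a double free_irq() when several logical interrupts are muxed onto one physical IRQ line: before freeing entry i it checks whether the same number already appeared at an earlier index. A standalone sketch of that duplicate check, written with an explicit flag instead of the j > i trick:

#include <stdbool.h>
#include <stdio.h>

/* stand-in for free_irq(): free each distinct IRQ number only once */
static void release_irq(int irq)
{
	printf("freeing irq %d\n", irq);
}

static void free_port_irqs(const int *irqs, int nr)
{
	for (int i = 0; i < nr; i++) {
		bool already_freed = false;

		for (int j = 0; j < i; j++) {
			if (irqs[j] == irqs[i]) {
				already_freed = true;
				break;
			}
		}
		if (already_freed)
			continue;

		release_irq(irqs[i]);
	}
}

int main(void)
{
	int irqs[] = { 17, 17, 17, 23 };	/* muxed: three entries share irq 17 */

	free_port_irqs(irqs, 4);
	return 0;
}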
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 2ee1111..0bd1fc3 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1122,6 +1122,16 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
USB_PORT_FEAT_ENABLE);
}
+ /*
+ * Add debounce if USB3 link is in polling/link training state.
+ * Link will automatically transition to Enabled state after
+ * link training completes.
+ */
+ if (hub_is_superspeed(hdev) &&
+ ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+ USB_SS_PORT_LS_POLLING))
+ need_debounce_delay = true;
+
/* Clear status-change flags; we'll debounce later */
if (portchange & USB_PORT_STAT_C_CONNECTION) {
need_debounce_delay = true;
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index ef2c199..dff2c6e 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -71,6 +71,13 @@ static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
p->power_down = false;
}
+static void dwc2_set_s3c6400_params(struct dwc2_hsotg *hsotg)
+{
+ struct dwc2_core_params *p = &hsotg->params;
+
+ p->power_down = 0;
+}
+
static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
@@ -151,7 +158,8 @@ const struct of_device_id dwc2_of_match_table[] = {
{ .compatible = "lantiq,arx100-usb", .data = dwc2_set_ltq_params },
{ .compatible = "lantiq,xrx200-usb", .data = dwc2_set_ltq_params },
{ .compatible = "snps,dwc2" },
- { .compatible = "samsung,s3c6400-hsotg" },
+ { .compatible = "samsung,s3c6400-hsotg",
+ .data = dwc2_set_s3c6400_params },
{ .compatible = "amlogic,meson8-usb",
.data = dwc2_set_amlogic_params },
{ .compatible = "amlogic,meson8b-usb",
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index abe7f64..6bbea04 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1075,8 +1075,6 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
struct usb_gadget *gadget = &dwc->gadget;
enum usb_device_speed speed = gadget->speed;
- dwc3_ep_inc_enq(dep);
-
trb->size = DWC3_TRB_SIZE_LENGTH(length);
trb->bpl = lower_32_bits(dma);
trb->bph = upper_32_bits(dma);
@@ -1146,16 +1144,20 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
usb_endpoint_type(dep->endpoint.desc));
}
- /* always enable Continue on Short Packet */
+ /*
+ * Enable Continue on Short Packet
+ * when the endpoint is not stream capable
+ */
if (usb_endpoint_dir_out(dep->endpoint.desc)) {
- trb->ctrl |= DWC3_TRB_CTRL_CSP;
+ if (!dep->stream_capable)
+ trb->ctrl |= DWC3_TRB_CTRL_CSP;
if (short_not_ok)
trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
}
if ((!no_interrupt && !chain) ||
- (dwc3_calc_trbs_left(dep) == 0))
+ (dwc3_calc_trbs_left(dep) == 1))
trb->ctrl |= DWC3_TRB_CTRL_IOC;
if (chain)
@@ -1166,6 +1168,8 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
trb->ctrl |= DWC3_TRB_CTRL_HWO;
+ dwc3_ep_inc_enq(dep);
+
trace_dwc3_prepare_trb(dep, trb);
}
@@ -1269,7 +1273,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
unsigned int rem = length % maxp;
- if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) {
+ if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) {
struct dwc3 *dwc = dep->dwc;
struct dwc3_trb *trb;
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index f22714c..f27c5cb 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -251,9 +251,11 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
s = "2x ";
break;
case 3:
+ default:
s = "3x ";
break;
}
+ break;
default:
s = "";
} s; }),
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 06ebb88..d3d4c98 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -241,6 +241,12 @@
config USB_F_GSI
tristate
+config USB_F_MTP
+ tristate
+
+config USB_F_PTP
+ tristate
+
# this first set of drivers all depend on bulk-capable hardware.
config USB_CONFIGFS
@@ -588,6 +594,27 @@
related functionalities using GSI hardware accelerated data
path and control path.
+config USB_CONFIGFS_F_MTP
+ bool "MTP gadget"
+ select USB_F_MTP
+ depends on USB_CONFIGFS
+ help
+ The Media Transfer Protocol (MTP) function mounts the USB gadget
+ as a media device, but unlike the Mass Storage Gadget, MTP operates
+ at the file level, exposing the relevant content while hiding
+ system/restricted files.
+
+config USB_CONFIGFS_F_PTP
+ bool "PTP gadget"
+ select USB_F_PTP
+ depends on USB_CONFIGFS && USB_CONFIGFS_F_MTP
+ help
+ The Picture Transfer Protocol (PTP) function driver is a wrapper
+ around the MTP function driver. It mounts the USB gadget as a media
+ device, but unlike the Mass Storage Gadget, PTP operates at the
+ file level, exposing the relevant content while hiding
+ system/restricted files.
+
choice
tristate "USB Gadget precomposed configurations"
default USB_ETH
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 26283a9..7d81abb 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -65,3 +65,7 @@
obj-$(CONFIG_USB_F_QDSS) += usb_f_qdss.o
usb_f_gsi-y := f_gsi.o
obj-$(CONFIG_USB_F_GSI) += usb_f_gsi.o
+usb_f_mtp-y := f_mtp.o
+obj-$(CONFIG_USB_F_MTP) += usb_f_mtp.o
+usb_f_ptp-y := f_ptp.o
+obj-$(CONFIG_USB_F_PTP) += usb_f_ptp.o
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 6be5525..586f79e 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -658,16 +658,16 @@ static ssize_t acc_write(struct file *fp, const char __user *buf,
}
while (count > 0) {
- if (!dev->online) {
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ ((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+ if (!dev->online || dev->disconnected) {
pr_debug("acc_write dev->error\n");
r = -EIO;
break;
}
- /* get an idle tx request to use */
- req = 0;
- ret = wait_event_interruptible(dev->write_wq,
- ((req = req_get(dev, &dev->tx_idle)) || !dev->online));
if (!req) {
r = ret;
break;
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
new file mode 100644
index 0000000..0e3a582
--- /dev/null
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -0,0 +1,1921 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_mtp.h>
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+
+#include "configfs.h"
+
+#define MTP_RX_BUFFER_INIT_SIZE 1048576
+#define MTP_TX_BUFFER_INIT_SIZE 1048576
+#define MTP_BULK_BUFFER_SIZE 16384
+#define INTR_BUFFER_SIZE 28
+#define MAX_INST_NAME_LEN 40
+#define MTP_MAX_FILE_SIZE 0xFFFFFFFFL
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX 0
+
+/* values for mtp_dev.state */
+#define STATE_OFFLINE 0 /* initial state, disconnected */
+#define STATE_READY 1 /* ready for userspace calls */
+#define STATE_BUSY 2 /* processing userspace calls */
+#define STATE_CANCELED 3 /* transaction canceled by host */
+#define STATE_ERROR 4 /* error from completion routine */
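+
+/*
+ * Typical flow (derived from the code below): mtp_function_set_alt()
+ * moves the device to STATE_READY, read/write and the file-transfer
+ * ioctls move it to STATE_BUSY, a host MTP_REQ_CANCEL moves it to
+ * STATE_CANCELED, and mtp_function_disable()/unbind set STATE_OFFLINE.
+ */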
+
+/* number of tx and rx requests to allocate */
+#define MTP_TX_REQ_MAX 8
+#define RX_REQ_MAX 2
+#define INTR_REQ_MAX 5
+
+/* ID for Microsoft MTP OS String */
+#define MTP_OS_STRING_ID 0xEE
+
+/* MTP class requests */
+#define MTP_REQ_CANCEL 0x64
+#define MTP_REQ_GET_EXT_EVENT_DATA 0x65
+#define MTP_REQ_RESET 0x66
+#define MTP_REQ_GET_DEVICE_STATUS 0x67
+
+/* constants for device status */
+#define MTP_RESPONSE_OK 0x2001
+#define MTP_RESPONSE_DEVICE_BUSY 0x2019
+#define DRIVER_NAME "mtp"
+
+#define MAX_ITERATION 100
+
+unsigned int mtp_rx_req_len = MTP_RX_BUFFER_INIT_SIZE;
+module_param(mtp_rx_req_len, uint, 0644);
+
+unsigned int mtp_tx_req_len = MTP_TX_BUFFER_INIT_SIZE;
+module_param(mtp_tx_req_len, uint, 0644);
+
+unsigned int mtp_tx_reqs = MTP_TX_REQ_MAX;
+module_param(mtp_tx_reqs, uint, 0644);
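+/*
+ * The three knobs above are writable module parameters (mode 0644), so
+ * the defaults can be overridden at load time or later through
+ * /sys/module/usb_f_mtp/parameters/ (module name taken from the
+ * usb_f_mtp.o entry in the Makefile).
+ */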
+
+static const char mtp_shortname[] = DRIVER_NAME "_usb";
+
+struct mtp_dev {
+ struct usb_function function;
+ struct usb_composite_dev *cdev;
+ spinlock_t lock;
+
+ struct usb_ep *ep_in;
+ struct usb_ep *ep_out;
+ struct usb_ep *ep_intr;
+
+ int state;
+
+ /* synchronize access to our device file */
+ atomic_t open_excl;
+ /* to enforce only one ioctl at a time */
+ atomic_t ioctl_excl;
+
+ struct list_head tx_idle;
+ struct list_head intr_idle;
+
+ wait_queue_head_t read_wq;
+ wait_queue_head_t write_wq;
+ wait_queue_head_t intr_wq;
+ struct usb_request *rx_req[RX_REQ_MAX];
+ int rx_done;
+
+ /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
+ * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
+ */
+ struct workqueue_struct *wq;
+ struct work_struct send_file_work;
+ struct work_struct receive_file_work;
+ struct file *xfer_file;
+ loff_t xfer_file_offset;
+ int64_t xfer_file_length;
+ unsigned xfer_send_header;
+ uint16_t xfer_command;
+ uint32_t xfer_transaction_id;
+ int xfer_result;
+ struct {
+ unsigned long vfs_rbytes;
+ unsigned long vfs_wbytes;
+ unsigned int vfs_rtime;
+ unsigned int vfs_wtime;
+ } perf[MAX_ITERATION];
+ unsigned int dbg_read_index;
+ unsigned int dbg_write_index;
+ struct mutex read_mutex;
+};
+
+static struct usb_interface_descriptor mtp_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
+ .bInterfaceSubClass = USB_SUBCLASS_VENDOR_SPEC,
+ .bInterfaceProtocol = 0,
+};
+
+static struct usb_interface_descriptor ptp_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_STILL_IMAGE,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 1,
+};
+
+static struct usb_endpoint_descriptor mtp_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = {
+ .bLength = sizeof(mtp_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* .bMaxBurst = DYNAMIC, */
+};
+
+static struct usb_endpoint_descriptor mtp_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = {
+ .bLength = sizeof(mtp_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* .bMaxBurst = DYNAMIC, */
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_intr_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
+ .bInterval = 6,
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = {
+ .bLength = sizeof(mtp_intr_ss_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .wBytesPerInterval = cpu_to_le16(INTR_BUFFER_SIZE),
+};
+
+static struct usb_descriptor_header *fs_mtp_descs[] = {
+ (struct usb_descriptor_header *) &mtp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *hs_mtp_descs[] = {
+ (struct usb_descriptor_header *) &mtp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *ss_mtp_descs[] = {
+ (struct usb_descriptor_header *) &mtp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_ss_in_desc,
+ (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &mtp_ss_out_desc,
+ (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *fs_ptp_descs[] = {
+ (struct usb_descriptor_header *) &ptp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *hs_ptp_descs[] = {
+ (struct usb_descriptor_header *) &ptp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+ (struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ NULL,
+};
+
+static struct usb_descriptor_header *ss_ptp_descs[] = {
+ (struct usb_descriptor_header *) &ptp_interface_desc,
+ (struct usb_descriptor_header *) &mtp_ss_in_desc,
+ (struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &mtp_ss_out_desc,
+ (struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
+ (struct usb_descriptor_header *) &mtp_intr_desc,
+ (struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
+ NULL,
+};
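+
+/*
+ * The PTP descriptor sets differ from the MTP ones only in the interface
+ * descriptor (still-image class, subclass 1, protocol 1); the bulk and
+ * interrupt endpoint descriptors are shared, which is what lets f_ptp.c
+ * reuse this driver.
+ */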
+
+static struct usb_string mtp_string_defs[] = {
+ /* Naming interface "MTP" so libmtp will recognize us */
+ [INTERFACE_STRING_INDEX].s = "MTP",
+ { }, /* end of list */
+};
+
+static struct usb_gadget_strings mtp_string_table = {
+ .language = 0x0409, /* en-US */
+ .strings = mtp_string_defs,
+};
+
+static struct usb_gadget_strings *mtp_strings[] = {
+ &mtp_string_table,
+ NULL,
+};
+
+/* Microsoft MTP OS String */
+static u8 mtp_os_string[] = {
+ 18, /* sizeof(mtp_os_string) */
+ USB_DT_STRING,
+ /* Signature field: "MSFT100" */
+ 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+ /* vendor code */
+ 1,
+ /* padding */
+ 0
+};
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mtp_ext_config_desc_header {
+ __le32 dwLength;
+ __u16 bcdVersion;
+ __le16 wIndex;
+ __u8 bCount;
+ __u8 reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mtp_ext_config_desc_function {
+ __u8 bFirstInterfaceNumber;
+ __u8 bInterfaceCount;
+ __u8 compatibleID[8];
+ __u8 subCompatibleID[8];
+ __u8 reserved[6];
+};
+
+/* MTP Extended Configuration Descriptor */
+struct mtp_ext_config_desc {
+ struct mtp_ext_config_desc_header header;
+ struct mtp_ext_config_desc_function function;
+};
+
+static struct mtp_ext_config_desc mtp_ext_config_desc = {
+ .header = {
+ .dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
+ .bcdVersion = __constant_cpu_to_le16(0x0100),
+ .wIndex = __constant_cpu_to_le16(4),
+ .bCount = 1,
+ },
+ .function = {
+ .bFirstInterfaceNumber = 0,
+ .bInterfaceCount = 1,
+ .compatibleID = { 'M', 'T', 'P' },
+ },
+};
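+
+/*
+ * The "MTP" compatibleID in the Microsoft OS descriptor above is what a
+ * Windows host uses to bind its in-box MTP driver; mtp_ctrlrequest()
+ * rewrites the first byte to 'P' when the PTP descriptor set is active.
+ */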
+
+struct mtp_device_status {
+ __le16 wLength;
+ __le16 wCode;
+};
+
+struct mtp_data_header {
+ /* length of packet, including this header */
+ __le32 length;
+ /* container type (2 for data packet) */
+ __le16 type;
+ /* MTP command code */
+ __le16 command;
+ /* MTP transaction ID */
+ __le32 transaction_id;
+};
+
+struct mtp_instance {
+ struct usb_function_instance func_inst;
+ const char *name;
+ struct mtp_dev *dev;
+ char mtp_ext_compat_id[16];
+ struct usb_os_desc mtp_os_desc;
+};
+
+/* temporary variable used between mtp_open() and mtp_function_bind() */
+static struct mtp_dev *_mtp_dev;
+
+static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
+{
+ return container_of(f, struct mtp_dev, function);
+}
+
+static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
+{
+ struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+
+ if (!req)
+ return NULL;
+
+ /* now allocate buffers for the requests */
+ req->buf = kmalloc(buffer_size, GFP_KERNEL);
+ if (!req->buf) {
+ usb_ep_free_request(ep, req);
+ return NULL;
+ }
+
+ return req;
+}
+
+static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+ if (req) {
+ kfree(req->buf);
+ usb_ep_free_request(ep, req);
+ }
+}
+
+static inline int mtp_lock(atomic_t *excl)
+{
+ if (atomic_inc_return(excl) == 1) {
+ return 0;
+ } else {
+ atomic_dec(excl);
+ return -1;
+ }
+}
+
+static inline void mtp_unlock(atomic_t *excl)
+{
+ atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
+ struct usb_request *req)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ list_add_tail(&req->list, head);
+ spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request
+*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
+{
+ unsigned long flags;
+ struct usb_request *req;
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (list_empty(head)) {
+ req = 0;
+ } else {
+ req = list_first_entry(head, struct usb_request, list);
+ list_del(&req->list);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return req;
+}
+
+static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (req->status != 0 && dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
+
+ mtp_req_put(dev, &dev->tx_idle, req);
+
+ wake_up(&dev->write_wq);
+}
+
+static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ dev->rx_done = 1;
+ if (req->status != 0 && dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
+
+ wake_up(&dev->read_wq);
+}
+
+static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (req->status != 0 && dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
+
+ mtp_req_put(dev, &dev->intr_idle, req);
+
+ wake_up(&dev->intr_wq);
+}
+
+static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
+ struct usb_endpoint_descriptor *in_desc,
+ struct usb_endpoint_descriptor *out_desc,
+ struct usb_endpoint_descriptor *intr_desc)
+{
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ struct usb_ep *ep;
+ int i;
+
+ DBG(cdev, "create_bulk_endpoints dev: %pK\n", dev);
+
+ ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_in = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_out = ep;
+
+ ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
+ if (!ep) {
+ DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
+ return -ENODEV;
+ }
+ DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
+ ep->driver_data = dev; /* claim the endpoint */
+ dev->ep_intr = ep;
+
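+ /*
+ * Try the (possibly large) module-parameter buffer sizes first; if the
+ * UDC cannot allocate them, the retry paths below fall back to the
+ * 16 KB MTP_BULK_BUFFER_SIZE.
+ */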
+retry_tx_alloc:
+ /* now allocate requests for our endpoints */
+ for (i = 0; i < mtp_tx_reqs; i++) {
+ req = mtp_request_new(dev->ep_in, mtp_tx_req_len);
+ if (!req) {
+ if (mtp_tx_req_len <= MTP_BULK_BUFFER_SIZE)
+ goto fail;
+ while ((req = mtp_req_get(dev, &dev->tx_idle)))
+ mtp_request_free(req, dev->ep_in);
+ mtp_tx_req_len = MTP_BULK_BUFFER_SIZE;
+ mtp_tx_reqs = MTP_TX_REQ_MAX;
+ goto retry_tx_alloc;
+ }
+ req->complete = mtp_complete_in;
+ mtp_req_put(dev, &dev->tx_idle, req);
+ }
+
+ /*
+ * The RX buffer should be aligned to EP max packet for
+ * some controllers. At bind time, we don't know the
+ * operational speed. Hence assuming super speed max
+ * packet size.
+ */
+ if (mtp_rx_req_len % 1024)
+ mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
+
+retry_rx_alloc:
+ for (i = 0; i < RX_REQ_MAX; i++) {
+ req = mtp_request_new(dev->ep_out, mtp_rx_req_len);
+ if (!req) {
+ if (mtp_rx_req_len <= MTP_BULK_BUFFER_SIZE)
+ goto fail;
+ for (--i; i >= 0; i--)
+ mtp_request_free(dev->rx_req[i], dev->ep_out);
+ mtp_rx_req_len = MTP_BULK_BUFFER_SIZE;
+ goto retry_rx_alloc;
+ }
+ req->complete = mtp_complete_out;
+ dev->rx_req[i] = req;
+ }
+ for (i = 0; i < INTR_REQ_MAX; i++) {
+ req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
+ if (!req)
+ goto fail;
+ req->complete = mtp_complete_intr;
+ mtp_req_put(dev, &dev->intr_idle, req);
+ }
+
+ return 0;
+
+fail:
+ pr_err("mtp_bind() could not allocate requests\n");
+ return -1;
+}
+
+static ssize_t mtp_read(struct file *fp, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req;
+ ssize_t r = count, xfer, len;
+ int ret = 0;
+
+ DBG(cdev, "%s(%zu) state:%d\n", __func__, count, dev->state);
+
+ /* we will block until we're online */
+ DBG(cdev, "mtp_read: waiting for online state\n");
+ ret = wait_event_interruptible(dev->read_wq,
+ dev->state != STATE_OFFLINE);
+ if (ret < 0) {
+ r = ret;
+ goto done;
+ }
+
+ len = ALIGN(count, dev->ep_out->maxpacket);
+ if (len > mtp_rx_req_len)
+ return -EINVAL;
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_OFFLINE) {
+ spin_unlock_irq(&dev->lock);
+ return -ENODEV;
+ }
+
+ if (dev->ep_out->desc) {
+ if (!cdev) {
+ spin_unlock_irq(&dev->lock);
+ return -ENODEV;
+ }
+
+ len = usb_ep_align_maybe(cdev->gadget, dev->ep_out, count);
+ if (len > MTP_BULK_BUFFER_SIZE) {
+ spin_unlock_irq(&dev->lock);
+ return -EINVAL;
+ }
+ }
+
+ if (dev->state == STATE_CANCELED) {
+ /* report cancelation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ return -ECANCELED;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ goto done;
+ }
+requeue_req:
+ /* queue a request */
+ req = dev->rx_req[0];
+ req->length = len;
+ dev->rx_done = 0;
+ mutex_unlock(&dev->read_mutex);
+ ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+ if (ret < 0) {
+ r = -EIO;
+ goto done;
+ } else {
+ DBG(cdev, "rx %pK queue\n", req);
+ }
+
+ /* wait for a request to complete */
+ ret = wait_event_interruptible(dev->read_wq,
+ dev->rx_done || dev->state != STATE_BUSY);
+ if (dev->state == STATE_CANCELED) {
+ r = -ECANCELED;
+ if (!dev->rx_done)
+ usb_ep_dequeue(dev->ep_out, req);
+ spin_lock_irq(&dev->lock);
+ dev->state = STATE_CANCELED;
+ spin_unlock_irq(&dev->lock);
+ goto done;
+ }
+ if (ret < 0) {
+ r = ret;
+ usb_ep_dequeue(dev->ep_out, req);
+ goto done;
+ }
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_BUSY) {
+ /* If we got a 0-len packet, throw it back and try again. */
+ if (req->actual == 0)
+ goto requeue_req;
+
+ DBG(cdev, "rx %pK %d\n", req, req->actual);
+ xfer = (req->actual < count) ? req->actual : count;
+ r = xfer;
+ if (copy_to_user(buf, req->buf, xfer))
+ r = -EFAULT;
+ } else
+ r = -EIO;
+
+ mutex_unlock(&dev->read_mutex);
+done:
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ r = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+
+ DBG(cdev, "%s returning %zd state:%d\n", __func__, r, dev->state);
+ return r;
+}
+
+static ssize_t mtp_write(struct file *fp, const char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = 0;
+ ssize_t r = count;
+ unsigned xfer;
+ int sendZLP = 0;
+ int ret;
+
+ DBG(cdev, "%s(%zu) state:%d\n", __func__, count, dev->state);
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED) {
+ /* report cancelation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ return -ECANCELED;
+ }
+ if (dev->state == STATE_OFFLINE) {
+ spin_unlock_irq(&dev->lock);
+ return -ENODEV;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
+ /* we need to send a zero length packet to signal the end of transfer
+ * if the transfer size is aligned to a packet boundary.
+ */
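+ /* e.g. a request whose length is an exact multiple of wMaxPacketSize
+ * (say 1024 bytes on a 512-byte high-speed bulk endpoint) needs a
+ * trailing ZLP so the host can tell the transfer has ended.
+ */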
+ if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+ sendZLP = 1;
+
+ while (count > 0 || sendZLP) {
+ /* so we exit after sending ZLP */
+ if (count == 0)
+ sendZLP = 0;
+
+ if (dev->state != STATE_BUSY) {
+ DBG(cdev, "mtp_write dev->error\n");
+ r = -EIO;
+ break;
+ }
+
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ ((req = mtp_req_get(dev, &dev->tx_idle))
+ || dev->state != STATE_BUSY));
+ if (!req) {
+ DBG(cdev, "%s request NULL ret:%d state:%d\n",
+ __func__, ret, dev->state);
+ r = ret;
+ break;
+ }
+
+ if (count > mtp_tx_req_len)
+ xfer = mtp_tx_req_len;
+ else
+ xfer = count;
+ if (xfer && copy_from_user(req->buf, buf, xfer)) {
+ r = -EFAULT;
+ break;
+ }
+
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+ if (ret < 0) {
+ DBG(cdev, "mtp_write: xfer error %d\n", ret);
+ r = -EIO;
+ break;
+ }
+
+ buf += xfer;
+ count -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = 0;
+ }
+
+ if (req)
+ mtp_req_put(dev, &dev->tx_idle, req);
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ r = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+
+ DBG(cdev, "%s returning %zd state:%d\n", __func__, r, dev->state);
+ return r;
+}
+
+/* read from a local file and write to USB */
+static void send_file_work(struct work_struct *data)
+{
+ struct mtp_dev *dev = container_of(data, struct mtp_dev,
+ send_file_work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *req = 0;
+ struct mtp_data_header *header;
+ struct file *filp;
+ loff_t offset;
+ int64_t count;
+ int xfer, ret, hdr_size;
+ int r = 0;
+ int sendZLP = 0;
+ ktime_t start_time;
+
+ /* read our parameters */
+ smp_rmb();
+ filp = dev->xfer_file;
+ offset = dev->xfer_file_offset;
+ count = dev->xfer_file_length;
+
+ DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
+
+ if (dev->xfer_send_header) {
+ hdr_size = sizeof(struct mtp_data_header);
+ count += hdr_size;
+ } else {
+ hdr_size = 0;
+ }
+
+ /* we need to send a zero length packet to signal the end of transfer
+ * if the transfer size is aligned to a packet boundary.
+ */
+ if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+ sendZLP = 1;
+
+ while (count > 0 || sendZLP) {
+ /* so we exit after sending ZLP */
+ if (count == 0)
+ sendZLP = 0;
+
+ /* get an idle tx request to use */
+ req = 0;
+ ret = wait_event_interruptible(dev->write_wq,
+ (req = mtp_req_get(dev, &dev->tx_idle))
+ || dev->state != STATE_BUSY);
+ if (dev->state == STATE_CANCELED) {
+ r = -ECANCELED;
+ break;
+ }
+ if (!req) {
+ DBG(cdev,
+ "%s request NULL ret:%d state:%d\n", __func__,
+ ret, dev->state);
+ r = ret;
+ break;
+ }
+
+ if (count > mtp_tx_req_len)
+ xfer = mtp_tx_req_len;
+ else
+ xfer = count;
+
+ if (hdr_size) {
+ /* prepend MTP data header */
+ header = (struct mtp_data_header *)req->buf;
+ /*
+ * set file size with header according to
+ * MTP Specification v1.0
+ */
+ header->length = (count > MTP_MAX_FILE_SIZE) ?
+ MTP_MAX_FILE_SIZE : __cpu_to_le32(count);
+ header->type = __cpu_to_le16(2); /* data packet */
+ header->command = __cpu_to_le16(dev->xfer_command);
+ header->transaction_id =
+ __cpu_to_le32(dev->xfer_transaction_id);
+ }
+ start_time = ktime_get();
+ ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
+ &offset);
+ if (ret < 0) {
+ r = ret;
+ break;
+ }
+
+ xfer = ret + hdr_size;
+ dev->perf[dev->dbg_read_index].vfs_rtime =
+ ktime_to_us(ktime_sub(ktime_get(), start_time));
+ dev->perf[dev->dbg_read_index].vfs_rbytes = xfer;
+ dev->dbg_read_index = (dev->dbg_read_index + 1) % MAX_ITERATION;
+ hdr_size = 0;
+
+ req->length = xfer;
+ ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+ if (ret < 0) {
+ DBG(cdev, "send_file_work: xfer error %d\n", ret);
+ if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
+ r = -EIO;
+ break;
+ }
+
+ count -= xfer;
+
+ /* zero this so we don't try to free it on error exit */
+ req = 0;
+ }
+
+ if (req)
+ mtp_req_put(dev, &dev->tx_idle, req);
+
+ DBG(cdev, "%s returning %d state:%d\n", __func__, r, dev->state);
+ /* write the result */
+ dev->xfer_result = r;
+ smp_wmb();
+}
+
+/* read from USB and write to a local file */
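+/*
+ * The RX requests are used as a double buffer: the next USB read is queued
+ * before the previously completed request is written out through the VFS,
+ * so USB and file I/O can overlap.
+ */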
+static void receive_file_work(struct work_struct *data)
+{
+ struct mtp_dev *dev = container_of(data, struct mtp_dev,
+ receive_file_work);
+ struct usb_composite_dev *cdev = dev->cdev;
+ struct usb_request *read_req = NULL, *write_req = NULL;
+ struct file *filp;
+ loff_t offset;
+ int64_t count;
+ int ret, cur_buf = 0;
+ int r = 0;
+ ktime_t start_time;
+
+ /* read our parameters */
+ smp_rmb();
+ filp = dev->xfer_file;
+ offset = dev->xfer_file_offset;
+ count = dev->xfer_file_length;
+
+ DBG(cdev, "receive_file_work(%lld)\n", count);
+ if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
+ DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
+ count, dev->ep_out->maxpacket);
+
+ while (count > 0 || write_req) {
+ if (count > 0) {
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ break;
+ }
+ /* queue a request */
+ read_req = dev->rx_req[cur_buf];
+ cur_buf = (cur_buf + 1) % RX_REQ_MAX;
+
+ /* some h/w expects size to be aligned to ep's MTU */
+ read_req->length = mtp_rx_req_len;
+
+ dev->rx_done = 0;
+ mutex_unlock(&dev->read_mutex);
+ ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
+ if (ret < 0) {
+ r = -EIO;
+ if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
+ break;
+ }
+ }
+
+ if (write_req) {
+ DBG(cdev, "rx %pK %d\n", write_req, write_req->actual);
+ start_time = ktime_get();
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ break;
+ }
+ ret = vfs_write(filp, write_req->buf, write_req->actual,
+ &offset);
+ DBG(cdev, "vfs_write %d\n", ret);
+ if (ret != write_req->actual) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_ERROR;
+ if (read_req && !dev->rx_done)
+ usb_ep_dequeue(dev->ep_out, read_req);
+ break;
+ }
+ mutex_unlock(&dev->read_mutex);
+ dev->perf[dev->dbg_write_index].vfs_wtime =
+ ktime_to_us(ktime_sub(ktime_get(), start_time));
+ dev->perf[dev->dbg_write_index].vfs_wbytes = ret;
+ dev->dbg_write_index =
+ (dev->dbg_write_index + 1) % MAX_ITERATION;
+ write_req = NULL;
+ }
+
+ if (read_req) {
+ /* wait for our last read to complete */
+ ret = wait_event_interruptible(dev->read_wq,
+ dev->rx_done || dev->state != STATE_BUSY);
+ if (dev->state == STATE_CANCELED
+ || dev->state == STATE_OFFLINE) {
+ if (dev->state == STATE_OFFLINE)
+ r = -EIO;
+ else
+ r = -ECANCELED;
+ if (!dev->rx_done)
+ usb_ep_dequeue(dev->ep_out, read_req);
+ break;
+ }
+ if (read_req->status) {
+ r = read_req->status;
+ break;
+ }
+
+ mutex_lock(&dev->read_mutex);
+ if (dev->state == STATE_OFFLINE) {
+ r = -EIO;
+ mutex_unlock(&dev->read_mutex);
+ break;
+ }
+ /* Check if we aligned the size due to MTU constraint */
+ if (count < read_req->length)
+ read_req->actual = (read_req->actual > count ?
+ count : read_req->actual);
+ /* if xfer_file_length is 0xFFFFFFFF, then we read until
+ * we get a zero length packet
+ */
+ if (count != 0xFFFFFFFF)
+ count -= read_req->actual;
+ if (read_req->actual < read_req->length) {
+ /*
+ * short packet is used to signal EOF for
+ * sizes > 4 gig
+ */
+ DBG(cdev, "got short packet\n");
+ count = 0;
+ }
+
+ write_req = read_req;
+ read_req = NULL;
+ mutex_unlock(&dev->read_mutex);
+ }
+ }
+
+ DBG(cdev, "receive_file_work returning %d\n", r);
+ /* write the result */
+ dev->xfer_result = r;
+ smp_wmb();
+}
+
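+/*
+ * Send an MTP event (for example an ObjectAdded or StoreAdded notification
+ * generated by the userspace MTP stack) to the host over the interrupt
+ * endpoint. Reached from the MTP_SEND_EVENT ioctl paths below.
+ */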
+static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
+{
+ struct usb_request *req = NULL;
+ int ret;
+ int length = event->length;
+
+ DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);
+
+ if (length < 0 || length > INTR_BUFFER_SIZE)
+ return -EINVAL;
+ if (dev->state == STATE_OFFLINE)
+ return -ENODEV;
+
+ ret = wait_event_interruptible_timeout(dev->intr_wq,
+ (req = mtp_req_get(dev, &dev->intr_idle)),
+ msecs_to_jiffies(1000));
+ if (!req)
+ return -ETIME;
+
+ if (copy_from_user(req->buf, (void __user *)event->data, length)) {
+ mtp_req_put(dev, &dev->intr_idle, req);
+ return -EFAULT;
+ }
+ req->length = length;
+ ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
+ if (ret)
+ mtp_req_put(dev, &dev->intr_idle, req);
+
+ return ret;
+}
+
+static long mtp_send_receive_ioctl(struct file *fp, unsigned int code,
+ struct mtp_file_range *mfr)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct file *filp = NULL;
+ struct work_struct *work;
+ int ret = -EINVAL;
+
+ if (mtp_lock(&dev->ioctl_excl)) {
+ DBG(dev->cdev, "ioctl returning EBUSY state:%d\n", dev->state);
+ return -EBUSY;
+ }
+
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED) {
+ /* report cancellation to userspace */
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+ ret = -ECANCELED;
+ goto out;
+ }
+ if (dev->state == STATE_OFFLINE) {
+ spin_unlock_irq(&dev->lock);
+ ret = -ENODEV;
+ goto out;
+ }
+ dev->state = STATE_BUSY;
+ spin_unlock_irq(&dev->lock);
+
+ /* hold a reference to the file while we are working with it */
+ filp = fget(mfr->fd);
+ if (!filp) {
+ ret = -EBADF;
+ goto fail;
+ }
+
+ /* write the parameters */
+ dev->xfer_file = filp;
+ dev->xfer_file_offset = mfr->offset;
+ dev->xfer_file_length = mfr->length;
+ /* make sure write is done before parameters are read */
+ smp_wmb();
+
+ if (code == MTP_SEND_FILE_WITH_HEADER) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 1;
+ dev->xfer_command = mfr->command;
+ dev->xfer_transaction_id = mfr->transaction_id;
+ } else if (code == MTP_SEND_FILE) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 0;
+ } else {
+ work = &dev->receive_file_work;
+ }
+
+ /* We do the file transfer on a work queue so it will run
+ * in kernel context, which is necessary for vfs_read and
+ * vfs_write to use our buffers in the kernel address space.
+ */
+ queue_work(dev->wq, work);
+ /* wait for operation to complete */
+ flush_workqueue(dev->wq);
+ fput(filp);
+
+ /* read the result */
+ smp_rmb();
+ ret = dev->xfer_result;
+
+fail:
+ spin_lock_irq(&dev->lock);
+ if (dev->state == STATE_CANCELED)
+ ret = -ECANCELED;
+ else if (dev->state != STATE_OFFLINE)
+ dev->state = STATE_READY;
+ spin_unlock_irq(&dev->lock);
+out:
+ mtp_unlock(&dev->ioctl_excl);
+ DBG(dev->cdev, "ioctl returning %d\n", ret);
+ return ret;
+}
+
+static long mtp_ioctl(struct file *fp, unsigned int code, unsigned long value)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct mtp_file_range mfr;
+ struct mtp_event event;
+ int ret = -EINVAL;
+
+ switch (code) {
+ case MTP_SEND_FILE:
+ case MTP_RECEIVE_FILE:
+ case MTP_SEND_FILE_WITH_HEADER:
+ if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ ret = mtp_send_receive_ioctl(fp, code, &mfr);
+ break;
+ case MTP_SEND_EVENT:
+ if (mtp_lock(&dev->ioctl_excl))
+ return -EBUSY;
+ /* return here so we don't change dev->state below,
+ * which would interfere with bulk transfer state.
+ */
+ if (copy_from_user(&event, (void __user *)value, sizeof(event)))
+ ret = -EFAULT;
+ else
+ ret = mtp_send_event(dev, &event);
+ mtp_unlock(&dev->ioctl_excl);
+ break;
+ default:
+ DBG(dev->cdev, "unknown ioctl code: %d\n", code);
+ }
+fail:
+ return ret;
+}
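+
+/*
+ * Illustrative userspace sketch (not part of this driver; the file name,
+ * file_size, data_cmd and tid below are assumed). Using the uapi
+ * definitions from <linux/usb/f_mtp.h>, an MTP daemon would drive the
+ * ioctls above roughly like this:
+ *
+ *	int mtp_fd = open("/dev/mtp_usb", O_RDWR);
+ *	struct mtp_file_range mfr = {
+ *		.fd = open("/sdcard/DCIM/img0001.jpg", O_RDONLY),
+ *		.offset = 0,
+ *		.length = file_size,
+ *		.command = data_cmd,
+ *		.transaction_id = tid,
+ *	};
+ *	ioctl(mtp_fd, MTP_SEND_FILE_WITH_HEADER, &mfr);
+ */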
+
+/*
+ * 32 bit userspace calling into 64 bit kernel. handle ioctl code
+ * and userspace pointer
+ */
+#ifdef CONFIG_COMPAT
+static long compat_mtp_ioctl(struct file *fp, unsigned int code,
+ unsigned long value)
+{
+ struct mtp_dev *dev = fp->private_data;
+ struct mtp_file_range mfr;
+ struct __compat_mtp_file_range cmfr;
+ struct mtp_event event;
+ struct __compat_mtp_event cevent;
+ unsigned int cmd;
+ bool send_file = false;
+ int ret = -EINVAL;
+
+ switch (code) {
+ case COMPAT_MTP_SEND_FILE:
+ cmd = MTP_SEND_FILE;
+ send_file = true;
+ break;
+ case COMPAT_MTP_RECEIVE_FILE:
+ cmd = MTP_RECEIVE_FILE;
+ send_file = true;
+ break;
+ case COMPAT_MTP_SEND_FILE_WITH_HEADER:
+ cmd = MTP_SEND_FILE_WITH_HEADER;
+ send_file = true;
+ break;
+ case COMPAT_MTP_SEND_EVENT:
+ cmd = MTP_SEND_EVENT;
+ break;
+ default:
+ DBG(dev->cdev, "unknown compat_ioctl code: %d\n", code);
+ ret = -ENOIOCTLCMD;
+ goto fail;
+ }
+
+ if (send_file) {
+ if (copy_from_user(&cmfr, (void __user *)value, sizeof(cmfr))) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ mfr.fd = cmfr.fd;
+ mfr.offset = cmfr.offset;
+ mfr.length = cmfr.length;
+ mfr.command = cmfr.command;
+ mfr.transaction_id = cmfr.transaction_id;
+ ret = mtp_send_receive_ioctl(fp, cmd, &mfr);
+ } else {
+ if (mtp_lock(&dev->ioctl_excl))
+ return -EBUSY;
+ /* return here so we don't change dev->state below,
+ * which would interfere with bulk transfer state.
+ */
+ if (copy_from_user(&cevent, (void __user *)value,
+ sizeof(cevent))) {
+ ret = -EFAULT;
+ goto fail;
+ }
+ event.length = cevent.length;
+ event.data = compat_ptr(cevent.data);
+ ret = mtp_send_event(dev, &event);
+ mtp_unlock(&dev->ioctl_excl);
+ }
+fail:
+ return ret;
+}
+#endif
+
+static int mtp_open(struct inode *ip, struct file *fp)
+{
+ printk(KERN_INFO "mtp_open\n");
+ if (mtp_lock(&_mtp_dev->open_excl)) {
+ pr_err("%s mtp_release not called returning EBUSY\n", __func__);
+ return -EBUSY;
+ }
+
+ /* clear any error condition */
+ if (_mtp_dev->state != STATE_OFFLINE)
+ _mtp_dev->state = STATE_READY;
+
+ fp->private_data = _mtp_dev;
+ return 0;
+}
+
+static int mtp_release(struct inode *ip, struct file *fp)
+{
+ printk(KERN_INFO "mtp_release\n");
+
+ mtp_unlock(&_mtp_dev->open_excl);
+ return 0;
+}
+
+/* file operations for /dev/mtp_usb */
+static const struct file_operations mtp_fops = {
+ .owner = THIS_MODULE,
+ .read = mtp_read,
+ .write = mtp_write,
+ .unlocked_ioctl = mtp_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_mtp_ioctl,
+#endif
+ .open = mtp_open,
+ .release = mtp_release,
+};
+
+static struct miscdevice mtp_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = mtp_shortname,
+ .fops = &mtp_fops,
+};
+
+static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
+ const struct usb_ctrlrequest *ctrl)
+{
+ struct mtp_dev *dev = _mtp_dev;
+ int value = -EOPNOTSUPP;
+ u16 w_index = le16_to_cpu(ctrl->wIndex);
+ u16 w_value = le16_to_cpu(ctrl->wValue);
+ u16 w_length = le16_to_cpu(ctrl->wLength);
+ unsigned long flags;
+
+ VDBG(cdev, "mtp_ctrlrequest "
+ "%02x.%02x v%04x i%04x l%u\n",
+ ctrl->bRequestType, ctrl->bRequest,
+ w_value, w_index, w_length);
+
+ /* Handle MTP OS string */
+ if (ctrl->bRequestType ==
+ (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+ && ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+ && (w_value >> 8) == USB_DT_STRING
+ && (w_value & 0xFF) == MTP_OS_STRING_ID) {
+ value = (w_length < sizeof(mtp_os_string)
+ ? w_length : sizeof(mtp_os_string));
+ memcpy(cdev->req->buf, mtp_os_string, value);
+ } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
+ /* Handle MTP OS descriptor */
+ DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
+ ctrl->bRequest, w_index, w_value, w_length);
+
+ if (ctrl->bRequest == 1
+ && (ctrl->bRequestType & USB_DIR_IN)
+ && (w_index == 4 || w_index == 5)) {
+ value = (w_length < sizeof(mtp_ext_config_desc) ?
+ w_length : sizeof(mtp_ext_config_desc));
+ memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+
+ /* update compatibleID if PTP */
+ if (dev->function.fs_descriptors == fs_ptp_descs) {
+ struct mtp_ext_config_desc *d = cdev->req->buf;
+
+ d->function.compatibleID[0] = 'P';
+ }
+ }
+ } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+ DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
+ ctrl->bRequest, w_index, w_value, w_length);
+
+ if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
+ && w_value == 0) {
+ DBG(cdev, "MTP_REQ_CANCEL\n");
+
+ spin_lock_irqsave(&dev->lock, flags);
+ if (dev->state == STATE_BUSY) {
+ dev->state = STATE_CANCELED;
+ wake_up(&dev->read_wq);
+ wake_up(&dev->write_wq);
+ }
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ /* We need to queue a request to read the remaining
+ * bytes, but we don't actually need to look at
+ * the contents.
+ */
+ value = w_length;
+ } else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
+ && w_index == 0 && w_value == 0) {
+ struct mtp_device_status *status = cdev->req->buf;
+
+ status->wLength =
+ __constant_cpu_to_le16(sizeof(*status));
+
+ DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
+ spin_lock_irqsave(&dev->lock, flags);
+ /* device status is "busy" until we report
+ * the cancelation to userspace
+ */
+ if (dev->state == STATE_CANCELED)
+ status->wCode =
+ __cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
+ else
+ status->wCode =
+ __cpu_to_le16(MTP_RESPONSE_OK);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ value = sizeof(*status);
+ }
+ }
+
+ /* respond with data transfer or status phase? */
+ if (value >= 0) {
+ int rc;
+
+ cdev->req->zero = value < w_length;
+ cdev->req->length = value;
+ rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+ if (rc < 0)
+ ERROR(cdev, "%s: response queue error\n", __func__);
+ }
+ return value;
+}
+
+static int
+mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct usb_composite_dev *cdev = c->cdev;
+ struct mtp_dev *dev = func_to_mtp(f);
+ int id;
+ int ret;
+ struct mtp_instance *fi_mtp;
+
+ dev->cdev = cdev;
+ DBG(cdev, "%s dev: %pK\n", __func__, dev);
+
+ /* allocate interface ID(s) */
+ id = usb_interface_id(c, f);
+ if (id < 0)
+ return id;
+ mtp_interface_desc.bInterfaceNumber = id;
+
+ if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+ ret = usb_string_id(c->cdev);
+ if (ret < 0)
+ return ret;
+ mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
+ mtp_interface_desc.iInterface = ret;
+ }
+
+ fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
+
+ if (cdev->use_os_string) {
+ f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
+ GFP_KERNEL);
+ if (!f->os_desc_table)
+ return -ENOMEM;
+ f->os_desc_n = 1;
+ f->os_desc_table[0].os_desc = &fi_mtp->mtp_os_desc;
+ }
+
+ /* allocate endpoints */
+ ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
+ &mtp_fullspeed_out_desc, &mtp_intr_desc);
+ if (ret)
+ return ret;
+
+ /* support high speed hardware */
+ if (gadget_is_dualspeed(c->cdev->gadget)) {
+ mtp_highspeed_in_desc.bEndpointAddress =
+ mtp_fullspeed_in_desc.bEndpointAddress;
+ mtp_highspeed_out_desc.bEndpointAddress =
+ mtp_fullspeed_out_desc.bEndpointAddress;
+ }
+ /* support super speed hardware */
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ unsigned max_burst;
+
+ /* Calculate bMaxBurst, we know packet size is 1024 */
+ max_burst = min_t(unsigned, MTP_BULK_BUFFER_SIZE / 1024, 15);
+ mtp_ss_in_desc.bEndpointAddress =
+ mtp_fullspeed_in_desc.bEndpointAddress;
+ mtp_ss_in_comp_desc.bMaxBurst = max_burst;
+ mtp_ss_out_desc.bEndpointAddress =
+ mtp_fullspeed_out_desc.bEndpointAddress;
+ mtp_ss_out_comp_desc.bMaxBurst = max_burst;
+ }
+
+ fi_mtp->func_inst.f = &dev->function;
+ DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+ gadget_is_superspeed(c->cdev->gadget) ? "super" :
+ (gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
+ f->name, dev->ep_in->name, dev->ep_out->name);
+ return 0;
+}
+
+static void
+mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+ struct mtp_dev *dev = func_to_mtp(f);
+ struct mtp_instance *fi_mtp;
+ struct usb_request *req;
+ int i;
+ fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
+ mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
+ mutex_lock(&dev->read_mutex);
+ while ((req = mtp_req_get(dev, &dev->tx_idle)))
+ mtp_request_free(req, dev->ep_in);
+ for (i = 0; i < RX_REQ_MAX; i++)
+ mtp_request_free(dev->rx_req[i], dev->ep_out);
+ while ((req = mtp_req_get(dev, &dev->intr_idle)))
+ mtp_request_free(req, dev->ep_intr);
+ mutex_unlock(&dev->read_mutex);
+ spin_lock_irq(&dev->lock);
+ dev->state = STATE_OFFLINE;
+ dev->cdev = NULL;
+ spin_unlock_irq(&dev->lock);
+ kfree(f->os_desc_table);
+ f->os_desc_n = 0;
+ fi_mtp->func_inst.f = NULL;
+}
+
+static int mtp_function_set_alt(struct usb_function *f,
+ unsigned intf, unsigned alt)
+{
+ struct mtp_dev *dev = func_to_mtp(f);
+ struct usb_composite_dev *cdev = f->config->cdev;
+ int ret;
+
+ DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_in);
+ if (ret)
+ return ret;
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_out);
+ if (ret) {
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+
+ ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
+ if (ret)
+ return ret;
+
+ ret = usb_ep_enable(dev->ep_intr);
+ if (ret) {
+ usb_ep_disable(dev->ep_out);
+ usb_ep_disable(dev->ep_in);
+ return ret;
+ }
+ dev->state = STATE_READY;
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+ return 0;
+}
+
+static void mtp_function_disable(struct usb_function *f)
+{
+ struct mtp_dev *dev = func_to_mtp(f);
+ struct usb_composite_dev *cdev = dev->cdev;
+
+ DBG(cdev, "mtp_function_disable\n");
+ spin_lock_irq(&dev->lock);
+ dev->state = STATE_OFFLINE;
+ spin_unlock_irq(&dev->lock);
+ usb_ep_disable(dev->ep_in);
+ usb_ep_disable(dev->ep_out);
+ usb_ep_disable(dev->ep_intr);
+
+ /* readers may be blocked waiting for us to go online */
+ wake_up(&dev->read_wq);
+
+ VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int debug_mtp_read_stats(struct seq_file *s, void *unused)
+{
+ struct mtp_dev *dev = _mtp_dev;
+ int i;
+ unsigned long flags;
+ unsigned int min, max = 0, sum = 0, iteration = 0;
+
+ seq_puts(s, "\n=======================\n");
+ seq_puts(s, "MTP Write Stats:\n");
+ seq_puts(s, "\n=======================\n");
+ spin_lock_irqsave(&dev->lock, flags);
+ min = dev->perf[0].vfs_wtime;
+ for (i = 0; i < MAX_ITERATION; i++) {
+ seq_printf(s, "vfs write: bytes:%ld\t\t time:%d\n",
+ dev->perf[i].vfs_wbytes,
+ dev->perf[i].vfs_wtime);
+ if (dev->perf[i].vfs_wbytes == mtp_rx_req_len) {
+ sum += dev->perf[i].vfs_wtime;
+ if (min > dev->perf[i].vfs_wtime)
+ min = dev->perf[i].vfs_wtime;
+ if (max < dev->perf[i].vfs_wtime)
+ max = dev->perf[i].vfs_wtime;
+ iteration++;
+ }
+ }
+
+ seq_printf(s, "vfs_write(time in usec) min:%d\t max:%d\t avg:%d\n",
+ min, max, iteration ? sum / iteration : 0);
+ min = max = sum = iteration = 0;
+ seq_puts(s, "\n=======================\n");
+ seq_puts(s, "MTP Read Stats:\n");
+ seq_puts(s, "\n=======================\n");
+
+ min = dev->perf[0].vfs_rtime;
+ for (i = 0; i < MAX_ITERATION; i++) {
+ seq_printf(s, "vfs read: bytes:%ld\t\t time:%d\n",
+ dev->perf[i].vfs_rbytes,
+ dev->perf[i].vfs_rtime);
+ if (dev->perf[i].vfs_rbytes == mtp_tx_req_len) {
+ sum += dev->perf[i].vfs_rtime;
+ if (min > dev->perf[i].vfs_rtime)
+ min = dev->perf[i].vfs_rtime;
+ if (max < dev->perf[i].vfs_rtime)
+ max = dev->perf[i].vfs_rtime;
+ iteration++;
+ }
+ }
+
+ seq_printf(s, "vfs_read(time in usec) min:%d\t max:%d\t avg:%d\n",
+ min, max, iteration ? sum / iteration : 0);
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return 0;
+}
+
+static ssize_t debug_mtp_reset_stats(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ int clear_stats;
+ unsigned long flags;
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (buf == NULL) {
+ pr_err("[%s] EINVAL\n", __func__);
+ goto done;
+ }
+
+ if (kstrtoint(buf, 0, &clear_stats) || clear_stats != 0) {
+ pr_err("Wrong value. To clear stats, enter value as 0.\n");
+ goto done;
+ }
+
+ spin_lock_irqsave(&dev->lock, flags);
+ memset(&dev->perf[0], 0, MAX_ITERATION * sizeof(dev->perf[0]));
+ dev->dbg_read_index = 0;
+ dev->dbg_write_index = 0;
+ spin_unlock_irqrestore(&dev->lock, flags);
+done:
+ return count;
+}
+
+static int debug_mtp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_mtp_read_stats, inode->i_private);
+}
+
+static const struct file_operations debug_mtp_ops = {
+ .open = debug_mtp_open,
+ .read = seq_read,
+ .write = debug_mtp_reset_stats,
+};
+
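+/*
+ * The stats above are exposed at <debugfs>/usb_mtp/status; writing "0" to
+ * that file clears the accumulated VFS read/write timing samples.
+ */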
+struct dentry *dent_mtp;
+static void mtp_debugfs_init(void)
+{
+ struct dentry *dent_mtp_status;
+
+ dent_mtp = debugfs_create_dir("usb_mtp", 0);
+ if (!dent_mtp || IS_ERR(dent_mtp))
+ return;
+
+ dent_mtp_status = debugfs_create_file("status", 0644,
+ dent_mtp, 0, &debug_mtp_ops);
+ if (!dent_mtp_status || IS_ERR(dent_mtp_status)) {
+ debugfs_remove(dent_mtp);
+ dent_mtp = NULL;
+ return;
+ }
+}
+
+static void mtp_debugfs_remove(void)
+{
+ debugfs_remove_recursive(dent_mtp);
+}
+
+static int __mtp_setup(struct mtp_instance *fi_mtp)
+{
+ struct mtp_dev *dev;
+ int ret;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+ if (fi_mtp != NULL)
+ fi_mtp->dev = dev;
+
+ if (!dev)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+ init_waitqueue_head(&dev->read_wq);
+ init_waitqueue_head(&dev->write_wq);
+ init_waitqueue_head(&dev->intr_wq);
+ atomic_set(&dev->open_excl, 0);
+ atomic_set(&dev->ioctl_excl, 0);
+ INIT_LIST_HEAD(&dev->tx_idle);
+ INIT_LIST_HEAD(&dev->intr_idle);
+
+ dev->wq = create_singlethread_workqueue("f_mtp");
+ if (!dev->wq) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+ INIT_WORK(&dev->send_file_work, send_file_work);
+ INIT_WORK(&dev->receive_file_work, receive_file_work);
+
+ _mtp_dev = dev;
+
+ ret = misc_register(&mtp_device);
+ if (ret)
+ goto err2;
+
+ mtp_debugfs_init();
+ return 0;
+
+err2:
+ destroy_workqueue(dev->wq);
+err1:
+ _mtp_dev = NULL;
+ kfree(dev);
+ printk(KERN_ERR "mtp gadget driver failed to initialize\n");
+ return ret;
+}
+
+static int mtp_setup_configfs(struct mtp_instance *fi_mtp)
+{
+ return __mtp_setup(fi_mtp);
+}
+
+
+static void mtp_cleanup(void)
+{
+ struct mtp_dev *dev = _mtp_dev;
+
+ if (!dev)
+ return;
+
+ mtp_debugfs_remove();
+ misc_deregister(&mtp_device);
+ destroy_workqueue(dev->wq);
+ _mtp_dev = NULL;
+ kfree(dev);
+}
+
+static struct mtp_instance *to_mtp_instance(struct config_item *item)
+{
+ return container_of(to_config_group(item), struct mtp_instance,
+ func_inst.group);
+}
+
+static void mtp_attr_release(struct config_item *item)
+{
+ struct mtp_instance *fi_mtp = to_mtp_instance(item);
+
+ usb_put_function_instance(&fi_mtp->func_inst);
+}
+
+static struct configfs_item_operations mtp_item_ops = {
+ .release = mtp_attr_release,
+};
+
+static struct config_item_type mtp_func_type = {
+ .ct_item_ops = &mtp_item_ops,
+ .ct_owner = THIS_MODULE,
+};
+
+
+static struct mtp_instance *to_fi_mtp(struct usb_function_instance *fi)
+{
+ return container_of(fi, struct mtp_instance, func_inst);
+}
+
+static int mtp_set_inst_name(struct usb_function_instance *fi, const char *name)
+{
+ struct mtp_instance *fi_mtp;
+ char *ptr;
+ int name_len;
+
+ name_len = strlen(name) + 1;
+ if (name_len > MAX_INST_NAME_LEN)
+ return -ENAMETOOLONG;
+
+ ptr = kstrndup(name, name_len, GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ fi_mtp = to_fi_mtp(fi);
+ fi_mtp->name = ptr;
+
+ return 0;
+}
+
+static void mtp_free_inst(struct usb_function_instance *fi)
+{
+ struct mtp_instance *fi_mtp;
+
+ fi_mtp = to_fi_mtp(fi);
+ kfree(fi_mtp->name);
+ mtp_cleanup();
+ kfree(fi_mtp);
+}
+
+struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
+{
+ struct mtp_instance *fi_mtp;
+ int ret = 0;
+ struct usb_os_desc *descs[1];
+ char *names[1];
+
+ fi_mtp = kzalloc(sizeof(*fi_mtp), GFP_KERNEL);
+ if (!fi_mtp)
+ return ERR_PTR(-ENOMEM);
+ fi_mtp->func_inst.set_inst_name = mtp_set_inst_name;
+ fi_mtp->func_inst.free_func_inst = mtp_free_inst;
+
+ fi_mtp->mtp_os_desc.ext_compat_id = fi_mtp->mtp_ext_compat_id;
+ INIT_LIST_HEAD(&fi_mtp->mtp_os_desc.ext_prop);
+ descs[0] = &fi_mtp->mtp_os_desc;
+ names[0] = "MTP";
+
+ if (mtp_config) {
+ ret = mtp_setup_configfs(fi_mtp);
+ if (ret) {
+ kfree(fi_mtp);
+ pr_err("Error setting MTP\n");
+ return ERR_PTR(ret);
+ }
+ } else
+ fi_mtp->dev = _mtp_dev;
+
+ config_group_init_type_name(&fi_mtp->func_inst.group,
+ "", &mtp_func_type);
+ usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,
+ descs, names, THIS_MODULE);
+
+ mutex_init(&fi_mtp->dev->read_mutex);
+
+ return &fi_mtp->func_inst;
+}
+EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
+
+static struct usb_function_instance *mtp_alloc_inst(void)
+{
+ return alloc_inst_mtp_ptp(true);
+}
+
+static int mtp_ctrlreq_configfs(struct usb_function *f,
+ const struct usb_ctrlrequest *ctrl)
+{
+ return mtp_ctrlrequest(f->config->cdev, ctrl);
+}
+
+static void mtp_free(struct usb_function *f)
+{
+ /*NO-OP: no function specific resource allocation in mtp_alloc*/
+}
+
+struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
+ bool mtp_config)
+{
+ struct mtp_instance *fi_mtp = to_fi_mtp(fi);
+ struct mtp_dev *dev;
+
+ /*
+ * PTP piggybacks on MTP function so make sure we have
+ * created MTP function before we associate this PTP
+ * function with a gadget configuration.
+ */
+ if (fi_mtp->dev == NULL) {
+ pr_err("Error: Create MTP function before linking"
+ " PTP function with a gadget configuration\n");
+ pr_err("\t1: Delete existing PTP function if any\n");
+ pr_err("\t2: Create MTP function\n");
+ pr_err("\t3: Create and symlink PTP function"
+ " with a gadget configuration\n");
+ return ERR_PTR(-EINVAL); /* Invalid Configuration */
+ }
+
+ dev = fi_mtp->dev;
+ dev->function.name = DRIVER_NAME;
+ dev->function.strings = mtp_strings;
+ if (mtp_config) {
+ dev->function.fs_descriptors = fs_mtp_descs;
+ dev->function.hs_descriptors = hs_mtp_descs;
+ dev->function.ss_descriptors = ss_mtp_descs;
+ dev->function.ssp_descriptors = ss_mtp_descs;
+ } else {
+ dev->function.fs_descriptors = fs_ptp_descs;
+ dev->function.hs_descriptors = hs_ptp_descs;
+ dev->function.ss_descriptors = ss_ptp_descs;
+ dev->function.ssp_descriptors = ss_ptp_descs;
+ }
+ dev->function.bind = mtp_function_bind;
+ dev->function.unbind = mtp_function_unbind;
+ dev->function.set_alt = mtp_function_set_alt;
+ dev->function.disable = mtp_function_disable;
+ dev->function.setup = mtp_ctrlreq_configfs;
+ dev->function.free_func = mtp_free;
+ fi->f = &dev->function;
+
+ return &dev->function;
+}
+EXPORT_SYMBOL_GPL(function_alloc_mtp_ptp);
+
+static struct usb_function *mtp_alloc(struct usb_function_instance *fi)
+{
+ return function_alloc_mtp_ptp(fi, true);
+}
+
+DECLARE_USB_FUNCTION_INIT(mtp, mtp_alloc_inst, mtp_alloc);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/function/f_mtp.h b/drivers/usb/gadget/function/f_mtp.h
new file mode 100644
index 0000000..7adb1ff
--- /dev/null
+++ b/drivers/usb/gadget/function/f_mtp.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Badhri Jagan Sridharan <badhri@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+extern struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config);
+extern struct usb_function *function_alloc_mtp_ptp(
+ struct usb_function_instance *fi, bool mtp_config);
diff --git a/drivers/usb/gadget/function/f_ptp.c b/drivers/usb/gadget/function/f_ptp.c
new file mode 100644
index 0000000..da3e4d5
--- /dev/null
+++ b/drivers/usb/gadget/function/f_ptp.c
@@ -0,0 +1,38 @@
+/*
+ * Gadget Function Driver for PTP
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Badhri Jagan Sridharan <badhri@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+
+#include "f_mtp.h"
+
+static struct usb_function_instance *ptp_alloc_inst(void)
+{
+ return alloc_inst_mtp_ptp(false);
+}
+
+static struct usb_function *ptp_alloc(struct usb_function_instance *fi)
+{
+ return function_alloc_mtp_ptp(fi, false);
+}
+
+DECLARE_USB_FUNCTION_INIT(ptp, ptp_alloc_inst, ptp_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Badhri Jagan Sridharan");
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 8a8998b..cbb0959 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -2,7 +2,7 @@
/*
* f_qdss.c -- QDSS function Driver
*
- * Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/init.h>
@@ -415,11 +415,13 @@ static int qdss_bind(struct usb_configuration *c, struct usb_function *f)
qdss_data_intf_desc.bInterfaceNumber = iface;
qdss->data_iface_id = iface;
- id = usb_string_id(c->cdev);
- if (id < 0)
- return id;
- qdss_string_defs[QDSS_DATA_IDX].id = id;
- qdss_data_intf_desc.iInterface = id;
+ if (!qdss_string_defs[QDSS_DATA_IDX].id) {
+ id = usb_string_id(c->cdev);
+ if (id < 0)
+ return id;
+ qdss_string_defs[QDSS_DATA_IDX].id = id;
+ qdss_data_intf_desc.iInterface = id;
+ }
if (qdss->debug_inface_enabled) {
/* Allocate ctrl I/F */
@@ -430,11 +432,14 @@ static int qdss_bind(struct usb_configuration *c, struct usb_function *f)
}
qdss_ctrl_intf_desc.bInterfaceNumber = iface;
qdss->ctrl_iface_id = iface;
- id = usb_string_id(c->cdev);
- if (id < 0)
- return id;
- qdss_string_defs[QDSS_CTRL_IDX].id = id;
- qdss_ctrl_intf_desc.iInterface = id;
+
+ if (!qdss_string_defs[QDSS_CTRL_IDX].id) {
+ id = usb_string_id(c->cdev);
+ if (id < 0)
+ return id;
+ qdss_string_defs[QDSS_CTRL_IDX].id = id;
+ qdss_ctrl_intf_desc.iInterface = id;
+ }
}
/* for non-accelerated path keep tx fifo size 1k */
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c
index 660878a..b77f312 100644
--- a/drivers/usb/gadget/udc/net2272.c
+++ b/drivers/usb/gadget/udc/net2272.c
@@ -2083,7 +2083,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev)
#if defined(PLX_PCI_RDK2)
/* see if PCI int for us by checking irqstat */
intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
- if (!intcsr & (1 << NET2272_PCI_IRQ)) {
+ if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
spin_unlock(&dev->lock);
return IRQ_NONE;
}
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 366fd63d..9b8d910 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -2471,7 +2471,7 @@ int xhci_event_ring_setup(struct xhci_hcd *xhci, struct xhci_ring **er,
if (!*er)
return -ENOMEM;
- ret = xhci_alloc_erst(xhci, xhci->event_ring, &xhci->erst, flags);
+ ret = xhci_alloc_erst(xhci, *er, erst, flags);
if (ret)
return ret;
diff --git a/drivers/usb/mtu3/mtu3_core.c b/drivers/usb/mtu3/mtu3_core.c
index d045d84..48d10a6 100644
--- a/drivers/usb/mtu3/mtu3_core.c
+++ b/drivers/usb/mtu3/mtu3_core.c
@@ -578,8 +578,10 @@ static void mtu3_regs_init(struct mtu3 *mtu)
if (mtu->is_u3_ip) {
/* disable LGO_U1/U2 by default */
mtu3_clrbits(mbase, U3D_LINK_POWER_CONTROL,
- SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE |
SW_U1_REQUEST_ENABLE | SW_U2_REQUEST_ENABLE);
+ /* enable accept LGO_U1/U2 link command from host */
+ mtu3_setbits(mbase, U3D_LINK_POWER_CONTROL,
+ SW_U1_ACCEPT_ENABLE | SW_U2_ACCEPT_ENABLE);
/* device responses to u3_exit from host automatically */
mtu3_clrbits(mbase, U3D_LTSSM_CTRL, SOFT_U3_EXIT_EN);
/* automatically build U2 link when U3 detect fail */
diff --git a/drivers/usb/mtu3/mtu3_gadget_ep0.c b/drivers/usb/mtu3/mtu3_gadget_ep0.c
index 25216e7..3c464d8 100644
--- a/drivers/usb/mtu3/mtu3_gadget_ep0.c
+++ b/drivers/usb/mtu3/mtu3_gadget_ep0.c
@@ -336,9 +336,9 @@ static int ep0_handle_feature_dev(struct mtu3 *mtu,
lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
if (set)
- lpc |= SW_U1_ACCEPT_ENABLE;
+ lpc |= SW_U1_REQUEST_ENABLE;
else
- lpc &= ~SW_U1_ACCEPT_ENABLE;
+ lpc &= ~SW_U1_REQUEST_ENABLE;
mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
mtu->u1_enable = !!set;
@@ -351,9 +351,9 @@ static int ep0_handle_feature_dev(struct mtu3 *mtu,
lpc = mtu3_readl(mbase, U3D_LINK_POWER_CONTROL);
if (set)
- lpc |= SW_U2_ACCEPT_ENABLE;
+ lpc |= SW_U2_REQUEST_ENABLE;
else
- lpc &= ~SW_U2_ACCEPT_ENABLE;
+ lpc &= ~SW_U2_REQUEST_ENABLE;
mtu3_writel(mbase, U3D_LINK_POWER_CONTROL, lpc);
mtu->u2_enable = !!set;
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 23a0df7..403eb9791 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -181,9 +181,11 @@ static void dsps_musb_enable(struct musb *musb)
musb_writel(reg_base, wrp->epintr_set, epmask);
musb_writel(reg_base, wrp->coreintr_set, coremask);
- /* start polling for ID change in dual-role idle mode */
- if (musb->xceiv->otg->state == OTG_STATE_B_IDLE &&
- musb->port_mode == MUSB_OTG)
+ /*
+ * start polling for runtime PM active and idle,
+ * and for ID change in dual-role idle mode.
+ */
+ if (musb->xceiv->otg->state == OTG_STATE_B_IDLE)
dsps_mod_timer(glue, -1);
}
@@ -227,8 +229,13 @@ static int dsps_check_status(struct musb *musb, void *unused)
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_VRISE:
- dsps_mod_timer_optional(glue);
- break;
+ if (musb->port_mode == MUSB_HOST) {
+ musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
+ dsps_mod_timer_optional(glue);
+ break;
+ }
+ /* fall through */
+
case OTG_STATE_A_WAIT_BCON:
/* keep VBUS on for host-only mode */
if (musb->port_mode == MUSB_HOST) {
@@ -249,6 +256,10 @@ static int dsps_check_status(struct musb *musb, void *unused)
musb->xceiv->otg->state = OTG_STATE_A_IDLE;
MUSB_HST_MODE(musb);
}
+
+ if (musb->port_mode == MUSB_PERIPHERAL)
+ skip_session = 1;
+
if (!(devctl & MUSB_DEVCTL_SESSION) && !skip_session)
musb_writeb(mregs, MUSB_DEVCTL,
MUSB_DEVCTL_SESSION);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index eae8b1b..ffe462a 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -452,13 +452,10 @@ void musb_g_tx(struct musb *musb, u8 epnum)
}
if (request) {
- u8 is_dma = 0;
- bool short_packet = false;
trace_musb_req_tx(req);
if (dma && (csr & MUSB_TXCSR_DMAENAB)) {
- is_dma = 1;
csr |= MUSB_TXCSR_P_WZC_BITS;
csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN |
MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET);
@@ -476,16 +473,8 @@ void musb_g_tx(struct musb *musb, u8 epnum)
*/
if ((request->zero && request->length)
&& (request->length % musb_ep->packet_sz == 0)
- && (request->actual == request->length))
- short_packet = true;
+ && (request->actual == request->length)) {
- if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) &&
- (is_dma && (!dma->desired_mode ||
- (request->actual &
- (musb_ep->packet_sz - 1)))))
- short_packet = true;
-
- if (short_packet) {
/*
* On DMA completion, FIFO may not be
* available yet...
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index a688f7f..5fc6825 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -346,12 +346,10 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
channel->status = MUSB_DMA_STATUS_FREE;
/* completed */
- if ((devctl & MUSB_DEVCTL_HM)
- && (musb_channel->transmit)
- && ((channel->desired_mode == 0)
- || (channel->actual_len &
- (musb_channel->max_packet_sz - 1)))
- ) {
+ if (musb_channel->transmit &&
+ (!channel->desired_mode ||
+ (channel->actual_len %
+ musb_channel->max_packet_sz))) {
u8 epnum = musb_channel->epnum;
int offset = musb->io.ep_offset(epnum,
MUSB_TXCSR);
@@ -363,11 +361,14 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
*/
musb_ep_select(mbase, epnum);
txcsr = musb_readw(mbase, offset);
- txcsr &= ~(MUSB_TXCSR_DMAENAB
+ if (channel->desired_mode == 1) {
+ txcsr &= ~(MUSB_TXCSR_DMAENAB
| MUSB_TXCSR_AUTOSET);
- musb_writew(mbase, offset, txcsr);
- /* Send out the packet */
- txcsr &= ~MUSB_TXCSR_DMAMODE;
+ musb_writew(mbase, offset, txcsr);
+ /* Send out the packet */
+ txcsr &= ~MUSB_TXCSR_DMAMODE;
+ txcsr |= MUSB_TXCSR_DMAENAB;
+ }
txcsr |= MUSB_TXCSR_TXPKTRDY;
musb_writew(mbase, offset, txcsr);
}
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index a9253d9..30ab415 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -3774,12 +3774,6 @@ static int usbpd_do_swap(struct usbpd *pd, bool dr_swap,
return -EAGAIN;
}
- if (pd->current_state == PE_SNK_READY &&
- !is_sink_tx_ok(pd)) {
- usbpd_err(&pd->dev, "Rp indicates SinkTxNG\n");
- return -EAGAIN;
- }
-
mutex_lock(&pd->swap_lock);
reinit_completion(&pd->is_ready);
if (dr_swap)
@@ -4143,7 +4137,7 @@ static ssize_t select_pdo_store(struct device *dev,
mutex_lock(&pd->swap_lock);
/* Only allowed if we are already in explicit sink contract */
- if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
+ if (pd->current_state != PE_SNK_READY) {
usbpd_err(&pd->dev, "Cannot select new PDO yet\n");
ret = -EBUSY;
goto out;
@@ -4189,7 +4183,7 @@ static ssize_t select_pdo_store(struct device *dev,
if (pd->selected_pdo != pd->requested_pdo ||
pd->current_voltage != pd->requested_voltage) {
usbpd_err(&pd->dev, "request rejected\n");
- ret = -EINVAL;
+ ret = -ECONNREFUSED;
}
out:
@@ -4295,7 +4289,7 @@ static int trigger_tx_msg(struct usbpd *pd, bool *msg_tx_flag)
int ret = 0;
/* Only allowed if we are already in explicit sink contract */
- if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
+ if (pd->current_state != PE_SNK_READY) {
usbpd_err(&pd->dev, "Cannot send msg\n");
ret = -EBUSY;
goto out;
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c
index 27bdb72..f5f0568 100644
--- a/drivers/usb/phy/phy-am335x.c
+++ b/drivers/usb/phy/phy-am335x.c
@@ -61,9 +61,6 @@ static int am335x_phy_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
- if (ret)
- return ret;
am_phy->usb_phy_gen.phy.init = am335x_init;
am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown;
@@ -82,7 +79,7 @@ static int am335x_phy_probe(struct platform_device *pdev)
device_set_wakeup_enable(dev, false);
phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false);
- return 0;
+ return usb_add_phy_dev(&am_phy->usb_phy_gen.phy);
}
static int am335x_phy_remove(struct platform_device *pdev)
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 4310df4..b079258 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -458,6 +458,10 @@ static int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
*/
static const struct of_device_id usbhs_of_match[] = {
{
+ .compatible = "renesas,usbhs-r8a774c0",
+ .data = (void *)USBHS_TYPE_RCAR_GEN3_WITH_PLL,
+ },
+ {
.compatible = "renesas,usbhs-r8a7790",
.data = (void *)USBHS_TYPE_RCAR_GEN2,
},
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index cf82e72..b214a72 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1035,8 +1035,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
int type, ret;
ret = copy_from_iter(&type, sizeof(type), from);
- if (ret != sizeof(type))
+ if (ret != sizeof(type)) {
+ ret = -EINVAL;
goto done;
+ }
switch (type) {
case VHOST_IOTLB_MSG:
@@ -1055,8 +1057,10 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
iov_iter_advance(from, offset);
ret = copy_from_iter(&msg, sizeof(msg), from);
- if (ret != sizeof(msg))
+ if (ret != sizeof(msg)) {
+ ret = -EINVAL;
goto done;
+ }
if (vhost_process_iotlb_msg(dev, &msg)) {
ret = -EFAULT;
goto done;
@@ -1784,7 +1788,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
len, iov, 64, VHOST_ACCESS_WO);
- if (ret)
+ if (ret < 0)
return ret;
for (i = 0; i < ret; i++) {
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index bdfcc0a..6bde543 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -263,6 +263,16 @@ static int pwm_backlight_parse_dt(struct device *dev,
memset(data, 0, sizeof(*data));
/*
+ * These values are optional and set as 0 by default, the out values
+ * are modified only if a valid u32 value can be decoded.
+ */
+ of_property_read_u32(node, "post-pwm-on-delay-ms",
+ &data->post_pwm_on_delay);
+ of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
+
+ data->enable_gpio = -EINVAL;
+
+ /*
* Determine the number of brightness levels, if this property is not
* set a default table of brightness levels will be used.
*/
@@ -374,15 +384,6 @@ static int pwm_backlight_parse_dt(struct device *dev,
data->max_brightness--;
}
- /*
- * These values are optional and set as 0 by default, the out values
- * are modified only if a valid u32 value can be decoded.
- */
- of_property_read_u32(node, "post-pwm-on-delay-ms",
- &data->post_pwm_on_delay);
- of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay);
-
- data->enable_gpio = -EINVAL;
return 0;
}
diff --git a/drivers/video/fbdev/clps711x-fb.c b/drivers/video/fbdev/clps711x-fb.c
index ff56107..42f9096 100644
--- a/drivers/video/fbdev/clps711x-fb.c
+++ b/drivers/video/fbdev/clps711x-fb.c
@@ -287,14 +287,17 @@ static int clps711x_fb_probe(struct platform_device *pdev)
}
ret = of_get_fb_videomode(disp, &cfb->mode, OF_USE_NATIVE_MODE);
- if (ret)
+ if (ret) {
+ of_node_put(disp);
goto out_fb_release;
+ }
of_property_read_u32(disp, "ac-prescale", &cfb->ac_prescale);
cfb->cmap_invert = of_property_read_bool(disp, "cmap-invert");
ret = of_property_read_u32(disp, "bits-per-pixel",
&info->var.bits_per_pixel);
+ of_node_put(disp);
if (ret)
goto out_fb_release;
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index 75ebbbf..5d961e3 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -3066,7 +3066,7 @@ static int fbcon_fb_unbind(int idx)
for (i = first_fb_vc; i <= last_fb_vc; i++) {
if (con2fb_map[i] != idx &&
con2fb_map[i] != -1) {
- new_idx = i;
+ new_idx = con2fb_map[i];
break;
}
}
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 2040542..77cee99 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -435,7 +435,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
image->dx += image->width + 8;
}
} else if (rotate == FB_ROTATE_UD) {
- for (x = 0; x < num; x++) {
+ u32 dx = image->dx;
+
+ for (x = 0; x < num && image->dx <= dx; x++) {
info->fbops->fb_imageblit(info, image);
image->dx -= image->width + 8;
}
@@ -447,7 +449,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
image->dy += image->height + 8;
}
} else if (rotate == FB_ROTATE_CCW) {
- for (x = 0; x < num; x++) {
+ u32 dy = image->dy;
+
+ for (x = 0; x < num && image->dy <= dy; x++) {
info->fbops->fb_imageblit(info, image);
image->dy -= image->height + 8;
}
diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
index afbd610..070026a 100644
--- a/drivers/video/fbdev/udlfb.c
+++ b/drivers/video/fbdev/udlfb.c
@@ -916,8 +916,6 @@ static int dlfb_ops_open(struct fb_info *info, int user)
dlfb->fb_count++;
- kref_get(&dlfb->kref);
-
if (fb_defio && (info->fbdefio == NULL)) {
/* enable defio at last moment if not disabled by client */
@@ -940,14 +938,17 @@ static int dlfb_ops_open(struct fb_info *info, int user)
return 0;
}
-/*
- * Called when all client interfaces to start transactions have been disabled,
- * and all references to our device instance (dlfb_data) are released.
- * Every transaction must have a reference, so we know are fully spun down
- */
-static void dlfb_free(struct kref *kref)
+static void dlfb_ops_destroy(struct fb_info *info)
{
- struct dlfb_data *dlfb = container_of(kref, struct dlfb_data, kref);
+ struct dlfb_data *dlfb = info->par;
+
+ if (info->cmap.len != 0)
+ fb_dealloc_cmap(&info->cmap);
+ if (info->monspecs.modedb)
+ fb_destroy_modedb(info->monspecs.modedb);
+ vfree(info->screen_base);
+
+ fb_destroy_modelist(&info->modelist);
while (!list_empty(&dlfb->deferred_free)) {
struct dlfb_deferred_free *d = list_entry(dlfb->deferred_free.next, struct dlfb_deferred_free, list);
@@ -957,40 +958,13 @@ static void dlfb_free(struct kref *kref)
}
vfree(dlfb->backing_buffer);
kfree(dlfb->edid);
+ usb_put_dev(dlfb->udev);
kfree(dlfb);
+
+ /* Assume info structure is freed after this point */
+ framebuffer_release(info);
}
-static void dlfb_free_framebuffer(struct dlfb_data *dlfb)
-{
- struct fb_info *info = dlfb->info;
-
- if (info) {
- unregister_framebuffer(info);
-
- if (info->cmap.len != 0)
- fb_dealloc_cmap(&info->cmap);
- if (info->monspecs.modedb)
- fb_destroy_modedb(info->monspecs.modedb);
- vfree(info->screen_base);
-
- fb_destroy_modelist(&info->modelist);
-
- dlfb->info = NULL;
-
- /* Assume info structure is freed after this point */
- framebuffer_release(info);
- }
-
- /* ref taken in probe() as part of registering framebfufer */
- kref_put(&dlfb->kref, dlfb_free);
-}
-
-static void dlfb_free_framebuffer_work(struct work_struct *work)
-{
- struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
- free_framebuffer_work.work);
- dlfb_free_framebuffer(dlfb);
-}
/*
* Assumes caller is holding info->lock mutex (for open and release at least)
*/
@@ -1000,10 +974,6 @@ static int dlfb_ops_release(struct fb_info *info, int user)
dlfb->fb_count--;
- /* We can't free fb_info here - fbmem will touch it when we return */
- if (dlfb->virtualized && (dlfb->fb_count == 0))
- schedule_delayed_work(&dlfb->free_framebuffer_work, HZ);
-
if ((dlfb->fb_count == 0) && (info->fbdefio)) {
fb_deferred_io_cleanup(info);
kfree(info->fbdefio);
@@ -1013,8 +983,6 @@ static int dlfb_ops_release(struct fb_info *info, int user)
dev_dbg(info->dev, "release, user=%d count=%d\n", user, dlfb->fb_count);
- kref_put(&dlfb->kref, dlfb_free);
-
return 0;
}
@@ -1172,6 +1140,7 @@ static struct fb_ops dlfb_ops = {
.fb_blank = dlfb_ops_blank,
.fb_check_var = dlfb_ops_check_var,
.fb_set_par = dlfb_ops_set_par,
+ .fb_destroy = dlfb_ops_destroy,
};
@@ -1615,12 +1584,13 @@ static int dlfb_parse_vendor_descriptor(struct dlfb_data *dlfb,
return true;
}
-static void dlfb_init_framebuffer_work(struct work_struct *work);
-
static int dlfb_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
+ int i;
+ const struct device_attribute *attr;
struct dlfb_data *dlfb;
+ struct fb_info *info;
int retval = -ENOMEM;
struct usb_device *usbdev = interface_to_usbdev(intf);
@@ -1631,10 +1601,9 @@ static int dlfb_usb_probe(struct usb_interface *intf,
goto error;
}
- kref_init(&dlfb->kref); /* matching kref_put in usb .disconnect fn */
INIT_LIST_HEAD(&dlfb->deferred_free);
- dlfb->udev = usbdev;
+ dlfb->udev = usb_get_dev(usbdev);
usb_set_intfdata(intf, dlfb);
dev_dbg(&intf->dev, "console enable=%d\n", console);
@@ -1657,42 +1626,6 @@ static int dlfb_usb_probe(struct usb_interface *intf,
}
- if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
- retval = -ENOMEM;
- dev_err(&intf->dev, "unable to allocate urb list\n");
- goto error;
- }
-
- kref_get(&dlfb->kref); /* matching kref_put in free_framebuffer_work */
-
- /* We don't register a new USB class. Our client interface is dlfbev */
-
- /* Workitem keep things fast & simple during USB enumeration */
- INIT_DELAYED_WORK(&dlfb->init_framebuffer_work,
- dlfb_init_framebuffer_work);
- schedule_delayed_work(&dlfb->init_framebuffer_work, 0);
-
- return 0;
-
-error:
- if (dlfb) {
-
- kref_put(&dlfb->kref, dlfb_free); /* last ref from kref_init */
-
- /* dev has been deallocated. Do not dereference */
- }
-
- return retval;
-}
-
-static void dlfb_init_framebuffer_work(struct work_struct *work)
-{
- int i, retval;
- struct fb_info *info;
- const struct device_attribute *attr;
- struct dlfb_data *dlfb = container_of(work, struct dlfb_data,
- init_framebuffer_work.work);
-
/* allocates framebuffer driver structure, not framebuffer memory */
info = framebuffer_alloc(0, &dlfb->udev->dev);
if (!info) {
@@ -1706,17 +1639,22 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
dlfb->ops = dlfb_ops;
info->fbops = &dlfb->ops;
+ INIT_LIST_HEAD(&info->modelist);
+
+ if (!dlfb_alloc_urb_list(dlfb, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
+ retval = -ENOMEM;
+ dev_err(&intf->dev, "unable to allocate urb list\n");
+ goto error;
+ }
+
+ /* We don't register a new USB class. Our client interface is dlfbev */
+
retval = fb_alloc_cmap(&info->cmap, 256, 0);
if (retval < 0) {
dev_err(info->device, "cmap allocation failed: %d\n", retval);
goto error;
}
- INIT_DELAYED_WORK(&dlfb->free_framebuffer_work,
- dlfb_free_framebuffer_work);
-
- INIT_LIST_HEAD(&info->modelist);
-
retval = dlfb_setup_modes(dlfb, info, NULL, 0);
if (retval != 0) {
dev_err(info->device,
@@ -1760,10 +1698,16 @@ static void dlfb_init_framebuffer_work(struct work_struct *work)
dev_name(info->dev), info->var.xres, info->var.yres,
((dlfb->backing_buffer) ?
info->fix.smem_len * 2 : info->fix.smem_len) >> 10);
- return;
+ return 0;
error:
- dlfb_free_framebuffer(dlfb);
+ if (dlfb->info) {
+ dlfb_ops_destroy(dlfb->info);
+ } else if (dlfb) {
+ usb_put_dev(dlfb->udev);
+ kfree(dlfb);
+ }
+ return retval;
}
static void dlfb_usb_disconnect(struct usb_interface *intf)
@@ -1791,20 +1735,9 @@ static void dlfb_usb_disconnect(struct usb_interface *intf)
for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
device_remove_file(info->dev, &fb_device_attrs[i]);
device_remove_bin_file(info->dev, &edid_attr);
- unlink_framebuffer(info);
}
- usb_set_intfdata(intf, NULL);
- dlfb->udev = NULL;
-
- /* if clients still have us open, will be freed on last close */
- if (dlfb->fb_count == 0)
- schedule_delayed_work(&dlfb->free_framebuffer_work, 0);
-
- /* release reference taken by kref_init in probe() */
- kref_put(&dlfb->kref, dlfb_free);
-
- /* consider dlfb_data freed */
+ unregister_framebuffer(info);
}
static struct usb_driver dlfb_driver = {
diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
index 3093655..1475ed5 100644
--- a/drivers/virt/vboxguest/vboxguest_core.c
+++ b/drivers/virt/vboxguest/vboxguest_core.c
@@ -1312,7 +1312,7 @@ static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
return -EINVAL;
}
- if (f32bit)
+ if (IS_ENABLED(CONFIG_COMPAT) && f32bit)
ret = vbg_hgcm_call32(gdev, client_id,
call->function, call->timeout_ms,
VBG_IOCTL_HGCM_CALL_PARMS32(call),
diff --git a/drivers/watchdog/mt7621_wdt.c b/drivers/watchdog/mt7621_wdt.c
index 5c4a764..81208cd 100644
--- a/drivers/watchdog/mt7621_wdt.c
+++ b/drivers/watchdog/mt7621_wdt.c
@@ -17,6 +17,7 @@
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <asm/mach-ralink/ralink_regs.h>
diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c
index 88d81fe..d01efd3 100644
--- a/drivers/watchdog/renesas_wdt.c
+++ b/drivers/watchdog/renesas_wdt.c
@@ -77,12 +77,17 @@ static int rwdt_init_timeout(struct watchdog_device *wdev)
static int rwdt_start(struct watchdog_device *wdev)
{
struct rwdt_priv *priv = watchdog_get_drvdata(wdev);
+ u8 val;
pm_runtime_get_sync(wdev->parent);
- rwdt_write(priv, 0, RWTCSRB);
- rwdt_write(priv, priv->cks, RWTCSRA);
+ /* Stop the timer before we modify any register */
+ val = readb_relaxed(priv->base + RWTCSRA) & ~RWTCSRA_TME;
+ rwdt_write(priv, val, RWTCSRA);
+
rwdt_init_timeout(wdev);
+ rwdt_write(priv, priv->cks, RWTCSRA);
+ rwdt_write(priv, 0, RWTCSRB);
while (readb_relaxed(priv->base + RWTCSRA) & RWTCSRA_WRFLG)
cpu_relax();
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 98967f0..db7c57d 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -18,6 +18,7 @@
#include <linux/watchdog.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
+#include <linux/mod_devicetable.h>
#include <asm/mach-ralink/ralink_regs.h>
diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c
index b1092fb..d4ea335 100644
--- a/drivers/xen/pvcalls-back.c
+++ b/drivers/xen/pvcalls-back.c
@@ -160,9 +160,10 @@ static void pvcalls_conn_back_read(void *opaque)
/* write the data, then modify the indexes */
virt_wmb();
- if (ret < 0)
+ if (ret < 0) {
+ atomic_set(&map->read, 0);
intf->in_error = ret;
- else
+ } else
intf->in_prod = prod + ret;
/* update the indexes, then notify the other end */
virt_wmb();
@@ -282,13 +283,11 @@ static int pvcalls_back_socket(struct xenbus_device *dev,
static void pvcalls_sk_state_change(struct sock *sock)
{
struct sock_mapping *map = sock->sk_user_data;
- struct pvcalls_data_intf *intf;
if (map == NULL)
return;
- intf = map->ring;
- intf->in_error = -ENOTCONN;
+ atomic_inc(&map->read);
notify_remote_via_irq(map->irq);
}
diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 77224d8..91da7e4 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -31,6 +31,12 @@
#define PVCALLS_NR_RSP_PER_RING __CONST_RING_SIZE(xen_pvcalls, XEN_PAGE_SIZE)
#define PVCALLS_FRONT_MAX_SPIN 5000
+static struct proto pvcalls_proto = {
+ .name = "PVCalls",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct sock),
+};
+
struct pvcalls_bedata {
struct xen_pvcalls_front_ring ring;
grant_ref_t ref;
@@ -335,6 +341,42 @@ int pvcalls_front_socket(struct socket *sock)
return ret;
}
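+
+/* Free the in/out data ring pages and the ring interface page of an active socket mapping. */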
+static void free_active_ring(struct sock_mapping *map)
+{
+ if (!map->active.ring)
+ return;
+
+ free_pages((unsigned long)map->active.data.in,
+ map->active.ring->ring_order);
+ free_page((unsigned long)map->active.ring);
+}
+
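+/* Allocate the ring interface page and the in/out data ring pages for an active socket mapping. */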
+static int alloc_active_ring(struct sock_mapping *map)
+{
+ void *bytes;
+
+ map->active.ring = (struct pvcalls_data_intf *)
+ get_zeroed_page(GFP_KERNEL);
+ if (!map->active.ring)
+ goto out;
+
+ map->active.ring->ring_order = PVCALLS_RING_ORDER;
+ bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ PVCALLS_RING_ORDER);
+ if (!bytes)
+ goto out;
+
+ map->active.data.in = bytes;
+ map->active.data.out = bytes +
+ XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+ return 0;
+
+out:
+ free_active_ring(map);
+ return -ENOMEM;
+}
+
static int create_active(struct sock_mapping *map, int *evtchn)
{
void *bytes;
@@ -343,15 +385,7 @@ static int create_active(struct sock_mapping *map, int *evtchn)
*evtchn = -1;
init_waitqueue_head(&map->active.inflight_conn_req);
- map->active.ring = (struct pvcalls_data_intf *)
- __get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (map->active.ring == NULL)
- goto out_error;
- map->active.ring->ring_order = PVCALLS_RING_ORDER;
- bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
- PVCALLS_RING_ORDER);
- if (bytes == NULL)
- goto out_error;
+ bytes = map->active.data.in;
for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
map->active.ring->ref[i] = gnttab_grant_foreign_access(
pvcalls_front_dev->otherend_id,
@@ -361,10 +395,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
pvcalls_front_dev->otherend_id,
pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
- map->active.data.in = bytes;
- map->active.data.out = bytes +
- XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
-
ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
if (ret)
goto out_error;
@@ -385,8 +415,6 @@ static int create_active(struct sock_mapping *map, int *evtchn)
out_error:
if (*evtchn >= 0)
xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
- free_pages((unsigned long)map->active.data.in, PVCALLS_RING_ORDER);
- free_page((unsigned long)map->active.ring);
return ret;
}
@@ -406,17 +434,24 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
return PTR_ERR(map);
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+ ret = alloc_active_ring(map);
+ if (ret < 0) {
+ pvcalls_exit_sock(sock);
+ return ret;
+ }
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map);
pvcalls_exit_sock(sock);
return ret;
}
ret = create_active(map, &evtchn);
if (ret < 0) {
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map);
pvcalls_exit_sock(sock);
return ret;
}
@@ -560,15 +595,13 @@ static int __read_ring(struct pvcalls_data_intf *intf,
error = intf->in_error;
/* get pointers before reading from the ring */
virt_rmb();
- if (error < 0)
- return error;
size = pvcalls_queued(prod, cons, array_size);
masked_prod = pvcalls_mask(prod, array_size);
masked_cons = pvcalls_mask(cons, array_size);
if (size == 0)
- return 0;
+ return error ?: size;
if (len > size)
len = size;
@@ -780,25 +813,36 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
}
}
+ map2 = kzalloc(sizeof(*map2), GFP_KERNEL);
+ if (map2 == NULL) {
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags);
+ pvcalls_exit_sock(sock);
+ return -ENOMEM;
+ }
+ ret = alloc_active_ring(map2);
+ if (ret < 0) {
+ clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
+ (void *)&map->passive.flags);
+ kfree(map2);
+ pvcalls_exit_sock(sock);
+ return ret;
+ }
spin_lock(&bedata->socket_lock);
ret = get_request(bedata, &req_id);
if (ret < 0) {
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
spin_unlock(&bedata->socket_lock);
+ free_active_ring(map2);
+ kfree(map2);
pvcalls_exit_sock(sock);
return ret;
}
- map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
- if (map2 == NULL) {
- clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
- (void *)&map->passive.flags);
- spin_unlock(&bedata->socket_lock);
- pvcalls_exit_sock(sock);
- return -ENOMEM;
- }
+
ret = create_active(map2, &evtchn);
if (ret < 0) {
+ free_active_ring(map2);
kfree(map2);
clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
(void *)&map->passive.flags);
@@ -839,7 +883,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
received:
map2->sock = newsock;
- newsock->sk = kzalloc(sizeof(*newsock->sk), GFP_KERNEL);
+ newsock->sk = sk_alloc(sock_net(sock->sk), PF_INET, GFP_KERNEL, &pvcalls_proto, false);
if (!newsock->sk) {
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
map->passive.inflight_req_id = PVCALLS_INVALID_ID;
@@ -1032,8 +1076,8 @@ int pvcalls_front_release(struct socket *sock)
spin_lock(&bedata->socket_lock);
list_del(&map->list);
spin_unlock(&bedata->socket_lock);
- if (READ_ONCE(map->passive.inflight_req_id) !=
- PVCALLS_INVALID_ID) {
+ if (READ_ONCE(map->passive.inflight_req_id) != PVCALLS_INVALID_ID &&
+ READ_ONCE(map->passive.inflight_req_id) != 0) {
pvcalls_front_free_map(bedata,
map->passive.accept_map);
}
diff --git a/fs/afs/flock.c b/fs/afs/flock.c
index dc62d15..1bb300e 100644
--- a/fs/afs/flock.c
+++ b/fs/afs/flock.c
@@ -208,7 +208,7 @@ void afs_lock_work(struct work_struct *work)
/* The new front of the queue now owns the state variables. */
next = list_entry(vnode->pending_locks.next,
struct file_lock, fl_u.afs.link);
- vnode->lock_key = afs_file_key(next->fl_file);
+ vnode->lock_key = key_get(afs_file_key(next->fl_file));
vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
goto again;
@@ -413,7 +413,7 @@ static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
/* The new front of the queue now owns the state variables. */
next = list_entry(vnode->pending_locks.next,
struct file_lock, fl_u.afs.link);
- vnode->lock_key = afs_file_key(next->fl_file);
+ vnode->lock_key = key_get(afs_file_key(next->fl_file));
vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
afs_lock_may_be_available(vnode);
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 071075d..0726e40d 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -411,7 +411,6 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
} else if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
valid = true;
} else {
- vnode->cb_s_break = vnode->cb_interest->server->cb_s_break;
vnode->cb_v_break = vnode->volume->cb_v_break;
valid = false;
}
@@ -543,6 +542,8 @@ void afs_evict_inode(struct inode *inode)
#endif
afs_put_permits(rcu_access_pointer(vnode->permit_cache));
+ key_put(vnode->lock_key);
+ vnode->lock_key = NULL;
_leave("");
}
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 7177d1d..45f5cf9 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -29,6 +29,7 @@ enum {
BTRFS_INODE_IN_DELALLOC_LIST,
BTRFS_INODE_READDIO_NEED_LOCK,
BTRFS_INODE_HAS_PROPS,
+ BTRFS_INODE_SNAPSHOT_FLUSH,
};
/* in memory btrfs inode */
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2cddfe7..82682da 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3155,7 +3155,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct inode *inode, u64 new_size,
u32 min_type);
-int btrfs_start_delalloc_inodes(struct btrfs_root *root);
+int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
unsigned int extra_bits,
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4dd6faa..79f82f2 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3928,12 +3928,25 @@ static int extent_write_cache_pages(struct address_space *mapping,
range_whole = 1;
scanned = 1;
}
- if (wbc->sync_mode == WB_SYNC_ALL)
+
+ /*
+ * We do the tagged writepage as long as the snapshot flush bit is set
+ * and we are the first one to do the filemap_flush() on this inode.
+ *
+ * The nr_to_write == LONG_MAX is needed to make sure other flushers do
+ * not race in and drop the bit.
+ */
+ if (range_whole && wbc->nr_to_write == LONG_MAX &&
+ test_and_clear_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
+ &BTRFS_I(inode)->runtime_flags))
+ wbc->tagged_writepages = 1;
+
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag = PAGECACHE_TAG_TOWRITE;
else
tag = PAGECACHE_TAG_DIRTY;
retry:
- if (wbc->sync_mode == WB_SYNC_ALL)
+ if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
tag_pages_for_writeback(mapping, index, end);
done_index = index;
while (!done && !nr_to_write_done && (index <= end) &&
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 83b3a626..59f361f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -10005,7 +10005,7 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
* some fairly slow code that needs optimization. This walks the list
* of all the inodes with pending delalloc and forces them to disk.
*/
-static int start_delalloc_inodes(struct btrfs_root *root, int nr)
+static int start_delalloc_inodes(struct btrfs_root *root, int nr, bool snapshot)
{
struct btrfs_inode *binode;
struct inode *inode;
@@ -10033,6 +10033,9 @@ static int start_delalloc_inodes(struct btrfs_root *root, int nr)
}
spin_unlock(&root->delalloc_lock);
+ if (snapshot)
+ set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
+ &binode->runtime_flags);
work = btrfs_alloc_delalloc_work(inode);
if (!work) {
iput(inode);
@@ -10066,7 +10069,7 @@ static int start_delalloc_inodes(struct btrfs_root *root, int nr)
return ret;
}
-int btrfs_start_delalloc_inodes(struct btrfs_root *root)
+int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret;
@@ -10074,7 +10077,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root)
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
return -EROFS;
- ret = start_delalloc_inodes(root, -1);
+ ret = start_delalloc_inodes(root, -1, true);
if (ret > 0)
ret = 0;
return ret;
@@ -10103,7 +10106,7 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int nr)
&fs_info->delalloc_roots);
spin_unlock(&fs_info->delalloc_root_lock);
- ret = start_delalloc_inodes(root, nr);
+ ret = start_delalloc_inodes(root, nr, false);
btrfs_put_fs_root(root);
if (ret < 0)
goto out;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index c9152155..8bf9cce1 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -778,7 +778,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
wait_event(root->subv_writers->wait,
percpu_counter_sum(&root->subv_writers->counter) == 0);
- ret = btrfs_start_delalloc_inodes(root);
+ ret = btrfs_start_delalloc_snapshot(root);
if (ret)
goto dec_and_free;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0ee1cd4..285f64f 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -850,6 +850,35 @@ static noinline struct btrfs_device *device_list_add(const char *path,
return ERR_PTR(-EEXIST);
}
+ /*
+ * We are going to replace the device path for a given devid;
+ * make sure it's the same device if the device is mounted
+ */
+ if (device->bdev) {
+ struct block_device *path_bdev;
+
+ path_bdev = lookup_bdev(path);
+ if (IS_ERR(path_bdev)) {
+ mutex_unlock(&fs_devices->device_list_mutex);
+ return ERR_CAST(path_bdev);
+ }
+
+ if (device->bdev != path_bdev) {
+ bdput(path_bdev);
+ mutex_unlock(&fs_devices->device_list_mutex);
+ btrfs_warn_in_rcu(device->fs_info,
+ "duplicate device fsid:devid for %pU:%llu old:%s new:%s",
+ disk_super->fsid, devid,
+ rcu_str_deref(device->name), path);
+ return ERR_PTR(-EEXIST);
+ }
+ bdput(path_bdev);
+ btrfs_info_in_rcu(device->fs_info,
+ "device fsid %pU devid %llu moved old:%s new:%s",
+ disk_super->fsid, devid,
+ rcu_str_deref(device->name), path);
+ }
+
name = rcu_string_strdup(path, GFP_NOFS);
if (!name) {
mutex_unlock(&fs_devices->device_list_mutex);
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 041c27e..f74193d 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
capsnap->size);
spin_lock(&mdsc->snap_flush_lock);
- list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
+ if (list_empty(&ci->i_snap_flush_item))
+ list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list);
spin_unlock(&mdsc->snap_flush_lock);
return 1; /* caller may want to ceph_flush_snaps */
}
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 7b637fc..23db881 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1128,6 +1128,10 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
return -EINVAL;
}
+ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
+ PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
+ PAGE_SIZE);
max_num = (max_buf - sizeof(struct smb_hdr)) /
sizeof(LOCKING_ANDX_RANGE);
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
@@ -1466,6 +1470,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
return -EINVAL;
+ BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
+ PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
+ PAGE_SIZE);
max_num = (max_buf - sizeof(struct smb_hdr)) /
sizeof(LOCKING_ANDX_RANGE);
buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index e169e1a..3925a7b 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -655,7 +655,14 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
/* scan and find it */
int i;
char *cur_ent;
- char *end_of_smb = cfile->srch_inf.ntwrk_buf_start +
+ char *end_of_smb;
+
+ if (cfile->srch_inf.ntwrk_buf_start == NULL) {
+ cifs_dbg(VFS, "ntwrk_buf_start is NULL during readdir\n");
+ return -EIO;
+ }
+
+ end_of_smb = cfile->srch_inf.ntwrk_buf_start +
server->ops->calc_smb_size(
cfile->srch_inf.ntwrk_buf_start,
server);
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 2fc3d319..b204e84 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -128,6 +128,8 @@ smb2_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
if (max_buf < sizeof(struct smb2_lock_element))
return -EINVAL;
+ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
max_num = max_buf / sizeof(struct smb2_lock_element);
buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
if (!buf)
@@ -264,6 +266,8 @@ smb2_push_mandatory_locks(struct cifsFileInfo *cfile)
return -EINVAL;
}
+ BUILD_BUG_ON(sizeof(struct smb2_lock_element) > PAGE_SIZE);
+ max_buf = min_t(unsigned int, max_buf, PAGE_SIZE);
max_num = max_buf / sizeof(struct smb2_lock_element);
buf = kcalloc(max_num, sizeof(struct smb2_lock_element), GFP_KERNEL);
if (!buf) {
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 8a01e89..1e5a117 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2814,9 +2814,10 @@ smb2_echo_callback(struct mid_q_entry *mid)
{
struct TCP_Server_Info *server = mid->callback_data;
struct smb2_echo_rsp *rsp = (struct smb2_echo_rsp *)mid->resp_buf;
- unsigned int credits_received = 1;
+ unsigned int credits_received = 0;
- if (mid->mid_state == MID_RESPONSE_RECEIVED)
+ if (mid->mid_state == MID_RESPONSE_RECEIVED
+ || mid->mid_state == MID_RESPONSE_MALFORMED)
credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
DeleteMidQEntry(mid);
@@ -3073,7 +3074,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
struct TCP_Server_Info *server = tcon->ses->server;
struct smb2_sync_hdr *shdr =
(struct smb2_sync_hdr *)rdata->iov[0].iov_base;
- unsigned int credits_received = 1;
+ unsigned int credits_received = 0;
struct smb_rqst rqst = { .rq_iov = rdata->iov,
.rq_nvec = 2,
.rq_pages = rdata->pages,
@@ -3112,6 +3113,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
task_io_account_read(rdata->got_bytes);
cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
+ case MID_RESPONSE_MALFORMED:
+ credits_received = le16_to_cpu(shdr->CreditRequest);
+ /* fall through */
default:
if (rdata->result != -ENODATA)
rdata->result = -EIO;
@@ -3305,7 +3309,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
unsigned int written;
struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
- unsigned int credits_received = 1;
+ unsigned int credits_received = 0;
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
@@ -3333,6 +3337,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
case MID_RETRY_NEEDED:
wdata->result = -EAGAIN;
break;
+ case MID_RESPONSE_MALFORMED:
+ credits_received = le16_to_cpu(rsp->sync_hdr.CreditRequest);
+ /* fall through */
default:
wdata->result = -EIO;
break;
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile
index cb49698..cc42e5e 100644
--- a/fs/crypto/Makefile
+++ b/fs/crypto/Makefile
@@ -1,4 +1,7 @@
obj-$(CONFIG_FS_ENCRYPTION) += fscrypto.o
-fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o
+ccflags-y += -Ifs/ext4
+ccflags-y += -Ifs/f2fs
+
+fscrypto-y := crypto.o fname.o hooks.o keyinfo.o policy.o fscrypt_ice.o
fscrypto-$(CONFIG_BLOCK) += bio.o
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index 0959044..93cd5e5 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -33,14 +33,17 @@ static void __fscrypt_decrypt_bio(struct bio *bio, bool done)
bio_for_each_segment_all(bv, bio, i) {
struct page *page = bv->bv_page;
- int ret = fscrypt_decrypt_page(page->mapping->host, page,
- PAGE_SIZE, 0, page->index);
-
- if (ret) {
- WARN_ON_ONCE(1);
- SetPageError(page);
- } else if (done) {
+ if (fscrypt_using_hardware_encryption(page->mapping->host)) {
SetPageUptodate(page);
+ } else {
+ int ret = fscrypt_decrypt_page(page->mapping->host,
+ page, PAGE_SIZE, 0, page->index);
+ if (ret) {
+ WARN_ON_ONCE(1);
+ SetPageError(page);
+ } else if (done) {
+ SetPageUptodate(page);
+ }
}
if (done)
unlock_page(page);
diff --git a/fs/crypto/fscrypt_ice.c b/fs/crypto/fscrypt_ice.c
new file mode 100644
index 0000000..1de53d4
--- /dev/null
+++ b/fs/crypto/fscrypt_ice.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include "fscrypt_ice.h"
+
+int fscrypt_using_hardware_encryption(const struct inode *inode)
+{
+ struct fscrypt_info *ci = inode->i_crypt_info;
+
+ return S_ISREG(inode->i_mode) && ci &&
+ ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE;
+}
+EXPORT_SYMBOL(fscrypt_using_hardware_encryption);
+
+/*
+ * Retrieves encryption key from the inode
+ */
+char *fscrypt_get_ice_encryption_key(const struct inode *inode)
+{
+ struct fscrypt_info *ci = NULL;
+
+ if (!inode)
+ return NULL;
+
+ ci = inode->i_crypt_info;
+ if (!ci)
+ return NULL;
+
+ return &(ci->ci_raw_key[0]);
+}
+
+/*
+ * Retrieves encryption salt from the inode
+ */
+char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
+{
+ struct fscrypt_info *ci = NULL;
+
+ if (!inode)
+ return NULL;
+
+ ci = inode->i_crypt_info;
+ if (!ci)
+ return NULL;
+
+ return &(ci->ci_raw_key[fscrypt_get_ice_encryption_key_size(inode)]);
+}
+
+/*
+ * returns true if the cipher mode in inode is AES XTS
+ */
+int fscrypt_is_aes_xts_cipher(const struct inode *inode)
+{
+ struct fscrypt_info *ci = inode->i_crypt_info;
+
+ if (!ci)
+ return 0;
+
+ return (ci->ci_data_mode == FS_ENCRYPTION_MODE_PRIVATE);
+}
+
+/*
+ * returns true if encryption info in both inodes is equal
+ */
+bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
+ const struct inode *inode2)
+{
+ char *key1 = NULL;
+ char *key2 = NULL;
+ char *salt1 = NULL;
+ char *salt2 = NULL;
+
+ if (!inode1 || !inode2)
+ return false;
+
+ if (inode1 == inode2)
+ return true;
+
+ /*
+ * neither inode belongs to ice, so we don't care; treat them
+ * as equal
+ */
+ if (!fscrypt_should_be_processed_by_ice(inode1) &&
+ !fscrypt_should_be_processed_by_ice(inode2))
+ return true;
+
+ /* one belongs to ice, the other does not -> not equal */
+ if (fscrypt_should_be_processed_by_ice(inode1) ^
+ fscrypt_should_be_processed_by_ice(inode2))
+ return false;
+
+ key1 = fscrypt_get_ice_encryption_key(inode1);
+ key2 = fscrypt_get_ice_encryption_key(inode2);
+ salt1 = fscrypt_get_ice_encryption_salt(inode1);
+ salt2 = fscrypt_get_ice_encryption_salt(inode2);
+
+ /* key and salt should not be null by this point */
+ if (!key1 || !key2 || !salt1 || !salt2 ||
+ (fscrypt_get_ice_encryption_key_size(inode1) !=
+ fscrypt_get_ice_encryption_key_size(inode2)) ||
+ (fscrypt_get_ice_encryption_salt_size(inode1) !=
+ fscrypt_get_ice_encryption_salt_size(inode2)))
+ return false;
+
+ if ((memcmp(key1, key2,
+ fscrypt_get_ice_encryption_key_size(inode1)) == 0) &&
+ (memcmp(salt1, salt2,
+ fscrypt_get_ice_encryption_salt_size(inode1)) == 0))
+ return true;
+
+ return false;
+}
+
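+/*
+ * Tag the bio with the DUN (data unit number) consumed by the inline
+ * crypto engine, but only for inodes that are handled by ICE.
+ */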
+void fscrypt_set_ice_dun(const struct inode *inode, struct bio *bio, u64 dun)
+{
+ if (fscrypt_should_be_processed_by_ice(inode))
+ bio->bi_iter.bi_dun = dun;
+}
+EXPORT_SYMBOL(fscrypt_set_ice_dun);
+
+/*
+ * This function is used by the filesystem when deciding whether to merge
+ * bios. The basic assumption is that, if inline_encryption is set, a single
+ * bio has to guarantee consecutive LBAs as well as consecutive ino|pg->index
+ * values.
+ */
+bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted)
+{
+ if (!bio)
+ return true;
+
+ /* if neither of them is encrypted, no further check is needed */
+ if (!bio_dun(bio) && !bio_encrypted)
+ return true;
+
+ /* ICE allows only consecutive iv_key stream. */
+ return bio_end_dun(bio) == dun;
+}
+EXPORT_SYMBOL(fscrypt_mergeable_bio);
diff --git a/fs/crypto/fscrypt_ice.h b/fs/crypto/fscrypt_ice.h
new file mode 100644
index 0000000..84de010
--- /dev/null
+++ b/fs/crypto/fscrypt_ice.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _FSCRYPT_ICE_H
+#define _FSCRYPT_ICE_H
+
+#include <linux/blkdev.h>
+#include "fscrypt_private.h"
+
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
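+/* True when the inode is encrypted and set up for hardware (ICE) encryption. */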
+static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
+{
+ if (!inode->i_sb->s_cop)
+ return false;
+ if (!inode->i_sb->s_cop->is_encrypted((struct inode *)inode))
+ return false;
+
+ return fscrypt_using_hardware_encryption(inode);
+}
+
+static inline int fscrypt_is_ice_capable(const struct super_block *sb)
+{
+ return blk_queue_inlinecrypt(bdev_get_queue(sb->s_bdev));
+}
+
+int fscrypt_is_aes_xts_cipher(const struct inode *inode);
+
+char *fscrypt_get_ice_encryption_key(const struct inode *inode);
+char *fscrypt_get_ice_encryption_salt(const struct inode *inode);
+
+bool fscrypt_is_ice_encryption_info_equal(const struct inode *inode1,
+ const struct inode *inode2);
+
+static inline size_t fscrypt_get_ice_encryption_key_size(
+ const struct inode *inode)
+{
+ return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+
+static inline size_t fscrypt_get_ice_encryption_salt_size(
+ const struct inode *inode)
+{
+ return FS_AES_256_XTS_KEY_SIZE / 2;
+}
+#else
+static inline bool fscrypt_should_be_processed_by_ice(const struct inode *inode)
+{
+ return false;
+}
+
+static inline int fscrypt_is_ice_capable(const struct super_block *sb)
+{
+ return false;
+}
+
+static inline char *fscrypt_get_ice_encryption_key(const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline char *fscrypt_get_ice_encryption_salt(const struct inode *inode)
+{
+ return NULL;
+}
+
+static inline size_t fscrypt_get_ice_encryption_key_size(
+ const struct inode *inode)
+{
+ return 0;
+}
+
+static inline size_t fscrypt_get_ice_encryption_salt_size(
+ const struct inode *inode)
+{
+ return 0;
+}
+
+static inline int fscrypt_is_xts_cipher(const struct inode *inode)
+{
+ return 0;
+}
+
+static inline bool fscrypt_is_ice_encryption_info_equal(
+ const struct inode *inode1,
+ const struct inode *inode2)
+{
+ return false;
+}
+
+static inline int fscrypt_is_aes_xts_cipher(const struct inode *inode)
+{
+ return 0;
+}
+
+#endif
+
+#endif /* _FSCRYPT_ICE_H */
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 7424f85..f06a8c0 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -12,11 +12,23 @@
#ifndef _FSCRYPT_PRIVATE_H
#define _FSCRYPT_PRIVATE_H
+#ifndef __FS_HAS_ENCRYPTION
#define __FS_HAS_ENCRYPTION 1
+#endif
#include <linux/fscrypt.h>
#include <crypto/hash.h>
+#include <linux/pfk.h>
/* Encryption parameters */
+
+#define FS_AES_128_ECB_KEY_SIZE 16
+#define FS_AES_128_CBC_KEY_SIZE 16
+#define FS_AES_128_CTS_KEY_SIZE 16
+#define FS_AES_256_GCM_KEY_SIZE 32
+#define FS_AES_256_CBC_KEY_SIZE 32
+#define FS_AES_256_CTS_KEY_SIZE 32
+#define FS_AES_256_XTS_KEY_SIZE 64
+
#define FS_KEY_DERIVATION_NONCE_SIZE 16
/**
@@ -82,11 +94,13 @@ struct fscrypt_info {
struct fscrypt_master_key *ci_master_key;
/* fields from the fscrypt_context */
+
u8 ci_data_mode;
u8 ci_filename_mode;
u8 ci_flags;
u8 ci_master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE];
+ u8 ci_raw_key[FS_MAX_KEY_SIZE];
};
typedef enum {
@@ -112,6 +126,10 @@ static inline bool fscrypt_valid_enc_modes(u32 contents_mode,
filenames_mode == FS_ENCRYPTION_MODE_ADIANTUM)
return true;
+ if (contents_mode == FS_ENCRYPTION_MODE_PRIVATE &&
+ filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
+ return true;
+
return false;
}
@@ -168,6 +186,7 @@ struct fscrypt_mode {
int ivsize;
bool logged_impl_name;
bool needs_essiv;
+ bool inline_encryption;
};
extern void __exit fscrypt_essiv_cleanup(void);
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 1e11a68..33fd585 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -18,6 +18,7 @@
#include <crypto/sha.h>
#include <crypto/skcipher.h>
#include "fscrypt_private.h"
+#include "fscrypt_ice.h"
static struct crypto_shash *essiv_hash_tfm;
@@ -161,11 +162,20 @@ static struct fscrypt_mode available_modes[] = {
.keysize = 32,
.ivsize = 32,
},
+ [FS_ENCRYPTION_MODE_PRIVATE] = {
+ .friendly_name = "ice",
+ .cipher_str = "xts(aes)",
+ .keysize = 64,
+ .ivsize = 16,
+ .inline_encryption = true,
+ },
};
static struct fscrypt_mode *
select_encryption_mode(const struct fscrypt_info *ci, const struct inode *inode)
{
+ struct fscrypt_mode *mode = NULL;
+
if (!fscrypt_valid_enc_modes(ci->ci_data_mode, ci->ci_filename_mode)) {
fscrypt_warn(inode->i_sb,
"inode %lu uses unsupported encryption modes (contents mode %d, filenames mode %d)",
@@ -174,8 +184,19 @@ select_encryption_mode(const struct fscrypt_info *ci, const struct inode *inode)
return ERR_PTR(-EINVAL);
}
- if (S_ISREG(inode->i_mode))
- return &available_modes[ci->ci_data_mode];
+ if (S_ISREG(inode->i_mode)) {
+ mode = &available_modes[ci->ci_data_mode];
+ if (IS_ERR(mode)) {
+ fscrypt_warn(inode->i_sb, "Invalid mode");
+ return ERR_PTR(-EINVAL);
+ }
+ if (mode->inline_encryption &&
+ !fscrypt_is_ice_capable(inode->i_sb)) {
+ fscrypt_warn(inode->i_sb, "ICE support not available");
+ return ERR_PTR(-EINVAL);
+ }
+ return mode;
+ }
if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
return &available_modes[ci->ci_filename_mode];
@@ -220,6 +241,9 @@ static int find_and_derive_key(const struct inode *inode,
memcpy(derived_key, payload->raw, mode->keysize);
err = 0;
}
+ } else if (mode->inline_encryption) {
+ memcpy(derived_key, payload->raw, mode->keysize);
+ err = 0;
} else {
err = derive_key_aes(payload->raw, ctx, derived_key,
mode->keysize);
@@ -495,12 +519,21 @@ static void put_crypt_info(struct fscrypt_info *ci)
if (ci->ci_master_key) {
put_master_key(ci->ci_master_key);
} else {
- crypto_free_skcipher(ci->ci_ctfm);
- crypto_free_cipher(ci->ci_essiv_tfm);
+ if (ci->ci_ctfm)
+ crypto_free_skcipher(ci->ci_ctfm);
+ if (ci->ci_essiv_tfm)
+ crypto_free_cipher(ci->ci_essiv_tfm);
}
+ memset(ci->ci_raw_key, 0, sizeof(ci->ci_raw_key));
kmem_cache_free(fscrypt_info_cachep, ci);
}
+static int fscrypt_data_encryption_mode(struct inode *inode)
+{
+ return fscrypt_should_be_processed_by_ice(inode) ?
+ FS_ENCRYPTION_MODE_PRIVATE : FS_ENCRYPTION_MODE_AES_256_XTS;
+}
+
int fscrypt_get_encryption_info(struct inode *inode)
{
struct fscrypt_info *crypt_info;
@@ -524,7 +557,8 @@ int fscrypt_get_encryption_info(struct inode *inode)
/* Fake up a context for an unencrypted directory */
memset(&ctx, 0, sizeof(ctx));
ctx.format = FS_ENCRYPTION_CONTEXT_FORMAT_V1;
- ctx.contents_encryption_mode = FS_ENCRYPTION_MODE_AES_256_XTS;
+ ctx.contents_encryption_mode =
+ fscrypt_data_encryption_mode(inode);
ctx.filenames_encryption_mode = FS_ENCRYPTION_MODE_AES_256_CTS;
memset(ctx.master_key_descriptor, 0x42, FS_KEY_DESCRIPTOR_SIZE);
} else if (res != sizeof(ctx)) {
@@ -569,9 +603,13 @@ int fscrypt_get_encryption_info(struct inode *inode)
if (res)
goto out;
- res = setup_crypto_transform(crypt_info, mode, raw_key, inode);
- if (res)
- goto out;
+ if (!mode->inline_encryption) {
+ res = setup_crypto_transform(crypt_info, mode, raw_key, inode);
+ if (res)
+ goto out;
+ } else {
+ memcpy(crypt_info->ci_raw_key, raw_key, mode->keysize);
+ }
if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) == NULL)
crypt_info = NULL;
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 13b0135..41ef452 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -787,6 +787,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *dentry = NULL, *trap;
struct name_snapshot old_name;
+ if (IS_ERR(old_dir))
+ return old_dir;
+ if (IS_ERR(new_dir))
+ return new_dir;
+ if (IS_ERR_OR_NULL(old_dentry))
+ return old_dentry;
+
trap = lock_rename(new_dir, old_dir);
/* Source or destination directories don't exist? */
if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1991460..53ba123 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -37,6 +37,8 @@
#include <linux/uio.h>
#include <linux/atomic.h>
#include <linux/prefetch.h>
+#define __FS_HAS_ENCRYPTION IS_ENABLED(CONFIG_F2FS_FS_ENCRYPTION)
+#include <linux/fscrypt.h>
/*
* How many user pages to map in one call to get_user_pages(). This determines
@@ -451,6 +453,23 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
sdio->logical_offset_in_bio = sdio->cur_page_fs_offset;
}
+#ifdef CONFIG_PFK
+static bool is_inode_filesystem_type(const struct inode *inode,
+ const char *fs_type)
+{
+ if (!inode || !fs_type)
+ return false;
+
+ if (!inode->i_sb)
+ return false;
+
+ if (!inode->i_sb->s_type)
+ return false;
+
+ return (strcmp(inode->i_sb->s_type->name, fs_type) == 0);
+}
+#endif
+
/*
* In the AIO read case we speculatively dirty the pages before starting IO.
* During IO completion, any of these pages which happen to have been written
@@ -473,7 +492,17 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
bio_set_pages_dirty(bio);
dio->bio_disk = bio->bi_disk;
+#ifdef CONFIG_PFK
+ bio->bi_dio_inode = dio->inode;
+/* iv sector for security/pfe/pfk_fscrypt.c and f2fs in fs/f2fs/f2fs.h */
+#define PG_DUN_NEW(i, p) \
+ (((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p) & 0xffffffff))
+
+ if (is_inode_filesystem_type(dio->inode, "f2fs"))
+ fscrypt_set_ice_dun(dio->inode, bio, PG_DUN_NEW(dio->inode,
+ (sdio->logical_offset_in_bio >> PAGE_SHIFT)));
+#endif
if (sdio->submit_io) {
sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
dio->bio_cookie = BLK_QC_T_NONE;
@@ -485,6 +514,18 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
sdio->logical_offset_in_bio = 0;
}
+struct inode *dio_bio_get_inode(struct bio *bio)
+{
+ struct inode *inode = NULL;
+
+ if (bio == NULL)
+ return NULL;
+#ifdef CONFIG_PFK
+ inode = bio->bi_dio_inode;
+#endif
+ return inode;
+}
+
/*
* Release any resources in case of a failure
*/
diff --git a/fs/dlm/ast.c b/fs/dlm/ast.c
index 562fa8c..47ee66d 100644
--- a/fs/dlm/ast.c
+++ b/fs/dlm/ast.c
@@ -292,6 +292,8 @@ void dlm_callback_suspend(struct dlm_ls *ls)
flush_workqueue(ls->ls_callback_wq);
}
+#define MAX_CB_QUEUE 25
+
void dlm_callback_resume(struct dlm_ls *ls)
{
struct dlm_lkb *lkb, *safe;
@@ -302,15 +304,23 @@ void dlm_callback_resume(struct dlm_ls *ls)
if (!ls->ls_callback_wq)
return;
+more:
mutex_lock(&ls->ls_cb_mutex);
list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
list_del_init(&lkb->lkb_cb_list);
queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
count++;
+ if (count == MAX_CB_QUEUE)
+ break;
}
mutex_unlock(&ls->ls_cb_mutex);
if (count)
log_rinfo(ls, "dlm_callback_resume %d", count);
+ if (count == MAX_CB_QUEUE) {
+ count = 0;
+ cond_resched();
+ goto more;
+ }
}
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 779b741..8b4ded9 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1155,7 +1155,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
* semantics). All the events that happen during that period of time are
* chained in ep->ovflist and requeued later on.
*/
- if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
+ if (ep->ovflist != EP_UNACTIVE_PTR) {
if (epi->next == EP_UNACTIVE_PTR) {
epi->next = ep->ovflist;
ep->ovflist = epi;
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index a453cc8..dcf7ee9 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -107,10 +107,16 @@
decrypted pages in the page cache.
config EXT4_FS_ENCRYPTION
- bool
- default y
+ bool "Ext4 FS Encryption"
+ default n
depends on EXT4_ENCRYPTION
+config EXT4_FS_ICE_ENCRYPTION
+ bool "Ext4 Encryption with ICE support"
+ default n
+ depends on EXT4_FS_ENCRYPTION
+ depends on PFK
+
config EXT4_DEBUG
bool "EXT4 debugging support"
depends on EXT4_FS
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 712f009..5508baa 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -116,16 +116,8 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
}
- ret = file_write_and_wait_range(file, start, end);
- if (ret)
- return ret;
-
if (!journal) {
- struct writeback_control wbc = {
- .sync_mode = WB_SYNC_ALL
- };
-
- ret = ext4_write_inode(inode, &wbc);
+ ret = __generic_file_fsync(file, start, end, datasync);
if (!ret)
ret = ext4_sync_parent(inode);
if (test_opt(inode->i_sb, BARRIER))
@@ -133,6 +125,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
goto out;
}
+ ret = file_write_and_wait_range(file, start, end);
+ if (ret)
+ return ret;
/*
* data=writeback,ordered:
* The caller's filemap_fdatawrite()/wait will sync the data.
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 6906adaf..7292718 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1219,7 +1219,8 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len,
ll_rw_block(REQ_OP_READ, 0, 1, &bh);
*wait_bh++ = bh;
decrypt = ext4_encrypted_inode(inode) &&
- S_ISREG(inode->i_mode);
+ S_ISREG(inode->i_mode) &&
+ !fscrypt_using_hardware_encryption(inode);
}
}
/*
@@ -3765,9 +3766,14 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
get_block_func = ext4_dio_get_block_unwritten_async;
dio_flags = DIO_LOCKING;
}
- ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
- get_block_func, ext4_end_io_dio, NULL,
- dio_flags);
+#if defined(CONFIG_EXT4_FS_ENCRYPTION)
+ WARN_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
+ && !fscrypt_using_hardware_encryption(inode));
+#endif
+ ret = __blockdev_direct_IO(iocb, inode,
+ inode->i_sb->s_bdev, iter,
+ get_block_func,
+ ext4_end_io_dio, NULL, dio_flags);
if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
EXT4_STATE_DIO_UNWRITTEN)) {
@@ -3874,8 +3880,9 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
ssize_t ret;
int rw = iov_iter_rw(iter);
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode))
+#if defined(CONFIG_EXT4_FS_ENCRYPTION)
+ if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)
+ && !fscrypt_using_hardware_encryption(inode))
return 0;
#endif
@@ -4090,7 +4097,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
if (!buffer_uptodate(bh))
goto unlock;
if (S_ISREG(inode->i_mode) &&
- ext4_encrypted_inode(inode)) {
+ ext4_encrypted_inode(inode) &&
+ !fscrypt_using_hardware_encryption(inode)) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index db75901..440dcee 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -482,8 +482,10 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
gfp_t gfp_flags = GFP_NOFS;
retry_encrypt:
- data_page = fscrypt_encrypt_page(inode, page, PAGE_SIZE, 0,
- page->index, gfp_flags);
+ if (!fscrypt_using_hardware_encryption(inode))
+ data_page = fscrypt_encrypt_page(inode,
+ page, PAGE_SIZE, 0,
+ page->index, gfp_flags);
if (IS_ERR(data_page)) {
ret = PTR_ERR(data_page);
if (ret == -ENOMEM && wbc->sync_mode == WB_SYNC_ALL) {
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index aa1b9e1..8e5947f 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -280,6 +280,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
}
if (bio == NULL) {
struct fscrypt_ctx *ctx = NULL;
+ unsigned int flags = 0;
if (ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode)) {
@@ -298,8 +299,9 @@ int ext4_mpage_readpages(struct address_space *mapping,
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
bio->bi_private = ctx;
- bio_set_op_attrs(bio, REQ_OP_READ,
- is_readahead ? REQ_RAHEAD : 0);
+ if (is_readahead)
+ flags = flags | REQ_RAHEAD;
+ bio_set_op_attrs(bio, REQ_OP_READ, flags);
}
length = first_hole << blkbits;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index a1cf7d6..f266ab5 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1328,6 +1328,11 @@ static bool ext4_dummy_context(struct inode *inode)
return DUMMY_ENCRYPTION_ENABLED(EXT4_SB(inode->i_sb));
}
+static inline bool ext4_is_encrypted(struct inode *inode)
+{
+ return ext4_encrypted_inode(inode);
+}
+
static const struct fscrypt_operations ext4_cryptops = {
.key_prefix = "ext4:",
.get_context = ext4_get_context,
@@ -1335,6 +1340,7 @@ static const struct fscrypt_operations ext4_cryptops = {
.dummy_context = ext4_dummy_context,
.empty_dir = ext4_empty_dir,
.max_namelen = EXT4_NAME_LEN,
+ .is_encrypted = ext4_is_encrypted,
};
#endif
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 6552b48..6cab353 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -481,6 +481,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
struct bio *bio;
struct page *page = fio->encrypted_page ?
fio->encrypted_page : fio->page;
+ struct inode *inode = fio->page->mapping->host;
if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
__is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
@@ -493,14 +494,13 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
1, is_read_io(fio->op), fio->type, fio->temp);
+ if (f2fs_may_encrypt_bio(inode, fio))
+ fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, fio->page));
+
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
bio_put(bio);
return -EFAULT;
}
-
- if (fio->io_wbc && !is_read_io(fio->op))
- wbc_account_io(fio->io_wbc, page, PAGE_SIZE);
-
bio_set_op_attrs(bio, fio->op, fio->op_flags);
inc_page_count(fio->sbi, is_read_io(fio->op) ?
@@ -516,6 +516,9 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
struct page *bio_page;
+ struct inode *inode;
+ bool bio_encrypted;
+ u64 dun;
f2fs_bug_on(sbi, is_read_io(fio->op));
@@ -538,6 +541,9 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
verify_block_addr(fio, fio->new_blkaddr);
bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+ inode = fio->page->mapping->host;
+ dun = PG_DUN(inode, fio->page);
+ bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
/* set submitted = true as a return value */
fio->submitted = true;
@@ -548,6 +554,11 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
(io->fio.op != fio->op || io->fio.op_flags != fio->op_flags) ||
!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
__submit_merged_bio(io);
+
+ /* ICE support */
+ if (!fscrypt_mergeable_bio(io->bio, dun, bio_encrypted))
+ __submit_merged_bio(io);
+
alloc_new:
if (io->bio == NULL) {
if ((fio->type == DATA || fio->type == NODE) &&
@@ -559,6 +570,8 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
io->bio = __bio_alloc(sbi, fio->new_blkaddr, fio->io_wbc,
BIO_MAX_PAGES, false,
fio->type, fio->temp);
+ if (bio_encrypted)
+ fscrypt_set_ice_dun(inode, io->bio, dun);
io->fio = *fio;
}
@@ -600,9 +613,10 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
return ERR_PTR(-ENOMEM);
f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, op_flag);
+ bio_set_op_attrs(bio, REQ_OP_READ, 0);
- if (f2fs_encrypted_file(inode))
+ if (f2fs_encrypted_file(inode) &&
+ !fscrypt_using_hardware_encryption(inode))
post_read_steps |= 1 << STEP_DECRYPT;
if (post_read_steps) {
ctx = mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);
@@ -627,6 +641,9 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
if (IS_ERR(bio))
return PTR_ERR(bio);
+ if (f2fs_may_encrypt_bio(inode, NULL))
+ fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, page));
+
/* wait for GCed page writeback via META_MAPPING */
f2fs_wait_on_block_writeback(inode, blkaddr);
@@ -1557,6 +1574,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
sector_t last_block_in_file;
sector_t block_nr;
struct f2fs_map_blocks map;
+ bool bio_encrypted;
+ u64 dun;
map.m_pblk = 0;
map.m_lblk = 0;
@@ -1639,6 +1658,14 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
__f2fs_submit_read_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
+
+ dun = PG_DUN(inode, page);
+ bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
+ if (!fscrypt_mergeable_bio(bio, dun, bio_encrypted)) {
+ __submit_bio(F2FS_I_SB(inode), bio, DATA);
+ bio = NULL;
+ }
+
if (bio == NULL) {
bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
is_readahead ? REQ_RAHEAD : 0);
@@ -1646,6 +1673,8 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
bio = NULL;
goto set_error_page;
}
+ if (bio_encrypted)
+ fscrypt_set_ice_dun(inode, bio, dun);
}
/*
@@ -1726,6 +1755,9 @@ static int encrypt_one_page(struct f2fs_io_info *fio)
f2fs_wait_on_block_writeback(inode, fio->old_blkaddr);
retry_encrypt:
+ if (fscrypt_using_hardware_encryption(inode))
+ return 0;
+
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
PAGE_SIZE, 0, fio->page->index, gfp_flags);
if (IS_ERR(fio->encrypted_page)) {
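The f2fs read and write paths above start a new bio whenever the inline-crypto context of the next page no longer matches the in-flight bio, and program the DUN (data unit number) of the first page with fscrypt_set_ice_dun(). Below is a minimal sketch of that merge-or-split decision under one loud assumption: pages are taken to share a bio only when both sides agree on encryption and the DUN continues contiguously. The real test is fscrypt_mergeable_bio(), whose internals the patch does not show, and every name in the sketch (bio_ctx, mergeable, submit_stub) is hypothetical:

```c
#include <stdbool.h>
#include <stdio.h>

struct bio_ctx {
	bool in_use;
	bool encrypted;               /* bio already carries an ICE key/DUN */
	unsigned long long next_dun;  /* DUN expected for the next page */
};

static void submit_stub(struct bio_ctx *b)
{
	puts("submit bio");
	b->in_use = false;
}

/*
 * Assumption: pages may share a bio only when both sides agree on
 * encryption and, if encrypted, the DUN continues contiguously. The
 * real check is fscrypt_mergeable_bio(), not shown in the patch.
 */
static bool mergeable(const struct bio_ctx *b, unsigned long long dun, bool enc)
{
	if (!b->in_use)
		return true;                    /* nothing to merge with yet */
	if (b->encrypted != enc)
		return false;
	return !enc || dun == b->next_dun;
}

int main(void)
{
	struct bio_ctx bio = { false, false, 0 };
	/* Hypothetical run of pages: two contiguous DUNs, a jump, one plain. */
	struct { unsigned long long dun; bool enc; } pages[] = {
		{ 100, true }, { 101, true }, { 200, true }, { 0, false },
	};

	for (int i = 0; i < 4; i++) {
		if (!mergeable(&bio, pages[i].dun, pages[i].enc))
			submit_stub(&bio);
		if (!bio.in_use) {
			bio = (struct bio_ctx){ true, pages[i].enc, pages[i].dun };
			if (bio.encrypted)
				printf("fscrypt_set_ice_dun -> %llu\n", bio.next_dun);
		}
		printf("add page dun=%llu enc=%d\n", pages[i].dun, pages[i].enc);
		bio.next_dun = pages[i].dun + 1;
	}
	if (bio.in_use)
		submit_stub(&bio);
	return 0;
}
```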
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 86536e9..fbd4a5d 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -3590,6 +3590,10 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
int rw = iov_iter_rw(iter);
+ if ((f2fs_encrypted_file(inode)) &&
+ !fscrypt_using_hardware_encryption(inode))
+ return true;
+
if (f2fs_post_read_required(inode))
return true;
if (sbi->s_ndevs)
@@ -3609,6 +3613,16 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
return false;
}
+static inline bool f2fs_may_encrypt_bio(struct inode *inode,
+ struct f2fs_io_info *fio)
+{
+ if (fio && (fio->type != DATA || fio->encrypted_page))
+ return false;
+
+ return (f2fs_encrypted_file(inode) &&
+ fscrypt_using_hardware_encryption(inode));
+}
+
#ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
unsigned int type);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 14f033e..db8f2f9 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -2190,6 +2190,11 @@ static bool f2fs_dummy_context(struct inode *inode)
return DUMMY_ENCRYPTION_ENABLED(F2FS_I_SB(inode));
}
+static inline bool f2fs_is_encrypted(struct inode *inode)
+{
+ return f2fs_encrypted_file(inode);
+}
+
static const struct fscrypt_operations f2fs_cryptops = {
.key_prefix = "f2fs:",
.get_context = f2fs_get_context,
@@ -2197,6 +2202,7 @@ static const struct fscrypt_operations f2fs_cryptops = {
.dummy_context = f2fs_dummy_context,
.empty_dir = f2fs_empty_dir,
.max_namelen = F2FS_NAME_LEN,
+ .is_encrypted = f2fs_is_encrypted,
};
#endif
diff --git a/fs/file_table.c b/fs/file_table.c
index e49af4c..d58c11a 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -326,6 +326,12 @@ void flush_delayed_fput(void)
static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
+void flush_delayed_fput_wait(void)
+{
+ delayed_fput(NULL);
+ flush_delayed_work(&delayed_fput_work);
+}
+
void fput(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index a8d24c0..414c534 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1699,7 +1699,6 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
req->in.h.nodeid = outarg->nodeid;
req->in.numargs = 2;
req->in.argpages = 1;
- req->page_descs[0].offset = offset;
req->end = fuse_retrieve_end;
index = outarg->offset >> PAGE_SHIFT;
@@ -1714,6 +1713,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
this_num = min_t(unsigned, num, PAGE_SIZE - offset);
req->pages[req->num_pages] = page;
+ req->page_descs[req->num_pages].offset = offset;
req->page_descs[req->num_pages].length = this_num;
req->num_pages++;
@@ -2039,8 +2039,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
ret = fuse_dev_do_write(fud, &cs, len);
+ pipe_lock(pipe);
for (idx = 0; idx < nbuf; idx++)
pipe_buf_release(pipe, &bufs[idx]);
+ pipe_unlock(pipe);
out:
kvfree(bufs);
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index f3519d6..baa8fbc 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1778,7 +1778,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
spin_unlock(&fc->lock);
dec_wb_stat(&bdi->wb, WB_WRITEBACK);
- dec_node_page_state(page, NR_WRITEBACK_TEMP);
+ dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP);
wb_writeout_inc(&bdi->wb);
fuse_writepage_free(fc, new_req);
fuse_request_free(new_req);
diff --git a/fs/inode.c b/fs/inode.c
index 39f86b5..790552e 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
return LRU_REMOVED;
}
- /*
- * Recently referenced inodes and inodes with many attached pages
- * get one more pass.
- */
- if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) {
+ /* recently referenced inodes get one more pass */
+ if (inode->i_state & I_REFERENCED) {
inode->i_state &= ~I_REFERENCED;
spin_unlock(&inode->i_lock);
return LRU_ROTATE;
diff --git a/fs/namei.c b/fs/namei.c
index 83c9b42..dfb16d8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2926,6 +2926,11 @@ int vfs_create2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry,
if (error)
return error;
error = dir->i_op->create(dir, dentry, mode, want_excl);
+ if (error)
+ return error;
+ error = security_inode_post_create(dir, dentry, mode);
+ if (error)
+ return error;
if (!error)
fsnotify_create(dir, dentry);
return error;
@@ -3750,6 +3755,11 @@ int vfs_mknod2(struct vfsmount *mnt, struct inode *dir, struct dentry *dentry, u
return error;
error = dir->i_op->mknod(dir, dentry, mode, dev);
+ if (error)
+ return error;
+ error = security_inode_post_create(dir, dentry, mode);
+ if (error)
+ return error;
if (!error)
fsnotify_create(dir, dentry);
return error;
diff --git a/fs/namespace.c b/fs/namespace.c
index 6b14059..c2340e8 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -21,6 +21,7 @@
#include <linux/fs_struct.h> /* get_fs_root et.al. */
#include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
+#include <linux/file.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
@@ -1134,6 +1135,12 @@ static void delayed_mntput(struct work_struct *unused)
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
+void flush_delayed_mntput_wait(void)
+{
+ delayed_mntput(NULL);
+ flush_delayed_work(&delayed_mntput_work);
+}
+
static void mntput_no_expire(struct mount *mnt)
{
rcu_read_lock();
@@ -1650,6 +1657,7 @@ int ksys_umount(char __user *name, int flags)
struct mount *mnt;
int retval;
int lookup_flags = 0;
+ bool user_request = !(current->flags & PF_KTHREAD);
if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
return -EINVAL;
@@ -1675,11 +1683,36 @@ int ksys_umount(char __user *name, int flags)
if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
goto dput_and_out;
+ /* flush delayed_fput to put mnt_count */
+ if (user_request)
+ flush_delayed_fput_wait();
+
retval = do_umount(mnt, flags);
dput_and_out:
/* we mustn't call path_put() as that would clear mnt_expiry_mark */
dput(path.dentry);
mntput_no_expire(mnt);
+
+ if (!user_request)
+ goto out;
+
+ if (!retval) {
+ /*
+ * If the last delayed_fput() is called during do_umount()
+ * and makes mnt_count zero, we need to guarantee to register
+ * delayed_mntput by waiting for delayed_fput work again.
+ */
+ flush_delayed_fput_wait();
+
+ /* flush delayed_mntput_work to put sb->s_active */
+ flush_delayed_mntput_wait();
+ }
+ if (!retval || (flags & MNT_FORCE)) {
+ /* filesystem needs to handle unclosed namespaces */
+ if (mnt->mnt.mnt_sb->s_op->umount_end)
+ mnt->mnt.mnt_sb->s_op->umount_end(mnt->mnt.mnt_sb,
+ flags);
+ }
out:
return retval;
}
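The umount changes above are about ordering: pending delayed-fput work is flushed before do_umount() so that struct file references on the mount are dropped first, and after a successful umount both kinds of deferred work are flushed again, since the last fput() inside do_umount() may itself have queued a delayed mntput that still pins sb->s_active; only then is the new ->umount_end() super operation given a chance to deal with namespaces that are still holding the mount. A toy sketch of that ordering using stub functions only; nothing here is kernel API:

```c
#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the helpers the patch adds; not kernel API. */
static void flush_delayed_fput_wait(void)   { puts("flush delayed fput"); }
static void flush_delayed_mntput_wait(void) { puts("flush delayed mntput"); }
static int  do_umount_stub(void)            { puts("do_umount"); return 0; }

int main(void)
{
	bool user_request = true;   /* i.e. not a PF_KTHREAD caller */
	int retval;

	/* Drop fput()s queued elsewhere so their mount references die first. */
	if (user_request)
		flush_delayed_fput_wait();

	retval = do_umount_stub();

	if (user_request && !retval) {
		/* The last fput during do_umount() may itself queue a mntput. */
		flush_delayed_fput_wait();
		flush_delayed_mntput_wait();   /* puts sb->s_active */
	}
	return retval;
}
```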
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ac4b2f0..5ef2c71 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2409,8 +2409,7 @@ static int nfs_compare_mount_options(const struct super_block *s, const struct n
goto Ebusy;
if (a->acdirmax != b->acdirmax)
goto Ebusy;
- if (b->auth_info.flavor_len > 0 &&
- clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
+ if (clnt_a->cl_auth->au_flavor != clnt_b->cl_auth->au_flavor)
goto Ebusy;
return 1;
Ebusy:
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 7fb9f7c..39b835d 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1126,6 +1126,8 @@ static ssize_t write_v4_end_grace(struct file *file, char *buf, size_t size)
case 'Y':
case 'y':
case '1':
+ if (nn->nfsd_serv)
+ return -EBUSY;
nfsd4_end_grace(nn);
break;
default:
@@ -1237,8 +1239,8 @@ static __net_init int nfsd_init_net(struct net *net)
retval = nfsd_idmap_init(net);
if (retval)
goto out_idmap_error;
- nn->nfsd4_lease = 45; /* default lease time */
- nn->nfsd4_grace = 45;
+ nn->nfsd4_lease = 90; /* default lease time */
+ nn->nfsd4_grace = 90;
nn->somebody_reclaimed = false;
nn->clverifier_counter = prandom_u32();
nn->clientid_counter = prandom_u32();
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile
index 99ee093..cc9b32b 100644
--- a/fs/ocfs2/Makefile
+++ b/fs/ocfs2/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -Ifs/ocfs2
+ccflags-y := -I$(src)
obj-$(CONFIG_OCFS2_FS) += \
ocfs2.o \
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 1d098c3..9f8250d 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -152,7 +152,6 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
#endif
}
- clear_buffer_uptodate(bh);
get_bh(bh); /* for end_buffer_read_sync() */
bh->b_end_io = end_buffer_read_sync;
submit_bh(REQ_OP_READ, 0, bh);
@@ -306,7 +305,6 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
continue;
}
- clear_buffer_uptodate(bh);
get_bh(bh); /* for end_buffer_read_sync() */
if (validate)
set_buffer_needs_validate(bh);
diff --git a/fs/ocfs2/dlm/Makefile b/fs/ocfs2/dlm/Makefile
index bd1aab1f..ef28544 100644
--- a/fs/ocfs2/dlm/Makefile
+++ b/fs/ocfs2/dlm/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Ifs/ocfs2
+ccflags-y := -I$(src)/..
obj-$(CONFIG_OCFS2_FS_O2CB) += ocfs2_dlm.o
diff --git a/fs/ocfs2/dlmfs/Makefile b/fs/ocfs2/dlmfs/Makefile
index eed3db8c..33431a0 100644
--- a/fs/ocfs2/dlmfs/Makefile
+++ b/fs/ocfs2/dlmfs/Makefile
@@ -1,4 +1,4 @@
-ccflags-y := -Ifs/ocfs2
+ccflags-y := -I$(src)/..
obj-$(CONFIG_OCFS2_FS) += ocfs2_dlmfs.o
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c6a8cba..1d56f1f 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1085,10 +1085,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy)
task_lock(p);
if (!p->vfork_done && process_shares_mm(p, mm)) {
- pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n",
- task_pid_nr(p), p->comm,
- p->signal->oom_score_adj, oom_adj,
- task_pid_nr(task), task->comm);
p->signal->oom_score_adj = oom_adj;
if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE))
p->signal->oom_score_adj_min = (short)oom_adj;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index ab3a089..38ed88e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -480,7 +480,7 @@ struct mem_size_stats {
};
static void smaps_account(struct mem_size_stats *mss, struct page *page,
- bool compound, bool young, bool dirty)
+ bool compound, bool young, bool dirty, bool locked)
{
int i, nr = compound ? 1 << compound_order(page) : 1;
unsigned long size = nr * PAGE_SIZE;
@@ -507,24 +507,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
else
mss->private_clean += size;
mss->pss += (u64)size << PSS_SHIFT;
+ if (locked)
+ mss->pss_locked += (u64)size << PSS_SHIFT;
return;
}
for (i = 0; i < nr; i++, page++) {
int mapcount = page_mapcount(page);
+ unsigned long pss = (PAGE_SIZE << PSS_SHIFT);
if (mapcount >= 2) {
if (dirty || PageDirty(page))
mss->shared_dirty += PAGE_SIZE;
else
mss->shared_clean += PAGE_SIZE;
- mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount;
+ mss->pss += pss / mapcount;
+ if (locked)
+ mss->pss_locked += pss / mapcount;
} else {
if (dirty || PageDirty(page))
mss->private_dirty += PAGE_SIZE;
else
mss->private_clean += PAGE_SIZE;
- mss->pss += PAGE_SIZE << PSS_SHIFT;
+ mss->pss += pss;
+ if (locked)
+ mss->pss_locked += pss;
}
}
}
@@ -547,6 +554,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = walk->vma;
+ bool locked = !!(vma->vm_flags & VM_LOCKED);
struct page *page = NULL;
if (pte_present(*pte)) {
@@ -589,7 +597,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr,
if (!page)
return;
- smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte));
+ smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -598,6 +606,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = walk->vma;
+ bool locked = !!(vma->vm_flags & VM_LOCKED);
struct page *page;
/* FOLL_DUMP will return -EFAULT on huge zero page */
@@ -612,7 +621,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
/* pass */;
else
VM_BUG_ON_PAGE(1, page);
- smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd));
+ smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked);
}
#else
static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
@@ -794,11 +803,8 @@ static void smap_gather_stats(struct vm_area_struct *vma,
}
}
#endif
-
/* mmap_sem is held in m_start */
walk_page_vma(vma, &smaps_walk);
- if (vma->vm_flags & VM_LOCKED)
- mss->pss_locked += mss->pss;
}
#define SEQ_PUT_DEC(str, val) \
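The smaps change above folds the VM_LOCKED case into smaps_account(), so pss_locked is accumulated per page with the same mapcount-weighted fixed-point arithmetic as pss, rather than copying the whole VMA's pss afterwards. A small standalone example of that fixed-point accounting; the page count and mapcounts are made up:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL
#define PSS_SHIFT 12   /* same shift as fs/proc/task_mmu.c */

int main(void)
{
	/* Hypothetical mapcounts of four resident pages in one VMA. */
	int mapcount[] = { 1, 2, 2, 4 };
	uint64_t pss = 0;

	/*
	 * Each page contributes PAGE_SIZE / mapcount, accumulated in fixed
	 * point (<< PSS_SHIFT) so the fractional part is not lost; a locked
	 * VMA would add the same amounts to pss_locked.
	 */
	for (int i = 0; i < 4; i++)
		pss += (PAGE_SIZE << PSS_SHIFT) / mapcount[i];

	printf("Pss: %llu bytes\n", (unsigned long long)(pss >> PSS_SHIFT));
	return 0;
}
```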
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 5df554a9..ae796e1 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1357,6 +1357,12 @@ static int udf_read_inode(struct inode *inode, bool hidden_inode)
iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
ICBTAG_FLAG_AD_MASK;
+ if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
+ iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
+ iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
+ ret = -EIO;
+ goto out;
+ }
iinfo->i_unique = 0;
iinfo->i_lenEAttr = 0;
iinfo->i_lenExtents = 0;
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index 6fc5425..2652d00 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -243,7 +243,7 @@ xfs_attr3_leaf_verify(
struct xfs_mount *mp = bp->b_target->bt_mount;
struct xfs_attr_leafblock *leaf = bp->b_addr;
struct xfs_attr_leaf_entry *entries;
- uint16_t end;
+ uint32_t end; /* must be 32bit - see below */
int i;
xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
@@ -293,6 +293,11 @@ xfs_attr3_leaf_verify(
/*
* Quickly check the freemap information. Attribute data has to be
* aligned to 4-byte boundaries, and likewise for the free space.
+ *
+ * Note that for 64k block size filesystems, the freemap entries cannot
+ * overflow as they are only be16 fields. However, when checking the end
+ * pointer of the freemap, we have to be careful to detect overflows, and
+ * so we use uint32_t for those checks.
*/
for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
if (ichdr.freemap[i].base > mp->m_attr_geo->blksize)
@@ -303,7 +308,9 @@ xfs_attr3_leaf_verify(
return __this_address;
if (ichdr.freemap[i].size & 0x3)
return __this_address;
- end = ichdr.freemap[i].base + ichdr.freemap[i].size;
+
+ /* be careful of 16-bit overflows here */
+ end = (uint32_t)ichdr.freemap[i].base + ichdr.freemap[i].size;
if (end < ichdr.freemap[i].base)
return __this_address;
if (end > mp->m_attr_geo->blksize)
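The widened end variable matters on 64k-block filesystems, where a freemap entry may legitimately run right up to the end of the block: base + size is then exactly 65536, which truncates to 0 in a u16 and trips the end < base wrap check on perfectly valid metadata. A tiny demonstration, assuming a 64k blksize and a freemap entry that ends at the block boundary:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t blksize = 65536;          /* 64k attr block */
	uint16_t base = 0xff00, size = 0x0100;   /* base + size == blksize */

	uint16_t end16 = base + size;            /* truncates to 0 */
	uint32_t end32 = (uint32_t)base + size;  /* 0x10000, no wrap */

	printf("u16 end = %u -> 'end < base' %s\n",
	       (unsigned)end16, end16 < base ? "trips on valid data" : "passes");
	printf("u32 end = %u -> 'end > blksize' %s\n",
	       (unsigned)end32, end32 > blksize ? "flags corruption" : "passes");
	return 0;
}
```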
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index a476703..3a496ff 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -1683,10 +1683,13 @@ xfs_bmap_add_extent_delay_real(
case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
/*
* Filling in all of a previously delayed allocation extent.
- * The right neighbor is contiguous, the left is not.
+ * The right neighbor is contiguous, the left is not. Take care
+ * with delay -> unwritten extent allocation here because the
+ * delalloc record we are overwriting is always written.
*/
PREV.br_startblock = new->br_startblock;
PREV.br_blockcount += RIGHT.br_blockcount;
+ PREV.br_state = new->br_state;
xfs_iext_next(ifp, &bma->icur);
xfs_iext_remove(bma->ip, &bma->icur, state);
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 34c6d7b..bbdae2b 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -330,7 +330,7 @@ xfs_btree_sblock_verify_crc(
if (xfs_sb_version_hascrc(&mp->m_sb)) {
if (!xfs_log_check_lsn(mp, be64_to_cpu(block->bb_u.s.bb_lsn)))
- return __this_address;
+ return false;
return xfs_buf_verify_cksum(bp, XFS_BTREE_SBLOCK_CRC_OFF);
}
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 49f5f58..b697866 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -449,6 +449,7 @@ xfs_map_blocks(
}
wpc->imap = imap;
+ xfs_trim_extent_eof(&wpc->imap, ip);
trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap);
return 0;
allocate_blocks:
@@ -459,6 +460,7 @@ xfs_map_blocks(
ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF ||
imap.br_startoff + imap.br_blockcount <= cow_fsb);
wpc->imap = imap;
+ xfs_trim_extent_eof(&wpc->imap, ip);
trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap);
return 0;
}
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 6de8d90..211b06e 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1175,9 +1175,9 @@ xfs_free_file_space(
* page could be mmap'd and iomap_zero_range doesn't do that for us.
* Writeback of the eof page will do this, albeit clumsily.
*/
- if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
+ if (offset + len >= XFS_ISIZE(ip) && offset_in_page(offset + len) > 0) {
error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
- (offset + len) & ~PAGE_MASK, LLONG_MAX);
+ round_down(offset + len, PAGE_SIZE), LLONG_MAX);
}
return error;
@@ -1824,6 +1824,12 @@ xfs_swap_extents(
if (error)
goto out_unlock;
+ if (xfs_inode_has_cow_data(tip)) {
+ error = xfs_reflink_cancel_cow_range(tip, 0, NULLFILEOFF, true);
+ if (error)
+ return error;
+ }
+
/*
* Extent "swapping" with rmap requires a permanent reservation and
* a block reservation because it's really just a remap operation
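Two independent fixes land in this file: the first hunk writes back the partial page at the end of a hole-punch only when the range actually ends mid-page, using offset_in_page()/round_down() instead of the open-coded PAGE_MASK arithmetic, and the second cancels any copy-on-write extents left on the temporary inode before the extent swap. A small sketch of the page-boundary arithmetic from the first hunk, with simplified local versions of the kernel macros (illustration only):

```c
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Simplified stand-ins for the kernel helpers used by the patch. */
#define offset_in_page(p)  ((unsigned long)(p) & ~PAGE_MASK)
#define round_down(x, y)   ((unsigned long)(x) & ~((y) - 1))

int main(void)
{
	/* Hypothetical hole-punch that ends partway through a page. */
	unsigned long offset = 8192, len = 6000;
	unsigned long end = offset + len;                 /* 14192 */

	if (offset_in_page(end) > 0)
		printf("write back from %lu (start of the partial EOF page)\n",
		       round_down(end, PAGE_SIZE));       /* 12288 */
	else
		printf("range ends on a page boundary, nothing extra to flush\n");
	return 0;
}
```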
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 12d8455..010db5f 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -1233,9 +1233,23 @@ xfs_buf_iodone(
}
/*
- * Requeue a failed buffer for writeback
+ * Requeue a failed buffer for writeback.
*
- * Return true if the buffer has been re-queued properly, false otherwise
+ * We clear the log item failed state here as well, but we have to be careful
+ * about reference counts because the only active reference counts on the buffer
+ * may be the failed log items. Hence if we clear the log item failed state
+ * before queuing the buffer for IO we can release all active references to
+ * the buffer and free it, leading to use after free problems in
+ * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
+ * order we process them in - the buffer is locked, and we own the buffer list
+ * so nothing on them is going to change while we are performing this action.
+ *
+ * Hence we can safely queue the buffer for IO before we clear the failed log
+ * item state, therefore always having an active reference to the buffer and
+ * avoiding the transient zero-reference state that leads to use-after-free.
+ *
+ * Return true if the buffer was added to the buffer list, false if it was
+ * already on the buffer list.
*/
bool
xfs_buf_resubmit_failed_buffers(
@@ -1243,16 +1257,16 @@ xfs_buf_resubmit_failed_buffers(
struct list_head *buffer_list)
{
struct xfs_log_item *lip;
+ bool ret;
+
+ ret = xfs_buf_delwri_queue(bp, buffer_list);
/*
- * Clear XFS_LI_FAILED flag from all items before resubmit
- *
- * XFS_LI_FAILED set/clear is protected by ail_lock, caller this
+ * XFS_LI_FAILED set/clear is protected by ail_lock, caller of this
* function already have it acquired
*/
list_for_each_entry(lip, &bp->b_li_list, li_bio_list)
xfs_clear_li_failed(lip);
- /* Add this buffer back to the delayed write list */
- return xfs_buf_delwri_queue(bp, buffer_list);
+ return ret;
}
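The reordering in xfs_buf_resubmit_failed_buffers() exists purely for the reference-count reasoning spelled out in the new comment: the failed log items may hold the only references to the buffer, so the buffer has to go back on the delwri list (taking its own reference) before those failed states, and with them the references, are dropped. A toy refcount sketch of why the old order was unsafe; the struct and helpers are made up:

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy refcounted buffer: the failed log items hold the only references. */
struct buf { int refs; };

static void put(struct buf **bp)
{
	if (--(*bp)->refs == 0) {
		puts("buffer freed");
		free(*bp);
		*bp = NULL;
	}
}

int main(void)
{
	struct buf *bp = malloc(sizeof(*bp));
	bp->refs = 1;                  /* held by the one failed log item  */

	/* Old order: clearing the item's failed state drops its reference, */
	/* put(&bp);  and a later delwri-queue would touch freed memory.    */

	/* Patched order: take the delwri-queue reference first, then clear. */
	bp->refs++;                    /* reference taken by the buffer list */
	puts("queued for writeback");
	put(&bp);                      /* log item reference dropped safely  */
	printf("buffer still alive with %d ref(s)\n", bp->refs);

	put(&bp);                      /* eventually released after write-out */
	return 0;
}
```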
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 0ef5ece..bad9047 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -1616,7 +1616,7 @@ xfs_ioc_getbmap(
error = 0;
out_free_buf:
kmem_free(buf);
- return 0;
+ return error;
}
struct getfsmap_info {
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index 73a1d77..3091e4b 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -40,7 +40,7 @@ xfs_fill_statvfs_from_dquot(
statp->f_files = limit;
statp->f_ffree =
(statp->f_files > dqp->q_res_icount) ?
- (statp->f_ffree - dqp->q_res_icount) : 0;
+ (statp->f_files - dqp->q_res_icount) : 0;
}
}
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 42ea7ba..7088f44 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -302,6 +302,7 @@ xfs_reflink_reserve_cow(
if (error)
return error;
+ xfs_trim_extent(imap, got.br_startoff, got.br_blockcount);
trace_xfs_reflink_cow_alloc(ip, &got);
return 0;
}
diff --git a/fs/xfs/xfs_stats.c b/fs/xfs/xfs_stats.c
index 4e44231..740ac96 100644
--- a/fs/xfs/xfs_stats.c
+++ b/fs/xfs/xfs_stats.c
@@ -119,7 +119,7 @@ static int xqmstat_proc_show(struct seq_file *m, void *v)
int j;
seq_printf(m, "qm");
- for (j = XFSSTAT_END_IBT_V2; j < XFSSTAT_END_XQMSTAT; j++)
+ for (j = XFSSTAT_END_REFCOUNT; j < XFSSTAT_END_XQMSTAT; j++)
seq_printf(m, " %u", counter_val(xfsstats.xs_stats, j));
seq_putc(m, '\n');
return 0;
diff --git a/include/dt-bindings/clock/qcom,audio-ext-clk.h b/include/dt-bindings/clock/qcom,audio-ext-clk.h
index f4dbc28..dcdcb1c 100644
--- a/include/dt-bindings/clock/qcom,audio-ext-clk.h
+++ b/include/dt-bindings/clock/qcom,audio-ext-clk.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __AUDIO_EXT_CLK_H
@@ -15,5 +15,7 @@
#define AUDIO_LPASS_MCLK_5 6
#define AUDIO_LPASS_MCLK_6 7
#define AUDIO_LPASS_MCLK_7 8
+#define AUDIO_LPASS_CORE_HW_VOTE 9
+#define AUDIO_LPASS_MCLK_8 10
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-kona.h b/include/dt-bindings/clock/qcom,gcc-kona.h
index 0f706be..f55639c 100644
--- a/include/dt-bindings/clock/qcom,gcc-kona.h
+++ b/include/dt-bindings/clock/qcom,gcc-kona.h
@@ -52,178 +52,176 @@
#define GCC_GPU_IREF_EN 40
#define GCC_GPU_MEMNOC_GFX_CLK 41
#define GCC_GPU_SNOC_DVM_GFX_CLK 42
-#define GCC_NPU_AT_CLK 43
-#define GCC_NPU_AXI_CLK 44
-#define GCC_NPU_BWMON_AXI_CLK 45
-#define GCC_NPU_BWMON_CFG_AHB_CLK 46
-#define GCC_NPU_CFG_AHB_CLK 47
-#define GCC_NPU_DMA_CLK 48
-#define GCC_NPU_GPLL0_CLK_SRC 49
-#define GCC_NPU_GPLL0_DIV_CLK_SRC 50
-#define GCC_NPU_TRIG_CLK 51
-#define GCC_PCIE0_PHY_REFGEN_CLK 52
-#define GCC_PCIE1_PHY_REFGEN_CLK 53
-#define GCC_PCIE2_PHY_REFGEN_CLK 54
-#define GCC_PCIE_0_AUX_CLK 55
-#define GCC_PCIE_0_AUX_CLK_SRC 56
-#define GCC_PCIE_0_CFG_AHB_CLK 57
-#define GCC_PCIE_0_MSTR_AXI_CLK 58
-#define GCC_PCIE_0_PIPE_CLK 59
-#define GCC_PCIE_0_SLV_AXI_CLK 60
-#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 61
-#define GCC_PCIE_1_AUX_CLK 62
-#define GCC_PCIE_1_AUX_CLK_SRC 63
-#define GCC_PCIE_1_CFG_AHB_CLK 64
-#define GCC_PCIE_1_MSTR_AXI_CLK 65
-#define GCC_PCIE_1_PIPE_CLK 66
-#define GCC_PCIE_1_SLV_AXI_CLK 67
-#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 68
-#define GCC_PCIE_2_AUX_CLK 69
-#define GCC_PCIE_2_AUX_CLK_SRC 70
-#define GCC_PCIE_2_CFG_AHB_CLK 71
-#define GCC_PCIE_2_MSTR_AXI_CLK 72
-#define GCC_PCIE_2_PIPE_CLK 73
-#define GCC_PCIE_2_SLV_AXI_CLK 74
-#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 75
-#define GCC_PCIE_MDM_CLKREF_EN 76
-#define GCC_PCIE_PHY_AUX_CLK 77
-#define GCC_PCIE_PHY_REFGEN_CLK_SRC 78
-#define GCC_PCIE_WIFI_CLKREF_EN 79
-#define GCC_PCIE_WIGIG_CLKREF_EN 80
-#define GCC_PDM2_CLK 81
-#define GCC_PDM2_CLK_SRC 82
-#define GCC_PDM_AHB_CLK 83
-#define GCC_PDM_XO4_CLK 84
-#define GCC_PRNG_AHB_CLK 85
-#define GCC_QMIP_CAMERA_NRT_AHB_CLK 86
-#define GCC_QMIP_CAMERA_RT_AHB_CLK 87
-#define GCC_QMIP_DISP_AHB_CLK 88
-#define GCC_QMIP_VIDEO_CVP_AHB_CLK 89
-#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 90
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK 91
-#define GCC_QUPV3_WRAP0_CORE_CLK 92
-#define GCC_QUPV3_WRAP0_S0_CLK 93
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC 94
-#define GCC_QUPV3_WRAP0_S1_CLK 95
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC 96
-#define GCC_QUPV3_WRAP0_S2_CLK 97
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC 98
-#define GCC_QUPV3_WRAP0_S3_CLK 99
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC 100
-#define GCC_QUPV3_WRAP0_S4_CLK 101
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC 102
-#define GCC_QUPV3_WRAP0_S5_CLK 103
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC 104
-#define GCC_QUPV3_WRAP0_S6_CLK 105
-#define GCC_QUPV3_WRAP0_S6_CLK_SRC 106
-#define GCC_QUPV3_WRAP0_S7_CLK 107
-#define GCC_QUPV3_WRAP0_S7_CLK_SRC 108
-#define GCC_QUPV3_WRAP1_CORE_2X_CLK 109
-#define GCC_QUPV3_WRAP1_CORE_CLK 110
-#define GCC_QUPV3_WRAP1_S0_CLK 111
-#define GCC_QUPV3_WRAP1_S0_CLK_SRC 112
-#define GCC_QUPV3_WRAP1_S1_CLK 113
-#define GCC_QUPV3_WRAP1_S1_CLK_SRC 114
-#define GCC_QUPV3_WRAP1_S2_CLK 115
-#define GCC_QUPV3_WRAP1_S2_CLK_SRC 116
-#define GCC_QUPV3_WRAP1_S3_CLK 117
-#define GCC_QUPV3_WRAP1_S3_CLK_SRC 118
-#define GCC_QUPV3_WRAP1_S4_CLK 119
-#define GCC_QUPV3_WRAP1_S4_CLK_SRC 120
-#define GCC_QUPV3_WRAP1_S5_CLK 121
-#define GCC_QUPV3_WRAP1_S5_CLK_SRC 122
-#define GCC_QUPV3_WRAP2_CORE_2X_CLK 123
-#define GCC_QUPV3_WRAP2_CORE_CLK 124
-#define GCC_QUPV3_WRAP2_S0_CLK 125
-#define GCC_QUPV3_WRAP2_S0_CLK_SRC 126
-#define GCC_QUPV3_WRAP2_S1_CLK 127
-#define GCC_QUPV3_WRAP2_S1_CLK_SRC 128
-#define GCC_QUPV3_WRAP2_S2_CLK 129
-#define GCC_QUPV3_WRAP2_S2_CLK_SRC 130
-#define GCC_QUPV3_WRAP2_S3_CLK 131
-#define GCC_QUPV3_WRAP2_S3_CLK_SRC 132
-#define GCC_QUPV3_WRAP2_S4_CLK 133
-#define GCC_QUPV3_WRAP2_S4_CLK_SRC 134
-#define GCC_QUPV3_WRAP2_S5_CLK 135
-#define GCC_QUPV3_WRAP2_S5_CLK_SRC 136
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK 137
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK 138
-#define GCC_QUPV3_WRAP_1_M_AHB_CLK 139
-#define GCC_QUPV3_WRAP_1_S_AHB_CLK 140
-#define GCC_QUPV3_WRAP_2_M_AHB_CLK 141
-#define GCC_QUPV3_WRAP_2_S_AHB_CLK 142
-#define GCC_SDCC2_AHB_CLK 143
-#define GCC_SDCC2_APPS_CLK 144
-#define GCC_SDCC2_APPS_CLK_SRC 145
-#define GCC_SDCC4_AHB_CLK 146
-#define GCC_SDCC4_APPS_CLK 147
-#define GCC_SDCC4_APPS_CLK_SRC 148
-#define GCC_SYS_NOC_CPUSS_AHB_CLK 149
-#define GCC_TSIF_AHB_CLK 150
-#define GCC_TSIF_INACTIVITY_TIMERS_CLK 151
-#define GCC_TSIF_REF_CLK 152
-#define GCC_TSIF_REF_CLK_SRC 153
-#define GCC_UFS_1X_CLKREF_EN 154
-#define GCC_UFS_CARD_AHB_CLK 155
-#define GCC_UFS_CARD_AXI_CLK 156
-#define GCC_UFS_CARD_AXI_CLK_SRC 157
-#define GCC_UFS_CARD_AXI_HW_CTL_CLK 158
-#define GCC_UFS_CARD_ICE_CORE_CLK 159
-#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 160
-#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 161
-#define GCC_UFS_CARD_PHY_AUX_CLK 162
-#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 163
-#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 164
-#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 165
-#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 166
-#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 167
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK 168
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 169
-#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 170
-#define GCC_UFS_PHY_AHB_CLK 171
-#define GCC_UFS_PHY_AXI_CLK 172
-#define GCC_UFS_PHY_AXI_CLK_SRC 173
-#define GCC_UFS_PHY_AXI_HW_CTL_CLK 174
-#define GCC_UFS_PHY_ICE_CORE_CLK 175
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 176
-#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 177
-#define GCC_UFS_PHY_PHY_AUX_CLK 178
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 179
-#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 180
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 181
-#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 182
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 183
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK 184
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 185
-#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 186
-#define GCC_USB30_PRIM_MASTER_CLK 187
-#define GCC_USB30_PRIM_MASTER_CLK_SRC 188
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK 189
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 190
-#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 191
-#define GCC_USB30_PRIM_SLEEP_CLK 192
-#define GCC_USB30_SEC_MASTER_CLK 193
-#define GCC_USB30_SEC_MASTER_CLK_SRC 194
-#define GCC_USB30_SEC_MOCK_UTMI_CLK 195
-#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 196
-#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 197
-#define GCC_USB30_SEC_SLEEP_CLK 198
-#define GCC_USB3_PRIM_PHY_AUX_CLK 199
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 200
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 201
-#define GCC_USB3_PRIM_PHY_PIPE_CLK 202
-#define GCC_USB3_SEC_CLKREF_EN 203
-#define GCC_USB3_SEC_PHY_AUX_CLK 204
-#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 205
-#define GCC_USB3_SEC_PHY_COM_AUX_CLK 206
-#define GCC_USB3_SEC_PHY_PIPE_CLK 207
-#define GCC_VIDEO_AHB_CLK 208
-#define GCC_VIDEO_AXI0_CLK 209
-#define GCC_VIDEO_AXI1_CLK 210
-#define GCC_VIDEO_XO_CLK 211
-#define GPLL0 212
-#define GPLL0_OUT_EVEN 213
-#define GPLL9 214
+#define GCC_NPU_AXI_CLK 43
+#define GCC_NPU_BWMON_AXI_CLK 44
+#define GCC_NPU_BWMON_CFG_AHB_CLK 45
+#define GCC_NPU_CFG_AHB_CLK 46
+#define GCC_NPU_DMA_CLK 47
+#define GCC_NPU_GPLL0_CLK_SRC 48
+#define GCC_NPU_GPLL0_DIV_CLK_SRC 49
+#define GCC_PCIE0_PHY_REFGEN_CLK 50
+#define GCC_PCIE1_PHY_REFGEN_CLK 51
+#define GCC_PCIE2_PHY_REFGEN_CLK 52
+#define GCC_PCIE_0_AUX_CLK 53
+#define GCC_PCIE_0_AUX_CLK_SRC 54
+#define GCC_PCIE_0_CFG_AHB_CLK 55
+#define GCC_PCIE_0_MSTR_AXI_CLK 56
+#define GCC_PCIE_0_PIPE_CLK 57
+#define GCC_PCIE_0_SLV_AXI_CLK 58
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 59
+#define GCC_PCIE_1_AUX_CLK 60
+#define GCC_PCIE_1_AUX_CLK_SRC 61
+#define GCC_PCIE_1_CFG_AHB_CLK 62
+#define GCC_PCIE_1_MSTR_AXI_CLK 63
+#define GCC_PCIE_1_PIPE_CLK 64
+#define GCC_PCIE_1_SLV_AXI_CLK 65
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 66
+#define GCC_PCIE_2_AUX_CLK 67
+#define GCC_PCIE_2_AUX_CLK_SRC 68
+#define GCC_PCIE_2_CFG_AHB_CLK 69
+#define GCC_PCIE_2_MSTR_AXI_CLK 70
+#define GCC_PCIE_2_PIPE_CLK 71
+#define GCC_PCIE_2_SLV_AXI_CLK 72
+#define GCC_PCIE_2_SLV_Q2A_AXI_CLK 73
+#define GCC_PCIE_MDM_CLKREF_EN 74
+#define GCC_PCIE_PHY_AUX_CLK 75
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC 76
+#define GCC_PCIE_WIFI_CLKREF_EN 77
+#define GCC_PCIE_WIGIG_CLKREF_EN 78
+#define GCC_PDM2_CLK 79
+#define GCC_PDM2_CLK_SRC 80
+#define GCC_PDM_AHB_CLK 81
+#define GCC_PDM_XO4_CLK 82
+#define GCC_PRNG_AHB_CLK 83
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 84
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 85
+#define GCC_QMIP_DISP_AHB_CLK 86
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 87
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 88
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 89
+#define GCC_QUPV3_WRAP0_CORE_CLK 90
+#define GCC_QUPV3_WRAP0_S0_CLK 91
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 92
+#define GCC_QUPV3_WRAP0_S1_CLK 93
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 94
+#define GCC_QUPV3_WRAP0_S2_CLK 95
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 96
+#define GCC_QUPV3_WRAP0_S3_CLK 97
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 98
+#define GCC_QUPV3_WRAP0_S4_CLK 99
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 100
+#define GCC_QUPV3_WRAP0_S5_CLK 101
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 102
+#define GCC_QUPV3_WRAP0_S6_CLK 103
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 104
+#define GCC_QUPV3_WRAP0_S7_CLK 105
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC 106
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 107
+#define GCC_QUPV3_WRAP1_CORE_CLK 108
+#define GCC_QUPV3_WRAP1_S0_CLK 109
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 110
+#define GCC_QUPV3_WRAP1_S1_CLK 111
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 112
+#define GCC_QUPV3_WRAP1_S2_CLK 113
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 114
+#define GCC_QUPV3_WRAP1_S3_CLK 115
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 116
+#define GCC_QUPV3_WRAP1_S4_CLK 117
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 118
+#define GCC_QUPV3_WRAP1_S5_CLK 119
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 120
+#define GCC_QUPV3_WRAP2_CORE_2X_CLK 121
+#define GCC_QUPV3_WRAP2_CORE_CLK 122
+#define GCC_QUPV3_WRAP2_S0_CLK 123
+#define GCC_QUPV3_WRAP2_S0_CLK_SRC 124
+#define GCC_QUPV3_WRAP2_S1_CLK 125
+#define GCC_QUPV3_WRAP2_S1_CLK_SRC 126
+#define GCC_QUPV3_WRAP2_S2_CLK 127
+#define GCC_QUPV3_WRAP2_S2_CLK_SRC 128
+#define GCC_QUPV3_WRAP2_S3_CLK 129
+#define GCC_QUPV3_WRAP2_S3_CLK_SRC 130
+#define GCC_QUPV3_WRAP2_S4_CLK 131
+#define GCC_QUPV3_WRAP2_S4_CLK_SRC 132
+#define GCC_QUPV3_WRAP2_S5_CLK 133
+#define GCC_QUPV3_WRAP2_S5_CLK_SRC 134
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 135
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 136
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 137
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 138
+#define GCC_QUPV3_WRAP_2_M_AHB_CLK 139
+#define GCC_QUPV3_WRAP_2_S_AHB_CLK 140
+#define GCC_SDCC2_AHB_CLK 141
+#define GCC_SDCC2_APPS_CLK 142
+#define GCC_SDCC2_APPS_CLK_SRC 143
+#define GCC_SDCC4_AHB_CLK 144
+#define GCC_SDCC4_APPS_CLK 145
+#define GCC_SDCC4_APPS_CLK_SRC 146
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 147
+#define GCC_TSIF_AHB_CLK 148
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK 149
+#define GCC_TSIF_REF_CLK 150
+#define GCC_TSIF_REF_CLK_SRC 151
+#define GCC_UFS_1X_CLKREF_EN 152
+#define GCC_UFS_CARD_AHB_CLK 153
+#define GCC_UFS_CARD_AXI_CLK 154
+#define GCC_UFS_CARD_AXI_CLK_SRC 155
+#define GCC_UFS_CARD_AXI_HW_CTL_CLK 156
+#define GCC_UFS_CARD_ICE_CORE_CLK 157
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC 158
+#define GCC_UFS_CARD_ICE_CORE_HW_CTL_CLK 159
+#define GCC_UFS_CARD_PHY_AUX_CLK 160
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC 161
+#define GCC_UFS_CARD_PHY_AUX_HW_CTL_CLK 162
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK 163
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK 164
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK 165
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK 166
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC 167
+#define GCC_UFS_CARD_UNIPRO_CORE_HW_CTL_CLK 168
+#define GCC_UFS_PHY_AHB_CLK 169
+#define GCC_UFS_PHY_AXI_CLK 170
+#define GCC_UFS_PHY_AXI_CLK_SRC 171
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 172
+#define GCC_UFS_PHY_ICE_CORE_CLK 173
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 174
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 175
+#define GCC_UFS_PHY_PHY_AUX_CLK 176
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 177
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 178
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 179
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 180
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 181
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 182
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 183
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 184
+#define GCC_USB30_PRIM_MASTER_CLK 185
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 186
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 187
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 188
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 189
+#define GCC_USB30_PRIM_SLEEP_CLK 190
+#define GCC_USB30_SEC_MASTER_CLK 191
+#define GCC_USB30_SEC_MASTER_CLK_SRC 192
+#define GCC_USB30_SEC_MOCK_UTMI_CLK 193
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC 194
+#define GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC 195
+#define GCC_USB30_SEC_SLEEP_CLK 196
+#define GCC_USB3_PRIM_PHY_AUX_CLK 197
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 198
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 199
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 200
+#define GCC_USB3_SEC_CLKREF_EN 201
+#define GCC_USB3_SEC_PHY_AUX_CLK 202
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC 203
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK 204
+#define GCC_USB3_SEC_PHY_PIPE_CLK 205
+#define GCC_VIDEO_AHB_CLK 206
+#define GCC_VIDEO_AXI0_CLK 207
+#define GCC_VIDEO_AXI1_CLK 208
+#define GCC_VIDEO_XO_CLK 209
+#define GPLL0 210
+#define GPLL0_OUT_EVEN 211
+#define GPLL9 212
/* GCC resets */
#define GCC_DPM_BCR 0
diff --git a/include/dt-bindings/phy/qcom,lito-qmp-usb3.h b/include/dt-bindings/phy/qcom,lito-qmp-usb3.h
new file mode 100644
index 0000000..cea50b9
--- /dev/null
+++ b/include/dt-bindings/phy/qcom,lito-qmp-usb3.h
@@ -0,0 +1,727 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_PHY_QCOM_LITO_QMP_USB_H
+#define _DT_BINDINGS_PHY_QCOM_LITO_QMP_USB_H
+
+/* USB3-DP Combo PHY register offsets */
+
+#define USB3_DP_COM_PHY_MODE_CTRL 0x0000
+#define USB3_DP_COM_SW_RESET 0x0004
+#define USB3_DP_COM_POWER_DOWN_CTRL 0x0008
+#define USB3_DP_COM_SWI_CTRL 0x000c
+#define USB3_DP_COM_TYPEC_CTRL 0x0010
+#define USB3_DP_COM_TYPEC_PWRDN_CTRL 0x0014
+#define USB3_DP_COM_DP_BIST_CFG_0 0x0018
+#define USB3_DP_COM_RESET_OVRD_CTRL 0x001c
+#define USB3_DP_COM_DBG_CLK_MUX_CTRL 0x0020
+#define USB3_DP_COM_TYPEC_STATUS 0x0024
+#define USB3_DP_COM_PLACEHOLDER_STATUS 0x0028
+#define USB3_DP_COM_REVISION_ID0 0x002c
+#define USB3_DP_COM_REVISION_ID1 0x0030
+#define USB3_DP_COM_REVISION_ID2 0x0034
+#define USB3_DP_COM_REVISION_ID3 0x0038
+#define USB3_DP_QSERDES_COM_ATB_SEL1 0x1000
+#define USB3_DP_QSERDES_COM_ATB_SEL2 0x1004
+#define USB3_DP_QSERDES_COM_FREQ_UPDATE 0x1008
+#define USB3_DP_QSERDES_COM_BG_TIMER 0x100c
+#define USB3_DP_QSERDES_COM_SSC_EN_CENTER 0x1010
+#define USB3_DP_QSERDES_COM_SSC_ADJ_PER1 0x1014
+#define USB3_DP_QSERDES_COM_SSC_ADJ_PER2 0x1018
+#define USB3_DP_QSERDES_COM_SSC_PER1 0x101c
+#define USB3_DP_QSERDES_COM_SSC_PER2 0x1020
+#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE1_MODE0 0x1024
+#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE2_MODE0 0x1028
+#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE3_MODE0 0x102c
+#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE1_MODE1 0x1030
+#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE2_MODE1 0x1034
+#define USB3_DP_QSERDES_COM_SSC_STEP_SIZE3_MODE1 0x1038
+#define USB3_DP_QSERDES_COM_POST_DIV 0x103c
+#define USB3_DP_QSERDES_COM_POST_DIV_MUX 0x1040
+#define USB3_DP_QSERDES_COM_BIAS_EN_CLKBUFLR_EN 0x1044
+#define USB3_DP_QSERDES_COM_CLK_ENABLE1 0x1048
+#define USB3_DP_QSERDES_COM_SYS_CLK_CTRL 0x104c
+#define USB3_DP_QSERDES_COM_SYSCLK_BUF_ENABLE 0x1050
+#define USB3_DP_QSERDES_COM_PLL_EN 0x1054
+#define USB3_DP_QSERDES_COM_PLL_IVCO 0x1058
+#define USB3_DP_QSERDES_COM_CMN_IETRIM 0x105c
+#define USB3_DP_QSERDES_COM_CMN_IPTRIM 0x1060
+#define USB3_DP_QSERDES_COM_EP_CLOCK_DETECT_CTRL 0x1064
+#define USB3_DP_QSERDES_COM_SYSCLK_DET_COMP_STATUS 0x1068
+#define USB3_DP_QSERDES_COM_CLK_EP_DIV_MODE0 0x106c
+#define USB3_DP_QSERDES_COM_CLK_EP_DIV_MODE1 0x1070
+#define USB3_DP_QSERDES_COM_CP_CTRL_MODE0 0x1074
+#define USB3_DP_QSERDES_COM_CP_CTRL_MODE1 0x1078
+#define USB3_DP_QSERDES_COM_PLL_RCTRL_MODE0 0x107c
+#define USB3_DP_QSERDES_COM_PLL_RCTRL_MODE1 0x1080
+#define USB3_DP_QSERDES_COM_PLL_CCTRL_MODE0 0x1084
+#define USB3_DP_QSERDES_COM_PLL_CCTRL_MODE1 0x1088
+#define USB3_DP_QSERDES_COM_PLL_CNTRL 0x108c
+#define USB3_DP_QSERDES_COM_BIAS_EN_CTRL_BY_PSM 0x1090
+#define USB3_DP_QSERDES_COM_SYSCLK_EN_SEL 0x1094
+#define USB3_DP_QSERDES_COM_CML_SYSCLK_SEL 0x1098
+#define USB3_DP_QSERDES_COM_RESETSM_CNTRL 0x109c
+#define USB3_DP_QSERDES_COM_RESETSM_CNTRL2 0x10a0
+#define USB3_DP_QSERDES_COM_LOCK_CMP_EN 0x10a4
+#define USB3_DP_QSERDES_COM_LOCK_CMP_CFG 0x10a8
+#define USB3_DP_QSERDES_COM_LOCK_CMP1_MODE0 0x10ac
+#define USB3_DP_QSERDES_COM_LOCK_CMP2_MODE0 0x10b0
+#define USB3_DP_QSERDES_COM_LOCK_CMP1_MODE1 0x10b4
+#define USB3_DP_QSERDES_COM_LOCK_CMP2_MODE1 0x10b8
+#define USB3_DP_QSERDES_COM_DEC_START_MODE0 0x10bc
+#define USB3_DP_QSERDES_COM_DEC_START_MSB_MODE0 0x10c0
+#define USB3_DP_QSERDES_COM_DEC_START_MODE1 0x10c4
+#define USB3_DP_QSERDES_COM_DEC_START_MSB_MODE1 0x10c8
+#define USB3_DP_QSERDES_COM_DIV_FRAC_START1_MODE0 0x10cc
+#define USB3_DP_QSERDES_COM_DIV_FRAC_START2_MODE0 0x10d0
+#define USB3_DP_QSERDES_COM_DIV_FRAC_START3_MODE0 0x10d4
+#define USB3_DP_QSERDES_COM_DIV_FRAC_START1_MODE1 0x10d8
+#define USB3_DP_QSERDES_COM_DIV_FRAC_START2_MODE1 0x10dc
+#define USB3_DP_QSERDES_COM_DIV_FRAC_START3_MODE1 0x10e0
+#define USB3_DP_QSERDES_COM_INTEGLOOP_INITVAL 0x10e4
+#define USB3_DP_QSERDES_COM_INTEGLOOP_EN 0x10e8
+#define USB3_DP_QSERDES_COM_INTEGLOOP_GAIN0_MODE0 0x10ec
+#define USB3_DP_QSERDES_COM_INTEGLOOP_GAIN1_MODE0 0x10f0
+#define USB3_DP_QSERDES_COM_INTEGLOOP_GAIN0_MODE1 0x10f4
+#define USB3_DP_QSERDES_COM_INTEGLOOP_GAIN1_MODE1 0x10f8
+#define USB3_DP_QSERDES_COM_INTEGLOOP_P_PATH_GAIN0 0x10fc
+#define USB3_DP_QSERDES_COM_INTEGLOOP_P_PATH_GAIN1 0x1100
+#define USB3_DP_QSERDES_COM_VCOCAL_DEADMAN_CTRL 0x1104
+#define USB3_DP_QSERDES_COM_VCO_TUNE_CTRL 0x1108
+#define USB3_DP_QSERDES_COM_VCO_TUNE_MAP 0x110c
+#define USB3_DP_QSERDES_COM_VCO_TUNE1_MODE0 0x1110
+#define USB3_DP_QSERDES_COM_VCO_TUNE2_MODE0 0x1114
+#define USB3_DP_QSERDES_COM_VCO_TUNE1_MODE1 0x1118
+#define USB3_DP_QSERDES_COM_VCO_TUNE2_MODE1 0x111c
+#define USB3_DP_QSERDES_COM_VCO_TUNE_INITVAL1 0x1120
+#define USB3_DP_QSERDES_COM_VCO_TUNE_INITVAL2 0x1124
+#define USB3_DP_QSERDES_COM_VCO_TUNE_MINVAL1 0x1128
+#define USB3_DP_QSERDES_COM_VCO_TUNE_MINVAL2 0x112c
+#define USB3_DP_QSERDES_COM_VCO_TUNE_MAXVAL1 0x1130
+#define USB3_DP_QSERDES_COM_VCO_TUNE_MAXVAL2 0x1134
+#define USB3_DP_QSERDES_COM_VCO_TUNE_TIMER1 0x1138
+#define USB3_DP_QSERDES_COM_VCO_TUNE_TIMER2 0x113c
+#define USB3_DP_QSERDES_COM_CMN_STATUS 0x1140
+#define USB3_DP_QSERDES_COM_RESET_SM_STATUS 0x1144
+#define USB3_DP_QSERDES_COM_RESTRIM_CODE_STATUS 0x1148
+#define USB3_DP_QSERDES_COM_PLLCAL_CODE1_STATUS 0x114c
+#define USB3_DP_QSERDES_COM_PLLCAL_CODE2_STATUS 0x1150
+#define USB3_DP_QSERDES_COM_CLK_SELECT 0x1154
+#define USB3_DP_QSERDES_COM_HSCLK_SEL 0x1158
+#define USB3_DP_QSERDES_COM_HSCLK_HS_SWITCH_SEL 0x115c
+#define USB3_DP_QSERDES_COM_INTEGLOOP_BINCODE_STATUS 0x1160
+#define USB3_DP_QSERDES_COM_PLL_ANALOG 0x1164
+#define USB3_DP_QSERDES_COM_CORECLK_DIV_MODE0 0x1168
+#define USB3_DP_QSERDES_COM_CORECLK_DIV_MODE1 0x116c
+#define USB3_DP_QSERDES_COM_SW_RESET 0x1170
+#define USB3_DP_QSERDES_COM_CORE_CLK_EN 0x1174
+#define USB3_DP_QSERDES_COM_C_READY_STATUS 0x1178
+#define USB3_DP_QSERDES_COM_CMN_CONFIG 0x117c
+#define USB3_DP_QSERDES_COM_CMN_RATE_OVERRIDE 0x1180
+#define USB3_DP_QSERDES_COM_SVS_MODE_CLK_SEL 0x1184
+#define USB3_DP_QSERDES_COM_DEBUG_BUS0 0x1188
+#define USB3_DP_QSERDES_COM_DEBUG_BUS1 0x118c
+#define USB3_DP_QSERDES_COM_DEBUG_BUS2 0x1190
+#define USB3_DP_QSERDES_COM_DEBUG_BUS3 0x1194
+#define USB3_DP_QSERDES_COM_DEBUG_BUS_SEL 0x1198
+#define USB3_DP_QSERDES_COM_CMN_MISC1 0x119c
+#define USB3_DP_QSERDES_COM_CMN_MODE 0x11a0
+#define USB3_DP_QSERDES_COM_CMN_MODE_CONTD 0x11a4
+#define USB3_DP_QSERDES_COM_VCO_DC_LEVEL_CTRL 0x11a8
+#define USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE0 0x11ac
+#define USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE0 0x11b0
+#define USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE1_MODE1 0x11b4
+#define USB3_DP_QSERDES_COM_BIN_VCOCAL_CMP_CODE2_MODE1 0x11b8
+#define USB3_DP_QSERDES_COM_BIN_VCOCAL_HSCLK_SEL 0x11bc
+#define USB3_DP_QSERDES_COM_RESERVED_1 0x11c0
+#define USB3_DP_QSERDES_TXA_BIST_MODE_LANENO 0x1200
+#define USB3_DP_QSERDES_TXA_BIST_INVERT 0x1204
+#define USB3_DP_QSERDES_TXA_CLKBUF_ENABLE 0x1208
+#define USB3_DP_QSERDES_TXA_TX_EMP_POST1_LVL 0x120c
+#define USB3_DP_QSERDES_TXA_TX_IDLE_LVL_LARGE_AMP 0x1210
+#define USB3_DP_QSERDES_TXA_TX_DRV_LVL 0x1214
+#define USB3_DP_QSERDES_TXA_TX_DRV_LVL_OFFSET 0x1218
+#define USB3_DP_QSERDES_TXA_RESET_TSYNC_EN 0x121c
+#define USB3_DP_QSERDES_TXA_PRE_STALL_LDO_BOOST_EN 0x1220
+#define USB3_DP_QSERDES_TXA_TX_BAND 0x1224
+#define USB3_DP_QSERDES_TXA_SLEW_CNTL 0x1228
+#define USB3_DP_QSERDES_TXA_INTERFACE_SELECT 0x122c
+#define USB3_DP_QSERDES_TXA_LPB_EN 0x1230
+#define USB3_DP_QSERDES_TXA_RES_CODE_LANE_TX 0x1234
+#define USB3_DP_QSERDES_TXA_RES_CODE_LANE_RX 0x1238
+#define USB3_DP_QSERDES_TXA_RES_CODE_LANE_OFFSET_TX 0x123c
+#define USB3_DP_QSERDES_TXA_RES_CODE_LANE_OFFSET_RX 0x1240
+#define USB3_DP_QSERDES_TXA_PERL_LENGTH1 0x1244
+#define USB3_DP_QSERDES_TXA_PERL_LENGTH2 0x1248
+#define USB3_DP_QSERDES_TXA_SERDES_BYP_EN_OUT 0x124c
+#define USB3_DP_QSERDES_TXA_DEBUG_BUS_SEL 0x1250
+#define USB3_DP_QSERDES_TXA_TRANSCEIVER_BIAS_EN 0x1254
+#define USB3_DP_QSERDES_TXA_HIGHZ_DRVR_EN 0x1258
+#define USB3_DP_QSERDES_TXA_TX_POL_INV 0x125c
+#define USB3_DP_QSERDES_TXA_PARRATE_REC_DETECT_IDLE_EN 0x1260
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN1 0x1264
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN2 0x1268
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN3 0x126c
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN4 0x1270
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN5 0x1274
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN6 0x1278
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN7 0x127c
+#define USB3_DP_QSERDES_TXA_BIST_PATTERN8 0x1280
+#define USB3_DP_QSERDES_TXA_LANE_MODE_1 0x1284
+#define USB3_DP_QSERDES_TXA_LANE_MODE_2 0x1288
+#define USB3_DP_QSERDES_TXA_LANE_MODE_3 0x128c
+#define USB3_DP_QSERDES_TXA_LANE_MODE_4 0x1290
+#define USB3_DP_QSERDES_TXA_LANE_MODE_5 0x1294
+#define USB3_DP_QSERDES_TXA_ATB_SEL1 0x1298
+#define USB3_DP_QSERDES_TXA_ATB_SEL2 0x129c
+#define USB3_DP_QSERDES_TXA_RCV_DETECT_LVL 0x12a0
+#define USB3_DP_QSERDES_TXA_RCV_DETECT_LVL_2 0x12a4
+#define USB3_DP_QSERDES_TXA_PRBS_SEED1 0x12a8
+#define USB3_DP_QSERDES_TXA_PRBS_SEED2 0x12ac
+#define USB3_DP_QSERDES_TXA_PRBS_SEED3 0x12b0
+#define USB3_DP_QSERDES_TXA_PRBS_SEED4 0x12b4
+#define USB3_DP_QSERDES_TXA_RESET_GEN 0x12b8
+#define USB3_DP_QSERDES_TXA_RESET_GEN_MUXES 0x12bc
+#define USB3_DP_QSERDES_TXA_TRAN_DRVR_EMP_EN 0x12c0
+#define USB3_DP_QSERDES_TXA_TX_INTERFACE_MODE 0x12c4
+#define USB3_DP_QSERDES_TXA_VMODE_CTRL1 0x12c8
+#define USB3_DP_QSERDES_TXA_ALOG_OBSV_BUS_CTRL_1 0x12cc
+#define USB3_DP_QSERDES_TXA_BIST_STATUS 0x12d0
+#define USB3_DP_QSERDES_TXA_BIST_ERROR_COUNT1 0x12d4
+#define USB3_DP_QSERDES_TXA_BIST_ERROR_COUNT2 0x12d8
+#define USB3_DP_QSERDES_TXA_ALOG_OBSV_BUS_STATUS_1 0x12dc
+#define USB3_DP_QSERDES_TXA_LANE_DIG_CONFIG 0x12e0
+#define USB3_DP_QSERDES_TXA_PI_QEC_CTRL 0x12e4
+#define USB3_DP_QSERDES_TXA_PRE_EMPH 0x12e8
+#define USB3_DP_QSERDES_TXA_SW_RESET 0x12ec
+#define USB3_DP_QSERDES_TXA_DCC_OFFSET 0x12f0
+#define USB3_DP_QSERDES_TXA_DIG_BKUP_CTRL 0x12f4
+#define USB3_DP_QSERDES_TXA_DEBUG_BUS0 0x12f8
+#define USB3_DP_QSERDES_TXA_DEBUG_BUS1 0x12fc
+#define USB3_DP_QSERDES_TXA_DEBUG_BUS2 0x1300
+#define USB3_DP_QSERDES_TXA_DEBUG_BUS3 0x1304
+#define USB3_DP_QSERDES_TXA_READ_EQCODE 0x1308
+#define USB3_DP_QSERDES_TXA_READ_OFFSETCODE 0x130c
+#define USB3_DP_QSERDES_TXA_IA_ERROR_COUNTER_LOW 0x1310
+#define USB3_DP_QSERDES_TXA_IA_ERROR_COUNTER_HIGH 0x1314
+#define USB3_DP_QSERDES_TXA_VGA_READ_CODE 0x1318
+#define USB3_DP_QSERDES_TXA_VTH_READ_CODE 0x131c
+#define USB3_DP_QSERDES_TXA_DFE_TAP1_READ_CODE 0x1320
+#define USB3_DP_QSERDES_TXA_DFE_TAP2_READ_CODE 0x1324
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_I 0x1328
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_IBAR 0x132c
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_Q 0x1330
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_QBAR 0x1334
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_A 0x1338
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_ABAR 0x133c
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_SM_ON 0x1340
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_CAL_DONE 0x1344
+#define USB3_DP_QSERDES_TXA_IDAC_STATUS_SIGNERROR 0x1348
+#define USB3_DP_QSERDES_TXA_DCC_CAL_STATUS 0x134c
+#define USB3_DP_QSERDES_RXA_UCDR_FO_GAIN_HALF 0x1400
+#define USB3_DP_QSERDES_RXA_UCDR_FO_GAIN_QUARTER 0x1404
+#define USB3_DP_QSERDES_RXA_UCDR_FO_GAIN 0x1408
+#define USB3_DP_QSERDES_RXA_UCDR_SO_GAIN_HALF 0x140c
+#define USB3_DP_QSERDES_RXA_UCDR_SO_GAIN_QUARTER 0x1410
+#define USB3_DP_QSERDES_RXA_UCDR_SO_GAIN 0x1414
+#define USB3_DP_QSERDES_RXA_UCDR_SVS_FO_GAIN_HALF 0x1418
+#define USB3_DP_QSERDES_RXA_UCDR_SVS_FO_GAIN_QUARTER 0x141c
+#define USB3_DP_QSERDES_RXA_UCDR_SVS_FO_GAIN 0x1420
+#define USB3_DP_QSERDES_RXA_UCDR_SVS_SO_GAIN_HALF 0x1424
+#define USB3_DP_QSERDES_RXA_UCDR_SVS_SO_GAIN_QUARTER 0x1428
+#define USB3_DP_QSERDES_RXA_UCDR_SVS_SO_GAIN 0x142c
+#define USB3_DP_QSERDES_RXA_UCDR_FASTLOCK_FO_GAIN 0x1430
+#define USB3_DP_QSERDES_RXA_UCDR_SO_SATURATION_AND_ENABLE 0x1434
+#define USB3_DP_QSERDES_RXA_UCDR_FO_TO_SO_DELAY 0x1438
+#define USB3_DP_QSERDES_RXA_UCDR_FASTLOCK_COUNT_LOW 0x143c
+#define USB3_DP_QSERDES_RXA_UCDR_FASTLOCK_COUNT_HIGH 0x1440
+#define USB3_DP_QSERDES_RXA_UCDR_PI_CONTROLS 0x1444
+#define USB3_DP_QSERDES_RXA_UCDR_PI_CTRL2 0x1448
+#define USB3_DP_QSERDES_RXA_UCDR_SB2_THRESH1 0x144c
+#define USB3_DP_QSERDES_RXA_UCDR_SB2_THRESH2 0x1450
+#define USB3_DP_QSERDES_RXA_UCDR_SB2_GAIN1 0x1454
+#define USB3_DP_QSERDES_RXA_UCDR_SB2_GAIN2 0x1458
+#define USB3_DP_QSERDES_RXA_AUX_CONTROL 0x145c
+#define USB3_DP_QSERDES_RXA_AUX_DATA_TCOARSE_TFINE 0x1460
+#define USB3_DP_QSERDES_RXA_RCLK_AUXDATA_SEL 0x1464
+#define USB3_DP_QSERDES_RXA_AC_JTAG_ENABLE 0x1468
+#define USB3_DP_QSERDES_RXA_AC_JTAG_INITP 0x146c
+#define USB3_DP_QSERDES_RXA_AC_JTAG_INITN 0x1470
+#define USB3_DP_QSERDES_RXA_AC_JTAG_LVL 0x1474
+#define USB3_DP_QSERDES_RXA_AC_JTAG_MODE 0x1478
+#define USB3_DP_QSERDES_RXA_AC_JTAG_RESET 0x147c
+#define USB3_DP_QSERDES_RXA_RX_TERM_BW 0x1480
+#define USB3_DP_QSERDES_RXA_RX_RCVR_IQ_EN 0x1484
+#define USB3_DP_QSERDES_RXA_RX_IDAC_I_DC_OFFSETS 0x1488
+#define USB3_DP_QSERDES_RXA_RX_IDAC_IBAR_DC_OFFSETS 0x148c
+#define USB3_DP_QSERDES_RXA_RX_IDAC_Q_DC_OFFSETS 0x1490
+#define USB3_DP_QSERDES_RXA_RX_IDAC_QBAR_DC_OFFSETS 0x1494
+#define USB3_DP_QSERDES_RXA_RX_IDAC_A_DC_OFFSETS 0x1498
+#define USB3_DP_QSERDES_RXA_RX_IDAC_ABAR_DC_OFFSETS 0x149c
+#define USB3_DP_QSERDES_RXA_RX_IDAC_EN 0x14a0
+#define USB3_DP_QSERDES_RXA_RX_IDAC_ENABLES 0x14a4
+#define USB3_DP_QSERDES_RXA_RX_IDAC_SIGN 0x14a8
+#define USB3_DP_QSERDES_RXA_RX_HIGHZ_HIGHRATE 0x14ac
+#define USB3_DP_QSERDES_RXA_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x14b0
+#define USB3_DP_QSERDES_RXA_DFE_1 0x14b4
+#define USB3_DP_QSERDES_RXA_DFE_2 0x14b8
+#define USB3_DP_QSERDES_RXA_DFE_3 0x14bc
+#define USB3_DP_QSERDES_RXA_DFE_4 0x14c0
+#define USB3_DP_QSERDES_RXA_TX_ADAPT_PRE_THRESH1 0x14c4
+#define USB3_DP_QSERDES_RXA_TX_ADAPT_PRE_THRESH2 0x14c8
+#define USB3_DP_QSERDES_RXA_TX_ADAPT_POST_THRESH 0x14cc
+#define USB3_DP_QSERDES_RXA_TX_ADAPT_MAIN_THRESH 0x14d0
+#define USB3_DP_QSERDES_RXA_VGA_CAL_CNTRL1 0x14d4
+#define USB3_DP_QSERDES_RXA_VGA_CAL_CNTRL2 0x14d8
+#define USB3_DP_QSERDES_RXA_GM_CAL 0x14dc
+#define USB3_DP_QSERDES_RXA_RX_VGA_GAIN2_LSB 0x14e0
+#define USB3_DP_QSERDES_RXA_RX_VGA_GAIN2_MSB 0x14e4
+#define USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL1 0x14e8
+#define USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL2 0x14ec
+#define USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL3 0x14f0
+#define USB3_DP_QSERDES_RXA_RX_EQU_ADAPTOR_CNTRL4 0x14f4
+#define USB3_DP_QSERDES_RXA_RX_IDAC_TSETTLE_LOW 0x14f8
+#define USB3_DP_QSERDES_RXA_RX_IDAC_TSETTLE_HIGH 0x14fc
+#define USB3_DP_QSERDES_RXA_RX_IDAC_MEASURE_TIME 0x1500
+#define USB3_DP_QSERDES_RXA_RX_IDAC_ACCUMULATOR 0x1504
+#define USB3_DP_QSERDES_RXA_RX_EQ_OFFSET_LSB 0x1508
+#define USB3_DP_QSERDES_RXA_RX_EQ_OFFSET_MSB 0x150c
+#define USB3_DP_QSERDES_RXA_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x1510
+#define USB3_DP_QSERDES_RXA_RX_OFFSET_ADAPTOR_CNTRL2 0x1514
+#define USB3_DP_QSERDES_RXA_SIGDET_ENABLES 0x1518
+#define USB3_DP_QSERDES_RXA_SIGDET_CNTRL 0x151c
+#define USB3_DP_QSERDES_RXA_SIGDET_LVL 0x1520
+#define USB3_DP_QSERDES_RXA_SIGDET_DEGLITCH_CNTRL 0x1524
+#define USB3_DP_QSERDES_RXA_RX_BAND 0x1528
+#define USB3_DP_QSERDES_RXA_CDR_FREEZE_UP_DN 0x152c
+#define USB3_DP_QSERDES_RXA_CDR_RESET_OVERRIDE 0x1530
+#define USB3_DP_QSERDES_RXA_RX_INTERFACE_MODE 0x1534
+#define USB3_DP_QSERDES_RXA_JITTER_GEN_MODE 0x1538
+#define USB3_DP_QSERDES_RXA_SJ_AMP1 0x153c
+#define USB3_DP_QSERDES_RXA_SJ_AMP2 0x1540
+#define USB3_DP_QSERDES_RXA_SJ_PER1 0x1544
+#define USB3_DP_QSERDES_RXA_SJ_PER2 0x1548
+#define USB3_DP_QSERDES_RXA_PPM_OFFSET1 0x154c
+#define USB3_DP_QSERDES_RXA_PPM_OFFSET2 0x1550
+#define USB3_DP_QSERDES_RXA_SIGN_PPM_PERIOD1 0x1554
+#define USB3_DP_QSERDES_RXA_SIGN_PPM_PERIOD2 0x1558
+#define USB3_DP_QSERDES_RXA_RX_MODE_00_LOW 0x155c
+#define USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH 0x1560
+#define USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH2 0x1564
+#define USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH3 0x1568
+#define USB3_DP_QSERDES_RXA_RX_MODE_00_HIGH4 0x156c
+#define USB3_DP_QSERDES_RXA_RX_MODE_01_LOW 0x1570
+#define USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH 0x1574
+#define USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH2 0x1578
+#define USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH3 0x157c
+#define USB3_DP_QSERDES_RXA_RX_MODE_01_HIGH4 0x1580
+#define USB3_DP_QSERDES_RXA_RX_MODE_10_LOW 0x1584
+#define USB3_DP_QSERDES_RXA_RX_MODE_10_HIGH 0x1588
+#define USB3_DP_QSERDES_RXA_RX_MODE_10_HIGH2 0x158c
+#define USB3_DP_QSERDES_RXA_RX_MODE_10_HIGH3 0x1590
+#define USB3_DP_QSERDES_RXA_RX_MODE_10_HIGH4 0x1594
+#define USB3_DP_QSERDES_RXA_PHPRE_CTRL 0x1598
+#define USB3_DP_QSERDES_RXA_PHPRE_INITVAL 0x159c
+#define USB3_DP_QSERDES_RXA_DFE_EN_TIMER 0x15a0
+#define USB3_DP_QSERDES_RXA_DFE_CTLE_POST_CAL_OFFSET 0x15a4
+#define USB3_DP_QSERDES_RXA_DCC_CTRL1 0x15a8
+#define USB3_DP_QSERDES_RXA_DCC_CTRL2 0x15ac
+#define USB3_DP_QSERDES_RXA_VTH_CODE 0x15b0
+#define USB3_DP_QSERDES_RXA_VTH_MIN_THRESH 0x15b4
+#define USB3_DP_QSERDES_RXA_VTH_MAX_THRESH 0x15b8
+#define USB3_DP_QSERDES_RXA_ALOG_OBSV_BUS_CTRL_1 0x15bc
+#define USB3_DP_QSERDES_RXA_PI_CTRL1 0x15c0
+#define USB3_DP_QSERDES_RXA_PI_CTRL2 0x15c4
+#define USB3_DP_QSERDES_RXA_PI_QUAD 0x15c8
+#define USB3_DP_QSERDES_RXA_IDATA1 0x15cc
+#define USB3_DP_QSERDES_RXA_IDATA2 0x15d0
+#define USB3_DP_QSERDES_RXA_AUX_DATA1 0x15d4
+#define USB3_DP_QSERDES_RXA_AUX_DATA2 0x15d8
+#define USB3_DP_QSERDES_RXA_AC_JTAG_OUTP 0x15dc
+#define USB3_DP_QSERDES_RXA_AC_JTAG_OUTN 0x15e0
+#define USB3_DP_QSERDES_RXA_RX_SIGDET 0x15e4
+#define USB3_DP_QSERDES_RXA_ALOG_OBSV_BUS_STATUS_1 0x15e8
+#define USB3_DP_QSERDES_TXB_BIST_MODE_LANENO 0x1600
+#define USB3_DP_QSERDES_TXB_BIST_INVERT 0x1604
+#define USB3_DP_QSERDES_TXB_CLKBUF_ENABLE 0x1608
+#define USB3_DP_QSERDES_TXB_TX_EMP_POST1_LVL 0x160c
+#define USB3_DP_QSERDES_TXB_TX_IDLE_LVL_LARGE_AMP 0x1610
+#define USB3_DP_QSERDES_TXB_TX_DRV_LVL 0x1614
+#define USB3_DP_QSERDES_TXB_TX_DRV_LVL_OFFSET 0x1618
+#define USB3_DP_QSERDES_TXB_RESET_TSYNC_EN 0x161c
+#define USB3_DP_QSERDES_TXB_PRE_STALL_LDO_BOOST_EN 0x1620
+#define USB3_DP_QSERDES_TXB_TX_BAND 0x1624
+#define USB3_DP_QSERDES_TXB_SLEW_CNTL 0x1628
+#define USB3_DP_QSERDES_TXB_INTERFACE_SELECT 0x162c
+#define USB3_DP_QSERDES_TXB_LPB_EN 0x1630
+#define USB3_DP_QSERDES_TXB_RES_CODE_LANE_TX 0x1634
+#define USB3_DP_QSERDES_TXB_RES_CODE_LANE_RX 0x1638
+#define USB3_DP_QSERDES_TXB_RES_CODE_LANE_OFFSET_TX 0x163c
+#define USB3_DP_QSERDES_TXB_RES_CODE_LANE_OFFSET_RX 0x1640
+#define USB3_DP_QSERDES_TXB_PERL_LENGTH1 0x1644
+#define USB3_DP_QSERDES_TXB_PERL_LENGTH2 0x1648
+#define USB3_DP_QSERDES_TXB_SERDES_BYP_EN_OUT 0x164c
+#define USB3_DP_QSERDES_TXB_DEBUG_BUS_SEL 0x1650
+#define USB3_DP_QSERDES_TXB_TRANSCEIVER_BIAS_EN 0x1654
+#define USB3_DP_QSERDES_TXB_HIGHZ_DRVR_EN 0x1658
+#define USB3_DP_QSERDES_TXB_TX_POL_INV 0x165c
+#define USB3_DP_QSERDES_TXB_PARRATE_REC_DETECT_IDLE_EN 0x1660
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN1 0x1664
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN2 0x1668
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN3 0x166c
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN4 0x1670
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN5 0x1674
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN6 0x1678
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN7 0x167c
+#define USB3_DP_QSERDES_TXB_BIST_PATTERN8 0x1680
+#define USB3_DP_QSERDES_TXB_LANE_MODE_1 0x1684
+#define USB3_DP_QSERDES_TXB_LANE_MODE_2 0x1688
+#define USB3_DP_QSERDES_TXB_LANE_MODE_3 0x168c
+#define USB3_DP_QSERDES_TXB_LANE_MODE_4 0x1690
+#define USB3_DP_QSERDES_TXB_LANE_MODE_5 0x1694
+#define USB3_DP_QSERDES_TXB_ATB_SEL1 0x1698
+#define USB3_DP_QSERDES_TXB_ATB_SEL2 0x169c
+#define USB3_DP_QSERDES_TXB_RCV_DETECT_LVL 0x16a0
+#define USB3_DP_QSERDES_TXB_RCV_DETECT_LVL_2 0x16a4
+#define USB3_DP_QSERDES_TXB_PRBS_SEED1 0x16a8
+#define USB3_DP_QSERDES_TXB_PRBS_SEED2 0x16ac
+#define USB3_DP_QSERDES_TXB_PRBS_SEED3 0x16b0
+#define USB3_DP_QSERDES_TXB_PRBS_SEED4 0x16b4
+#define USB3_DP_QSERDES_TXB_RESET_GEN 0x16b8
+#define USB3_DP_QSERDES_TXB_RESET_GEN_MUXES 0x16bc
+#define USB3_DP_QSERDES_TXB_TRAN_DRVR_EMP_EN 0x16c0
+#define USB3_DP_QSERDES_TXB_TX_INTERFACE_MODE 0x16c4
+#define USB3_DP_QSERDES_TXB_VMODE_CTRL1 0x16c8
+#define USB3_DP_QSERDES_TXB_ALOG_OBSV_BUS_CTRL_1 0x16cc
+#define USB3_DP_QSERDES_TXB_BIST_STATUS 0x16d0
+#define USB3_DP_QSERDES_TXB_BIST_ERROR_COUNT1 0x16d4
+#define USB3_DP_QSERDES_TXB_BIST_ERROR_COUNT2 0x16d8
+#define USB3_DP_QSERDES_TXB_ALOG_OBSV_BUS_STATUS_1 0x16dc
+#define USB3_DP_QSERDES_TXB_LANE_DIG_CONFIG 0x16e0
+#define USB3_DP_QSERDES_TXB_PI_QEC_CTRL 0x16e4
+#define USB3_DP_QSERDES_TXB_PRE_EMPH 0x16e8
+#define USB3_DP_QSERDES_TXB_SW_RESET 0x16ec
+#define USB3_DP_QSERDES_TXB_DCC_OFFSET 0x16f0
+#define USB3_DP_QSERDES_TXB_DIG_BKUP_CTRL 0x16f4
+#define USB3_DP_QSERDES_TXB_DEBUG_BUS0 0x16f8
+#define USB3_DP_QSERDES_TXB_DEBUG_BUS1 0x16fc
+#define USB3_DP_QSERDES_TXB_DEBUG_BUS2 0x1700
+#define USB3_DP_QSERDES_TXB_DEBUG_BUS3 0x1704
+#define USB3_DP_QSERDES_TXB_READ_EQCODE 0x1708
+#define USB3_DP_QSERDES_TXB_READ_OFFSETCODE 0x170c
+#define USB3_DP_QSERDES_TXB_IA_ERROR_COUNTER_LOW 0x1710
+#define USB3_DP_QSERDES_TXB_IA_ERROR_COUNTER_HIGH 0x1714
+#define USB3_DP_QSERDES_TXB_VGA_READ_CODE 0x1718
+#define USB3_DP_QSERDES_TXB_VTH_READ_CODE 0x171c
+#define USB3_DP_QSERDES_TXB_DFE_TAP1_READ_CODE 0x1720
+#define USB3_DP_QSERDES_TXB_DFE_TAP2_READ_CODE 0x1724
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_I 0x1728
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_IBAR 0x172c
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_Q 0x1730
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_QBAR 0x1734
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_A 0x1738
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_ABAR 0x173c
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_SM_ON 0x1740
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_CAL_DONE 0x1744
+#define USB3_DP_QSERDES_TXB_IDAC_STATUS_SIGNERROR 0x1748
+#define USB3_DP_QSERDES_TXB_DCC_CAL_STATUS 0x174c
+#define USB3_DP_QSERDES_RXB_UCDR_FO_GAIN_HALF 0x1800
+#define USB3_DP_QSERDES_RXB_UCDR_FO_GAIN_QUARTER 0x1804
+#define USB3_DP_QSERDES_RXB_UCDR_FO_GAIN 0x1808
+#define USB3_DP_QSERDES_RXB_UCDR_SO_GAIN_HALF 0x180c
+#define USB3_DP_QSERDES_RXB_UCDR_SO_GAIN_QUARTER 0x1810
+#define USB3_DP_QSERDES_RXB_UCDR_SO_GAIN 0x1814
+#define USB3_DP_QSERDES_RXB_UCDR_SVS_FO_GAIN_HALF 0x1818
+#define USB3_DP_QSERDES_RXB_UCDR_SVS_FO_GAIN_QUARTER 0x181c
+#define USB3_DP_QSERDES_RXB_UCDR_SVS_FO_GAIN 0x1820
+#define USB3_DP_QSERDES_RXB_UCDR_SVS_SO_GAIN_HALF 0x1824
+#define USB3_DP_QSERDES_RXB_UCDR_SVS_SO_GAIN_QUARTER 0x1828
+#define USB3_DP_QSERDES_RXB_UCDR_SVS_SO_GAIN 0x182c
+#define USB3_DP_QSERDES_RXB_UCDR_FASTLOCK_FO_GAIN 0x1830
+#define USB3_DP_QSERDES_RXB_UCDR_SO_SATURATION_AND_ENABLE 0x1834
+#define USB3_DP_QSERDES_RXB_UCDR_FO_TO_SO_DELAY 0x1838
+#define USB3_DP_QSERDES_RXB_UCDR_FASTLOCK_COUNT_LOW 0x183c
+#define USB3_DP_QSERDES_RXB_UCDR_FASTLOCK_COUNT_HIGH 0x1840
+#define USB3_DP_QSERDES_RXB_UCDR_PI_CONTROLS 0x1844
+#define USB3_DP_QSERDES_RXB_UCDR_PI_CTRL2 0x1848
+#define USB3_DP_QSERDES_RXB_UCDR_SB2_THRESH1 0x184c
+#define USB3_DP_QSERDES_RXB_UCDR_SB2_THRESH2 0x1850
+#define USB3_DP_QSERDES_RXB_UCDR_SB2_GAIN1 0x1854
+#define USB3_DP_QSERDES_RXB_UCDR_SB2_GAIN2 0x1858
+#define USB3_DP_QSERDES_RXB_AUX_CONTROL 0x185c
+#define USB3_DP_QSERDES_RXB_AUX_DATA_TCOARSE_TFINE 0x1860
+#define USB3_DP_QSERDES_RXB_RCLK_AUXDATA_SEL 0x1864
+#define USB3_DP_QSERDES_RXB_AC_JTAG_ENABLE 0x1868
+#define USB3_DP_QSERDES_RXB_AC_JTAG_INITP 0x186c
+#define USB3_DP_QSERDES_RXB_AC_JTAG_INITN 0x1870
+#define USB3_DP_QSERDES_RXB_AC_JTAG_LVL 0x1874
+#define USB3_DP_QSERDES_RXB_AC_JTAG_MODE 0x1878
+#define USB3_DP_QSERDES_RXB_AC_JTAG_RESET 0x187c
+#define USB3_DP_QSERDES_RXB_RX_TERM_BW 0x1880
+#define USB3_DP_QSERDES_RXB_RX_RCVR_IQ_EN 0x1884
+#define USB3_DP_QSERDES_RXB_RX_IDAC_I_DC_OFFSETS 0x1888
+#define USB3_DP_QSERDES_RXB_RX_IDAC_IBAR_DC_OFFSETS 0x188c
+#define USB3_DP_QSERDES_RXB_RX_IDAC_Q_DC_OFFSETS 0x1890
+#define USB3_DP_QSERDES_RXB_RX_IDAC_QBAR_DC_OFFSETS 0x1894
+#define USB3_DP_QSERDES_RXB_RX_IDAC_A_DC_OFFSETS 0x1898
+#define USB3_DP_QSERDES_RXB_RX_IDAC_ABAR_DC_OFFSETS 0x189c
+#define USB3_DP_QSERDES_RXB_RX_IDAC_EN 0x18a0
+#define USB3_DP_QSERDES_RXB_RX_IDAC_ENABLES 0x18a4
+#define USB3_DP_QSERDES_RXB_RX_IDAC_SIGN 0x18a8
+#define USB3_DP_QSERDES_RXB_RX_HIGHZ_HIGHRATE 0x18ac
+#define USB3_DP_QSERDES_RXB_RX_TERM_AC_BYPASS_DC_COUPLE_OFFSET 0x18b0
+#define USB3_DP_QSERDES_RXB_DFE_1 0x18b4
+#define USB3_DP_QSERDES_RXB_DFE_2 0x18b8
+#define USB3_DP_QSERDES_RXB_DFE_3 0x18bc
+#define USB3_DP_QSERDES_RXB_DFE_4 0x18c0
+#define USB3_DP_QSERDES_RXB_TX_ADAPT_PRE_THRESH1 0x18c4
+#define USB3_DP_QSERDES_RXB_TX_ADAPT_PRE_THRESH2 0x18c8
+#define USB3_DP_QSERDES_RXB_TX_ADAPT_POST_THRESH 0x18cc
+#define USB3_DP_QSERDES_RXB_TX_ADAPT_MAIN_THRESH 0x18d0
+#define USB3_DP_QSERDES_RXB_VGA_CAL_CNTRL1 0x18d4
+#define USB3_DP_QSERDES_RXB_VGA_CAL_CNTRL2 0x18d8
+#define USB3_DP_QSERDES_RXB_GM_CAL 0x18dc
+#define USB3_DP_QSERDES_RXB_RX_VGA_GAIN2_LSB 0x18e0
+#define USB3_DP_QSERDES_RXB_RX_VGA_GAIN2_MSB 0x18e4
+#define USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL1 0x18e8
+#define USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL2 0x18ec
+#define USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL3 0x18f0
+#define USB3_DP_QSERDES_RXB_RX_EQU_ADAPTOR_CNTRL4 0x18f4
+#define USB3_DP_QSERDES_RXB_RX_IDAC_TSETTLE_LOW 0x18f8
+#define USB3_DP_QSERDES_RXB_RX_IDAC_TSETTLE_HIGH 0x18fc
+#define USB3_DP_QSERDES_RXB_RX_IDAC_MEASURE_TIME 0x1900
+#define USB3_DP_QSERDES_RXB_RX_IDAC_ACCUMULATOR 0x1904
+#define USB3_DP_QSERDES_RXB_RX_EQ_OFFSET_LSB 0x1908
+#define USB3_DP_QSERDES_RXB_RX_EQ_OFFSET_MSB 0x190c
+#define USB3_DP_QSERDES_RXB_RX_EQ_OFFSET_ADAPTOR_CNTRL1 0x1910
+#define USB3_DP_QSERDES_RXB_RX_OFFSET_ADAPTOR_CNTRL2 0x1914
+#define USB3_DP_QSERDES_RXB_SIGDET_ENABLES 0x1918
+#define USB3_DP_QSERDES_RXB_SIGDET_CNTRL 0x191c
+#define USB3_DP_QSERDES_RXB_SIGDET_LVL 0x1920
+#define USB3_DP_QSERDES_RXB_SIGDET_DEGLITCH_CNTRL 0x1924
+#define USB3_DP_QSERDES_RXB_RX_BAND 0x1928
+#define USB3_DP_QSERDES_RXB_CDR_FREEZE_UP_DN 0x192c
+#define USB3_DP_QSERDES_RXB_CDR_RESET_OVERRIDE 0x1930
+#define USB3_DP_QSERDES_RXB_RX_INTERFACE_MODE 0x1934
+#define USB3_DP_QSERDES_RXB_JITTER_GEN_MODE 0x1938
+#define USB3_DP_QSERDES_RXB_SJ_AMP1 0x193c
+#define USB3_DP_QSERDES_RXB_SJ_AMP2 0x1940
+#define USB3_DP_QSERDES_RXB_SJ_PER1 0x1944
+#define USB3_DP_QSERDES_RXB_SJ_PER2 0x1948
+#define USB3_DP_QSERDES_RXB_PPM_OFFSET1 0x194c
+#define USB3_DP_QSERDES_RXB_PPM_OFFSET2 0x1950
+#define USB3_DP_QSERDES_RXB_SIGN_PPM_PERIOD1 0x1954
+#define USB3_DP_QSERDES_RXB_SIGN_PPM_PERIOD2 0x1958
+#define USB3_DP_QSERDES_RXB_RX_MODE_00_LOW 0x195c
+#define USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH 0x1960
+#define USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH2 0x1964
+#define USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH3 0x1968
+#define USB3_DP_QSERDES_RXB_RX_MODE_00_HIGH4 0x196c
+#define USB3_DP_QSERDES_RXB_RX_MODE_01_LOW 0x1970
+#define USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH 0x1974
+#define USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH2 0x1978
+#define USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH3 0x197c
+#define USB3_DP_QSERDES_RXB_RX_MODE_01_HIGH4 0x1980
+#define USB3_DP_QSERDES_RXB_RX_MODE_10_LOW 0x1984
+#define USB3_DP_QSERDES_RXB_RX_MODE_10_HIGH 0x1988
+#define USB3_DP_QSERDES_RXB_RX_MODE_10_HIGH2 0x198c
+#define USB3_DP_QSERDES_RXB_RX_MODE_10_HIGH3 0x1990
+#define USB3_DP_QSERDES_RXB_RX_MODE_10_HIGH4 0x1994
+#define USB3_DP_QSERDES_RXB_PHPRE_CTRL 0x1998
+#define USB3_DP_QSERDES_RXB_PHPRE_INITVAL 0x199c
+#define USB3_DP_QSERDES_RXB_DFE_EN_TIMER 0x19a0
+#define USB3_DP_QSERDES_RXB_DFE_CTLE_POST_CAL_OFFSET 0x19a4
+#define USB3_DP_QSERDES_RXB_DCC_CTRL1 0x19a8
+#define USB3_DP_QSERDES_RXB_DCC_CTRL2 0x19ac
+#define USB3_DP_QSERDES_RXB_VTH_CODE 0x19b0
+#define USB3_DP_QSERDES_RXB_VTH_MIN_THRESH 0x19b4
+#define USB3_DP_QSERDES_RXB_VTH_MAX_THRESH 0x19b8
+#define USB3_DP_QSERDES_RXB_ALOG_OBSV_BUS_CTRL_1 0x19bc
+#define USB3_DP_QSERDES_RXB_PI_CTRL1 0x19c0
+#define USB3_DP_QSERDES_RXB_PI_CTRL2 0x19c4
+#define USB3_DP_QSERDES_RXB_PI_QUAD 0x19c8
+#define USB3_DP_QSERDES_RXB_IDATA1 0x19cc
+#define USB3_DP_QSERDES_RXB_IDATA2 0x19d0
+#define USB3_DP_QSERDES_RXB_AUX_DATA1 0x19d4
+#define USB3_DP_QSERDES_RXB_AUX_DATA2 0x19d8
+#define USB3_DP_QSERDES_RXB_AC_JTAG_OUTP 0x19dc
+#define USB3_DP_QSERDES_RXB_AC_JTAG_OUTN 0x19e0
+#define USB3_DP_QSERDES_RXB_RX_SIGDET 0x19e4
+#define USB3_DP_QSERDES_RXB_ALOG_OBSV_BUS_STATUS_1 0x19e8
+#define USB3_DP_PCS_MISC_TYPEC_CTRL 0x1a00
+#define USB3_DP_PCS_MISC_TYPEC_PWRDN_CTRL 0x1a04
+#define USB3_DP_PCS_MISC_PCS_MISC_CONFIG1 0x1a08
+#define USB3_DP_PCS_MISC_CLAMP_ENABLE 0x1a0c
+#define USB3_DP_PCS_MISC_TYPEC_STATUS 0x1a10
+#define USB3_DP_PCS_MISC_PLACEHOLDER_STATUS 0x1a14
+#define USB3_DP_PCS_LN_PCS_STATUS1 0x1b00
+#define USB3_DP_PCS_LN_PCS_STATUS2 0x1b04
+#define USB3_DP_PCS_LN_PCS_STATUS2_CLEAR 0x1b08
+#define USB3_DP_PCS_LN_PCS_STATUS3 0x1b0c
+#define USB3_DP_PCS_LN_BIST_CHK_ERR_CNT_L_STATUS 0x1b10
+#define USB3_DP_PCS_LN_BIST_CHK_ERR_CNT_H_STATUS 0x1b14
+#define USB3_DP_PCS_LN_BIST_CHK_STATUS 0x1b18
+#define USB3_DP_PCS_LN_INSIG_SW_CTRL1 0x1b1c
+#define USB3_DP_PCS_LN_INSIG_MX_CTRL1 0x1b20
+#define USB3_DP_PCS_LN_OUTSIG_SW_CTRL1 0x1b24
+#define USB3_DP_PCS_LN_OUTSIG_MX_CTRL1 0x1b28
+#define USB3_DP_PCS_LN_TEST_CONTROL1 0x1b2c
+#define USB3_DP_PCS_LN_BIST_CTRL 0x1b30
+#define USB3_DP_PCS_LN_PRBS_SEED0 0x1b34
+#define USB3_DP_PCS_LN_PRBS_SEED1 0x1b38
+#define USB3_DP_PCS_LN_FIXED_PAT_CTRL 0x1b3c
+#define USB3_DP_PCS_LN_EQ_CONFIG 0x1b40
+#define USB3_DP_PCS_LN_TEST_CONTROL2 0x1b44
+#define USB3_DP_PCS_LN_TEST_CONTROL3 0x1b48
+#define USB3_DP_PCS_SW_RESET 0x1c00
+#define USB3_DP_PCS_REVISION_ID0 0x1c04
+#define USB3_DP_PCS_REVISION_ID1 0x1c08
+#define USB3_DP_PCS_REVISION_ID2 0x1c0c
+#define USB3_DP_PCS_REVISION_ID3 0x1c10
+#define USB3_DP_PCS_PCS_STATUS1 0x1c14
+#define USB3_DP_PCS_PCS_STATUS2 0x1c18
+#define USB3_DP_PCS_PCS_STATUS3 0x1c1c
+#define USB3_DP_PCS_PCS_STATUS4 0x1c20
+#define USB3_DP_PCS_PCS_STATUS5 0x1c24
+#define USB3_DP_PCS_PCS_STATUS6 0x1c28
+#define USB3_DP_PCS_PCS_STATUS7 0x1c2c
+#define USB3_DP_PCS_DEBUG_BUS_0_STATUS 0x1c30
+#define USB3_DP_PCS_DEBUG_BUS_1_STATUS 0x1c34
+#define USB3_DP_PCS_DEBUG_BUS_2_STATUS 0x1c38
+#define USB3_DP_PCS_DEBUG_BUS_3_STATUS 0x1c3c
+#define USB3_DP_PCS_POWER_DOWN_CONTROL 0x1c40
+#define USB3_DP_PCS_START_CONTROL 0x1c44
+#define USB3_DP_PCS_INSIG_SW_CTRL1 0x1c48
+#define USB3_DP_PCS_INSIG_SW_CTRL2 0x1c4c
+#define USB3_DP_PCS_INSIG_SW_CTRL3 0x1c50
+#define USB3_DP_PCS_INSIG_SW_CTRL4 0x1c54
+#define USB3_DP_PCS_INSIG_SW_CTRL5 0x1c58
+#define USB3_DP_PCS_INSIG_SW_CTRL6 0x1c5c
+#define USB3_DP_PCS_INSIG_SW_CTRL7 0x1c60
+#define USB3_DP_PCS_INSIG_SW_CTRL8 0x1c64
+#define USB3_DP_PCS_INSIG_MX_CTRL1 0x1c68
+#define USB3_DP_PCS_INSIG_MX_CTRL2 0x1c6c
+#define USB3_DP_PCS_INSIG_MX_CTRL3 0x1c70
+#define USB3_DP_PCS_INSIG_MX_CTRL4 0x1c74
+#define USB3_DP_PCS_INSIG_MX_CTRL5 0x1c78
+#define USB3_DP_PCS_INSIG_MX_CTRL7 0x1c7c
+#define USB3_DP_PCS_INSIG_MX_CTRL8 0x1c80
+#define USB3_DP_PCS_OUTSIG_SW_CTRL1 0x1c84
+#define USB3_DP_PCS_OUTSIG_MX_CTRL1 0x1c88
+#define USB3_DP_PCS_CLAMP_ENABLE 0x1c8c
+#define USB3_DP_PCS_POWER_STATE_CONFIG1 0x1c90
+#define USB3_DP_PCS_POWER_STATE_CONFIG2 0x1c94
+#define USB3_DP_PCS_FLL_CNTRL1 0x1c98
+#define USB3_DP_PCS_FLL_CNTRL2 0x1c9c
+#define USB3_DP_PCS_FLL_CNT_VAL_L 0x1ca0
+#define USB3_DP_PCS_FLL_CNT_VAL_H_TOL 0x1ca4
+#define USB3_DP_PCS_FLL_MAN_CODE 0x1ca8
+#define USB3_DP_PCS_TEST_CONTROL1 0x1cac
+#define USB3_DP_PCS_TEST_CONTROL2 0x1cb0
+#define USB3_DP_PCS_TEST_CONTROL3 0x1cb4
+#define USB3_DP_PCS_TEST_CONTROL4 0x1cb8
+#define USB3_DP_PCS_TEST_CONTROL5 0x1cbc
+#define USB3_DP_PCS_TEST_CONTROL6 0x1cc0
+#define USB3_DP_PCS_LOCK_DETECT_CONFIG1 0x1cc4
+#define USB3_DP_PCS_LOCK_DETECT_CONFIG2 0x1cc8
+#define USB3_DP_PCS_LOCK_DETECT_CONFIG3 0x1ccc
+#define USB3_DP_PCS_LOCK_DETECT_CONFIG4 0x1cd0
+#define USB3_DP_PCS_LOCK_DETECT_CONFIG5 0x1cd4
+#define USB3_DP_PCS_LOCK_DETECT_CONFIG6 0x1cd8
+#define USB3_DP_PCS_REFGEN_REQ_CONFIG1 0x1cdc
+#define USB3_DP_PCS_REFGEN_REQ_CONFIG2 0x1ce0
+#define USB3_DP_PCS_REFGEN_REQ_CONFIG3 0x1ce4
+#define USB3_DP_PCS_BIST_CTRL 0x1ce8
+#define USB3_DP_PCS_PRBS_POLY0 0x1cec
+#define USB3_DP_PCS_PRBS_POLY1 0x1cf0
+#define USB3_DP_PCS_FIXED_PAT0 0x1cf4
+#define USB3_DP_PCS_FIXED_PAT1 0x1cf8
+#define USB3_DP_PCS_FIXED_PAT2 0x1cfc
+#define USB3_DP_PCS_FIXED_PAT3 0x1d00
+#define USB3_DP_PCS_FIXED_PAT4 0x1d04
+#define USB3_DP_PCS_FIXED_PAT5 0x1d08
+#define USB3_DP_PCS_FIXED_PAT6 0x1d0c
+#define USB3_DP_PCS_FIXED_PAT7 0x1d10
+#define USB3_DP_PCS_FIXED_PAT8 0x1d14
+#define USB3_DP_PCS_FIXED_PAT9 0x1d18
+#define USB3_DP_PCS_FIXED_PAT10 0x1d1c
+#define USB3_DP_PCS_FIXED_PAT11 0x1d20
+#define USB3_DP_PCS_FIXED_PAT12 0x1d24
+#define USB3_DP_PCS_FIXED_PAT13 0x1d28
+#define USB3_DP_PCS_FIXED_PAT14 0x1d2c
+#define USB3_DP_PCS_FIXED_PAT15 0x1d30
+#define USB3_DP_PCS_TXMGN_CONFIG 0x1d34
+#define USB3_DP_PCS_G12S1_TXMGN_V0 0x1d38
+#define USB3_DP_PCS_G12S1_TXMGN_V1 0x1d3c
+#define USB3_DP_PCS_G12S1_TXMGN_V2 0x1d40
+#define USB3_DP_PCS_G12S1_TXMGN_V3 0x1d44
+#define USB3_DP_PCS_G12S1_TXMGN_V4 0x1d48
+#define USB3_DP_PCS_G12S1_TXMGN_V0_RS 0x1d4c
+#define USB3_DP_PCS_G12S1_TXMGN_V1_RS 0x1d50
+#define USB3_DP_PCS_G12S1_TXMGN_V2_RS 0x1d54
+#define USB3_DP_PCS_G12S1_TXMGN_V3_RS 0x1d58
+#define USB3_DP_PCS_G12S1_TXMGN_V4_RS 0x1d5c
+#define USB3_DP_PCS_G3S2_TXMGN_MAIN 0x1d60
+#define USB3_DP_PCS_G3S2_TXMGN_MAIN_RS 0x1d64
+#define USB3_DP_PCS_G12S1_TXDEEMPH_M6DB 0x1d68
+#define USB3_DP_PCS_G12S1_TXDEEMPH_M3P5DB 0x1d6c
+#define USB3_DP_PCS_G3S2_PRE_GAIN 0x1d70
+#define USB3_DP_PCS_G3S2_POST_GAIN 0x1d74
+#define USB3_DP_PCS_G3S2_PRE_POST_OFFSET 0x1d78
+#define USB3_DP_PCS_G3S2_PRE_GAIN_RS 0x1d7c
+#define USB3_DP_PCS_G3S2_POST_GAIN_RS 0x1d80
+#define USB3_DP_PCS_G3S2_PRE_POST_OFFSET_RS 0x1d84
+#define USB3_DP_PCS_RX_SIGDET_LVL 0x1d88
+#define USB3_DP_PCS_RX_SIGDET_DTCT_CNTRL 0x1d8c
+#define USB3_DP_PCS_RCVR_DTCT_DLY_P1U2_L 0x1d90
+#define USB3_DP_PCS_RCVR_DTCT_DLY_P1U2_H 0x1d94
+#define USB3_DP_PCS_RATE_SLEW_CNTRL1 0x1d98
+#define USB3_DP_PCS_RATE_SLEW_CNTRL2 0x1d9c
+#define USB3_DP_PCS_PWRUP_RESET_DLY_TIME_AUXCLK 0x1da0
+#define USB3_DP_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_L 0x1da4
+#define USB3_DP_PCS_P2U3_WAKEUP_DLY_TIME_AUXCLK_H 0x1da8
+#define USB3_DP_PCS_TSYNC_RSYNC_TIME 0x1dac
+#define USB3_DP_PCS_CDR_RESET_TIME 0x1db0
+#define USB3_DP_PCS_TSYNC_DLY_TIME 0x1db4
+#define USB3_DP_PCS_ELECIDLE_DLY_SEL 0x1db8
+#define USB3_DP_PCS_CMN_ACK_OUT_SEL 0x1dbc
+#define USB3_DP_PCS_ALIGN_DETECT_CONFIG1 0x1dc0
+#define USB3_DP_PCS_ALIGN_DETECT_CONFIG2 0x1dc4
+#define USB3_DP_PCS_ALIGN_DETECT_CONFIG3 0x1dc8
+#define USB3_DP_PCS_ALIGN_DETECT_CONFIG4 0x1dcc
+#define USB3_DP_PCS_PCS_TX_RX_CONFIG 0x1dd0
+#define USB3_DP_PCS_RX_IDLE_DTCT_CNTRL 0x1dd4
+#define USB3_DP_PCS_RX_DCC_CAL_CONFIG 0x1dd8
+#define USB3_DP_PCS_EQ_CONFIG1 0x1ddc
+#define USB3_DP_PCS_EQ_CONFIG2 0x1de0
+#define USB3_DP_PCS_EQ_CONFIG3 0x1de4
+#define USB3_DP_PCS_EQ_CONFIG4 0x1de8
+#define USB3_DP_PCS_EQ_CONFIG5 0x1dec
+#define USB3_DP_PCS_USB3_POWER_STATE_CONFIG1 0x1f00
+#define USB3_DP_PCS_USB3_AUTONOMOUS_MODE_STATUS 0x1f04
+#define USB3_DP_PCS_USB3_AUTONOMOUS_MODE_CTRL 0x1f08
+#define USB3_DP_PCS_USB3_AUTONOMOUS_MODE_CTRL2 0x1f0c
+#define USB3_DP_PCS_USB3_LFPS_RXTERM_IRQ_SOURCE_STATUS 0x1f10
+#define USB3_DP_PCS_USB3_LFPS_RXTERM_IRQ_CLEAR 0x1f14
+#define USB3_DP_PCS_USB3_LFPS_DET_HIGH_COUNT_VAL 0x1f18
+#define USB3_DP_PCS_USB3_LFPS_TX_ECSTART 0x1f1c
+#define USB3_DP_PCS_USB3_LFPS_PER_TIMER_VAL 0x1f20
+#define USB3_DP_PCS_USB3_LFPS_TX_END_CNT_U3_START 0x1f24
+#define USB3_DP_PCS_USB3_RXEQTRAINING_LOCK_TIME 0x1f28
+#define USB3_DP_PCS_USB3_RXEQTRAINING_WAIT_TIME 0x1f2c
+#define USB3_DP_PCS_USB3_RXEQTRAINING_CTLE_TIME 0x1f30
+#define USB3_DP_PCS_USB3_RXEQTRAINING_WAIT_TIME_S2 0x1f34
+#define USB3_DP_PCS_USB3_RXEQTRAINING_DFE_TIME_S2 0x1f38
+#define USB3_DP_PCS_USB3_RCVR_DTCT_DLY_U3_L 0x1f3c
+#define USB3_DP_PCS_USB3_RCVR_DTCT_DLY_U3_H 0x1f40
+#define USB3_DP_PCS_USB3_ARCVR_DTCT_EN_PERIOD 0x1f44
+#define USB3_DP_PCS_USB3_ARCVR_DTCT_CM_DLY 0x1f48
+#define USB3_DP_PCS_USB3_TXONESZEROS_RUN_LENGTH 0x1f4c
+#define USB3_DP_PCS_USB3_ALFPS_DEGLITCH_VAL 0x1f50
+#define USB3_DP_PCS_USB3_SIGDET_STARTUP_TIMER_VAL 0x1f54
+#define USB3_DP_PCS_USB3_TEST_CONTROL 0x1f58
+#define USB3_DP_PCS_USB3_RXTERMINATION_DLY_SEL 0x1f5c
+
+#endif /* _DT_BINDINGS_PHY_QCOM_LITO_QMP_USB_H */
diff --git a/include/keys/user-type.h b/include/keys/user-type.h
index e098cbe..12babe9 100644
--- a/include/keys/user-type.h
+++ b/include/keys/user-type.h
@@ -31,7 +31,7 @@
struct user_key_payload {
struct rcu_head rcu; /* RCU destructor */
unsigned short datalen; /* length of this data */
- char data[0]; /* actual data */
+ char data[0] __aligned(__alignof__(u64)); /* actual data */
};
extern struct key_type key_type_user;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5137174..58f02ec 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -73,6 +73,9 @@
#define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter)
+#define bio_dun(bio) ((bio)->bi_iter.bi_dun)
+#define bio_duns(bio) (bio_sectors(bio) >> 3) /* 4KB unit */
+#define bio_end_dun(bio) (bio_dun(bio) + bio_duns(bio))
/*
* Return the data direction, READ or WRITE.
@@ -170,6 +173,11 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
{
iter->bi_sector += bytes >> 9;
+#ifdef CONFIG_PFK
+ if (iter->bi_dun)
+ iter->bi_dun += bytes >> 12;
+#endif
+
if (bio_no_advance_iter(bio)) {
iter->bi_size -= bytes;
iter->bi_done += bytes;
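The bio_dun()/bio_end_dun() helpers added above express the inline-crypto block arithmetic: a bio covering N 512-byte sectors spans N >> 3 4KB data units, and bio_advance_iter() moves bi_dun forward by bytes >> 12 to match. A minimal sketch of how the macros combine, assuming CONFIG_PFK so that bi_dun is populated; the helper name is hypothetical and not part of this patch:

#include <linux/bio.h>

/*
 * Two bios carry contiguous data units when the end DUN of the first
 * equals the start DUN of the second (4KB granularity).
 */
static bool example_bio_dun_contiguous(struct bio *prev, struct bio *next)
{
	return bio_end_dun(prev) == bio_dun(next);
}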
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d178dce..d300296 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -187,6 +187,13 @@ struct bio {
struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
};
+#ifdef CONFIG_PFK
+ /* Encryption key to use (NULL if none) */
+ const struct blk_encryption_key *bi_crypt_key;
+#endif
+#ifdef CONFIG_DM_DEFAULT_KEY
+ int bi_crypt_skip;
+#endif
unsigned short bi_vcnt; /* how many bio_vec's */
@@ -201,7 +208,9 @@ struct bio {
struct bio_vec *bi_io_vec; /* the actual vec list */
struct bio_set *bi_pool;
-
+#ifdef CONFIG_PFK
+ struct inode *bi_dio_inode;
+#endif
/*
* We can inline a number of vecs at the end of the bio, to avoid
* double allocations for a small number of bio_vecs. This member
@@ -324,6 +333,11 @@ enum req_flag_bits {
__REQ_SORTED = __REQ_RAHEAD, /* elevator knows about this request */
__REQ_URGENT, /* urgent request */
+ /* Android specific flags */
+ __REQ_NOENCRYPT, /*
+ * ok to not encrypt (already encrypted at fs
+ * level)
+ */
/* command specific flags for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
@@ -348,6 +362,7 @@ enum req_flag_bits {
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT (1ULL << __REQ_NOWAIT)
+#define REQ_NOENCRYPT (1ULL << __REQ_NOENCRYPT)
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
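REQ_NOENCRYPT lets a filesystem tell the block layer that a bio's payload is already ciphertext (encrypted at the fs level), so inline encryption must be skipped for it. A hedged sketch of a caller setting the flag; the function is illustrative, not taken from this patch:

#include <linux/blk_types.h>

static void example_mark_bio_fs_encrypted(struct bio *bio)
{
	/* Payload was encrypted by the filesystem; do not encrypt again. */
	bio->bi_opf |= REQ_NOENCRYPT;
}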
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6980014..1b4dbe94 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -161,6 +161,7 @@ struct request {
unsigned int __data_len; /* total data len */
int tag;
sector_t __sector; /* sector cursor */
+ u64 __dun; /* dun for UFS */
struct bio *bio;
struct bio *biotail;
@@ -699,6 +700,7 @@ struct request_queue {
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */
#define QUEUE_FLAG_PREEMPT_ONLY 29 /* only process REQ_PREEMPT requests */
+#define QUEUE_FLAG_INLINECRYPT 30 /* inline encryption support */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_SAME_COMP) | \
@@ -731,6 +733,8 @@ bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q) \
test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
+#define blk_queue_inlinecrypt(q) \
+ test_bit(QUEUE_FLAG_INLINECRYPT, &(q)->queue_flags)
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
@@ -878,6 +882,15 @@ static inline unsigned int blk_queue_depth(struct request_queue *q)
return q->nr_requests;
}
+static inline void queue_flag_set_unlocked(unsigned int flag,
+ struct request_queue *q)
+{
+ if (test_bit(QUEUE_FLAG_INIT_DONE, &q->queue_flags) &&
+ kref_read(&q->kobj.kref))
+ lockdep_assert_held(q->queue_lock);
+ __set_bit(flag, &q->queue_flags);
+}
+
/*
* q->prep_rq_fn return values
*/
@@ -1043,6 +1056,11 @@ static inline sector_t blk_rq_pos(const struct request *rq)
return rq->__sector;
}
+static inline sector_t blk_rq_dun(const struct request *rq)
+{
+ return rq->__dun;
+}
+
static inline unsigned int blk_rq_bytes(const struct request *rq)
{
return rq->__data_len;
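QUEUE_FLAG_INLINECRYPT advertises inline-encryption capable hardware, queue_flag_set_unlocked() lets a driver set such flags before the queue is published, and blk_rq_dun() exposes the per-request DUN cursor. A sketch of how a storage host driver might wire these together; both function names are placeholders:

#include <linux/blkdev.h>

static void example_advertise_inline_crypt(struct request_queue *q)
{
	/* Safe before QUEUE_FLAG_INIT_DONE; see queue_flag_set_unlocked(). */
	queue_flag_set_unlocked(QUEUE_FLAG_INLINECRYPT, q);
}

static u64 example_request_dun(struct request *rq)
{
	/* The DUN is only meaningful when the queue supports inline crypto. */
	if (!blk_queue_inlinecrypt(rq->q))
		return 0;
	return blk_rq_dun(rq);
}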
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index fe7a22d..543bb5f 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -44,6 +44,7 @@ struct bvec_iter {
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
+ u64 bi_dun; /* DUN setting for bio */
};
/*
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 22254c1..5e1694f 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -20,6 +20,7 @@
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup.h>
+#include <linux/psi_types.h>
#ifdef CONFIG_CGROUPS
@@ -436,6 +437,9 @@ struct cgroup {
/* used to schedule release agent */
struct work_struct release_agent_work;
+ /* used to track pressure stalls */
+ struct psi_group psi;
+
/* used to store eBPF programs */
struct cgroup_bpf bpf;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 32c5535..6399b32 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -657,6 +657,11 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
pr_cont_kernfs_path(cgrp->kn);
}
+static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+{
+ return &cgrp->psi;
+}
+
static inline void cgroup_init_kthreadd(void)
{
/*
@@ -710,6 +715,16 @@ static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
return NULL;
}
+static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
+{
+ return NULL;
+}
+
+static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
+{
+ return NULL;
+}
+
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
struct cgroup *ancestor)
{
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 34d2179..2764fab 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -46,7 +46,7 @@
* hand-off enable_count & prepare_count
* to first consumer that enables clk
*/
-#define CLK_IS_MEASURE BIT(14) /* measure clock */
+#define CLK_IS_MEASURE BIT(15) /* measure clock */
struct clk;
struct clk_hw;
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 426c9e9..0294807 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -3,9 +3,8 @@
#error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
#endif
-/* Some compiler specific definitions are overwritten here
- * for Clang compiler
- */
+/* Compiler specific definitions for Clang compiler */
+
#define uninitialized_var(x) x = *(&(x))
/* same as gcc, this was present in clang-2.6 so we can assume it works
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 0242f6e..a8ff0ca 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -58,10 +58,6 @@
(typeof(ptr)) (__ptr + (off)); \
})
-/* Make the optimizer believe the variable can be manipulated arbitrarily. */
-#define OPTIMIZER_HIDE_VAR(var) \
- __asm__ ("" : "=r" (var) : "0" (var))
-
/*
* A trick to suppress uninitialized variable warning without generating any
* code
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 4c7f9be..f1fc60f 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -5,9 +5,7 @@
#ifdef __ECC
-/* Some compiler specific definitions are overwritten here
- * for Intel ECC compiler
- */
+/* Compiler specific definitions for Intel ECC compiler */
#include <asm/intrinsics.h>
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 8e8e85f..81c3744 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -158,7 +158,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
#endif
#ifndef OPTIMIZER_HIDE_VAR
-#define OPTIMIZER_HIDE_VAR(var) barrier()
+/* Make the optimizer believe the variable can be manipulated arbitrarily. */
+#define OPTIMIZER_HIDE_VAR(var) \
+ __asm__ ("" : "=r" (var) : "0" (var))
#endif
/* Not-quite-unique ID. */
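Moving OPTIMIZER_HIDE_VAR() into compiler.h gives every compiler the inline-asm definition, which makes the named variable opaque to the optimizer. A small illustrative use, not taken from this patch:

#include <linux/compiler.h>

static unsigned long example_opaque_double(unsigned long x)
{
	unsigned long factor = 2;

	OPTIMIZER_HIDE_VAR(factor);	/* compiler can no longer see the constant */
	return x * factor;		/* so it cannot fold this to x << 1 */
}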
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 1bf10aa..f172204 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -180,12 +180,10 @@ enum cpuhp_smt_control {
#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
extern enum cpuhp_smt_control cpu_smt_control;
extern void cpu_smt_disable(bool force);
-extern void cpu_smt_check_topology_early(void);
extern void cpu_smt_check_topology(void);
#else
# define cpu_smt_control (CPU_SMT_ENABLED)
static inline void cpu_smt_disable(bool force) { }
-static inline void cpu_smt_check_topology_early(void) { }
static inline void cpu_smt_check_topology(void) { }
#endif
diff --git a/include/linux/file.h b/include/linux/file.h
index 6b2fb03..97265cf 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -87,6 +87,7 @@ extern void put_unused_fd(unsigned int fd);
extern void fd_install(unsigned int fd, struct file *file);
extern void flush_delayed_fput(void);
+extern void flush_delayed_fput_wait(void);
extern void __fput_sync(struct file *);
#endif /* __LINUX_FILE_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 421ef90..79eed97 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1878,6 +1878,7 @@ struct super_operations {
void *(*clone_mnt_data) (void *);
void (*copy_mnt_data) (void *, void *);
void (*umount_begin) (struct super_block *);
+ void (*umount_end)(struct super_block *sb, int flags);
int (*show_options)(struct seq_file *, struct dentry *);
int (*show_options2)(struct vfsmount *,struct seq_file *, struct dentry *);
@@ -3096,6 +3097,8 @@ static inline void inode_dio_end(struct inode *inode)
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
+struct inode *dio_bio_get_inode(struct bio *bio);
+
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 952ab97..ddd5b3a3 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -19,6 +19,11 @@
#define FS_CRYPTO_BLOCK_SIZE 16
struct fscrypt_ctx;
+
+/* iv sector for security/pfe/pfk_fscrypt.c and f2fs */
+#define PG_DUN(i, p) \
+ (((((u64)(i)->i_ino) & 0xffffffff) << 32) | ((p)->index & 0xffffffff))
+
struct fscrypt_info;
struct fscrypt_str {
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index ee8b43e..0abc588 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -174,6 +174,21 @@ static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
return -EOPNOTSUPP;
}
+/* fscrypt_ice.c */
+static inline int fscrypt_using_hardware_encryption(const struct inode *inode)
+{
+ return 0;
+}
+
+static inline void fscrypt_set_ice_dun(const struct inode *inode,
+ struct bio *bio, u64 dun) {}
+
+static inline bool fscrypt_mergeable_bio(struct bio *bio,
+ sector_t iv_block, bool bio_encrypted)
+{
+ return true;
+}
+
/* hooks.c */
static inline int fscrypt_file_open(struct inode *inode, struct file *filp)
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index 6456c6b..435fa38 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -30,6 +30,7 @@ struct fscrypt_operations {
bool (*dummy_context)(struct inode *);
bool (*empty_dir)(struct inode *);
unsigned int max_namelen;
+ bool (*is_encrypted)(struct inode *inode);
};
struct fscrypt_ctx {
@@ -182,6 +183,12 @@ extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
+/* fscrypt_ice.c */
+extern int fscrypt_using_hardware_encryption(const struct inode *inode);
+extern void fscrypt_set_ice_dun(const struct inode *inode,
+ struct bio *bio, u64 dun);
+extern bool fscrypt_mergeable_bio(struct bio *bio, u64 dun, bool bio_encrypted);
+
/* hooks.c */
extern int fscrypt_file_open(struct inode *inode, struct file *filp);
extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir);
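The new fscrypt ICE hooks let a filesystem hand the DUN (derived from the inode and page index via PG_DUN()) to inline-encryption hardware and decide whether a page may be merged into an existing bio. A minimal sketch of a submission path using only the declarations above; the function names are hypothetical:

#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/fscrypt.h>
#include <linux/mm.h>

static void example_set_bio_ice_ctx(struct bio *bio, struct inode *inode,
				    struct page *page)
{
	/* Only tag the bio when the inode is hardware (ICE) encrypted. */
	if (fscrypt_using_hardware_encryption(inode))
		fscrypt_set_ice_dun(inode, bio, PG_DUN(inode, page));
}

static bool example_can_add_page(struct bio *bio, struct inode *inode,
				 struct page *page)
{
	bool encrypted = fscrypt_using_hardware_encryption(inode);

	/* Refuse to merge pages whose DUNs would not be contiguous. */
	return fscrypt_mergeable_bio(bio, PG_DUN(inode, page), encrypted);
}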
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index 5972e49..eeae59d 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -191,6 +191,7 @@ static inline void ct_assert_unique_operations(void)
{
switch (0) {
#include GENL_MAGIC_INCLUDE_FILE
+ case 0:
;
}
}
@@ -209,6 +210,7 @@ static inline void ct_assert_unique_top_level_attributes(void)
{
switch (0) {
#include GENL_MAGIC_INCLUDE_FILE
+ case 0:
;
}
}
@@ -218,7 +220,8 @@ static inline void ct_assert_unique_top_level_attributes(void)
static inline void ct_assert_unique_ ## s_name ## _attributes(void) \
{ \
switch (0) { \
- s_fields \
+ s_fields \
+ case 0: \
; \
} \
}
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 21ddbe4..acc4279 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -142,7 +142,7 @@ int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
int gpiod_to_irq(const struct gpio_desc *desc);
-void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
+int gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
/* Convert between the old gpio_ and new gpiod_ interfaces */
struct gpio_desc *gpio_to_desc(unsigned gpio);
@@ -465,10 +465,12 @@ static inline int gpiod_to_irq(const struct gpio_desc *desc)
return -EINVAL;
}
-static inline void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
+static inline int gpiod_set_consumer_name(struct gpio_desc *desc,
+ const char *name)
{
/* GPIO can never have been requested */
WARN_ON(1);
+ return -EINVAL;
}
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
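gpiod_set_consumer_name() now reports failure instead of returning void, so callers should propagate the error (the stub above returns -EINVAL when GPIOLIB is disabled). Illustrative caller, not part of the patch:

#include <linux/gpio/consumer.h>

static int example_label_reset_gpio(struct gpio_desc *desc)
{
	int ret = gpiod_set_consumer_name(desc, "example-reset");

	if (ret)
		return ret;	/* renaming failed, e.g. allocation error */
	return 0;
}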
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h
index 8663f21..2d6100e 100644
--- a/include/linux/hid-debug.h
+++ b/include/linux/hid-debug.h
@@ -24,7 +24,10 @@
#ifdef CONFIG_DEBUG_FS
+#include <linux/kfifo.h>
+
#define HID_DEBUG_BUFSIZE 512
+#define HID_DEBUG_FIFOSIZE 512
void hid_dump_input(struct hid_device *, struct hid_usage *, __s32);
void hid_dump_report(struct hid_device *, int , u8 *, int);
@@ -37,11 +40,8 @@ void hid_debug_init(void);
void hid_debug_exit(void);
void hid_debug_event(struct hid_device *, char *);
-
struct hid_debug_list {
- char *hid_debug_buf;
- int head;
- int tail;
+ DECLARE_KFIFO_PTR(hid_debug_fifo, char);
struct fasync_struct *fasync;
struct hid_device *hdev;
struct list_head node;
@@ -64,4 +64,3 @@ struct hid_debug_list {
#endif
#endif
-
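hid_debug_list now wraps a kfifo instead of a hand-rolled char buffer with head/tail indices, so set-up and tear-down reduce to the standard kfifo calls. A hedged sketch of the open/release halves (the real users live in drivers/hid/hid-debug.c; these helpers are illustrative):

#include <linux/hid.h>
#include <linux/kfifo.h>
#include <linux/slab.h>

static int example_debug_list_init(struct hid_debug_list *list)
{
	/* Replaces kzalloc(HID_DEBUG_BUFSIZE) plus manual head/tail tracking. */
	return kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL);
}

static void example_debug_list_exit(struct hid_debug_list *list)
{
	kfifo_free(&list->hid_debug_fifo);
}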
diff --git a/include/linux/i3c/ccc.h b/include/linux/i3c/ccc.h
new file mode 100644
index 0000000..73b0982
--- /dev/null
+++ b/include/linux/i3c/ccc.h
@@ -0,0 +1,385 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_CCC_H
+#define I3C_CCC_H
+
+#include <linux/bitops.h>
+#include <linux/i3c/device.h>
+
+/* I3C CCC (Common Command Codes) related definitions */
+#define I3C_CCC_DIRECT BIT(7)
+
+#define I3C_CCC_ID(id, broadcast) \
+ ((id) | ((broadcast) ? 0 : I3C_CCC_DIRECT))
+
+/* Commands valid in both broadcast and unicast modes */
+#define I3C_CCC_ENEC(broadcast) I3C_CCC_ID(0x0, broadcast)
+#define I3C_CCC_DISEC(broadcast) I3C_CCC_ID(0x1, broadcast)
+#define I3C_CCC_ENTAS(as, broadcast) I3C_CCC_ID(0x2 + (as), broadcast)
+#define I3C_CCC_RSTDAA(broadcast) I3C_CCC_ID(0x6, broadcast)
+#define I3C_CCC_SETMWL(broadcast) I3C_CCC_ID(0x9, broadcast)
+#define I3C_CCC_SETMRL(broadcast) I3C_CCC_ID(0xa, broadcast)
+#define I3C_CCC_SETXTIME(broadcast) ((broadcast) ? 0x28 : 0x98)
+#define I3C_CCC_VENDOR(id, broadcast) ((id) + ((broadcast) ? 0x61 : 0xe0))
+
+/* Broadcast-only commands */
+#define I3C_CCC_ENTDAA I3C_CCC_ID(0x7, true)
+#define I3C_CCC_DEFSLVS I3C_CCC_ID(0x8, true)
+#define I3C_CCC_ENTTM I3C_CCC_ID(0xb, true)
+#define I3C_CCC_ENTHDR(x) I3C_CCC_ID(0x20 + (x), true)
+
+/* Unicast-only commands */
+#define I3C_CCC_SETDASA I3C_CCC_ID(0x7, false)
+#define I3C_CCC_SETNEWDA I3C_CCC_ID(0x8, false)
+#define I3C_CCC_GETMWL I3C_CCC_ID(0xb, false)
+#define I3C_CCC_GETMRL I3C_CCC_ID(0xc, false)
+#define I3C_CCC_GETPID I3C_CCC_ID(0xd, false)
+#define I3C_CCC_GETBCR I3C_CCC_ID(0xe, false)
+#define I3C_CCC_GETDCR I3C_CCC_ID(0xf, false)
+#define I3C_CCC_GETSTATUS I3C_CCC_ID(0x10, false)
+#define I3C_CCC_GETACCMST I3C_CCC_ID(0x11, false)
+#define I3C_CCC_SETBRGTGT I3C_CCC_ID(0x13, false)
+#define I3C_CCC_GETMXDS I3C_CCC_ID(0x14, false)
+#define I3C_CCC_GETHDRCAP I3C_CCC_ID(0x15, false)
+#define I3C_CCC_GETXTIME I3C_CCC_ID(0x19, false)
+
+#define I3C_CCC_EVENT_SIR BIT(0)
+#define I3C_CCC_EVENT_MR BIT(1)
+#define I3C_CCC_EVENT_HJ BIT(3)
+
+/**
+ * struct i3c_ccc_events - payload passed to ENEC/DISEC CCC
+ *
+ * @events: bitmask of I3C_CCC_EVENT_xxx events.
+ *
+ * Depending on the CCC command, the specific events coming from all devices
+ * (broadcast version) or a specific device (unicast version) will be
+ * enabled (ENEC) or disabled (DISEC).
+ */
+struct i3c_ccc_events {
+ u8 events;
+};
+
+/**
+ * struct i3c_ccc_mwl - payload passed to SETMWL/GETMWL CCC
+ *
+ * @len: maximum write length in bytes
+ *
+ * The maximum write length is only applicable to SDR private messages or
+ * extended Write CCCs (like SETXTIME).
+ */
+struct i3c_ccc_mwl {
+ __be16 len;
+};
+
+/**
+ * struct i3c_ccc_mrl - payload passed to SETMRL/GETMRL CCC
+ *
+ * @read_len: maximum read length in bytes
+ * @ibi_len: maximum IBI payload length
+ *
+ * The maximum read length is only applicable to SDR private messages or
+ * extended Read CCCs (like GETXTIME).
+ * The IBI length is only valid if the I3C slave is IBI capable
+ * (%I3C_BCR_IBI_REQ_CAP is set).
+ */
+struct i3c_ccc_mrl {
+ __be16 read_len;
+ u8 ibi_len;
+} __packed;
+
+/**
+ * struct i3c_ccc_dev_desc - I3C/I2C device descriptor used for DEFSLVS
+ *
+ * @dyn_addr: dynamic address assigned to the I3C slave or 0 if the entry is
+ * describing an I2C slave.
+ * @dcr: DCR value (not applicable to entries describing I2C devices)
+ * @lvr: LVR value (not applicable to entries describing I3C devices)
+ * @bcr: BCR value or 0 if this entry is describing an I2C slave
+ * @static_addr: static address or 0 if the device does not have a static
+ * address
+ *
+ * The DEFSLVS command should be passed an array of i3c_ccc_dev_desc
+ * descriptors (one entry per I3C/I2C dev controlled by the master).
+ */
+struct i3c_ccc_dev_desc {
+ u8 dyn_addr;
+ union {
+ u8 dcr;
+ u8 lvr;
+ };
+ u8 bcr;
+ u8 static_addr;
+};
+
+/**
+ * struct i3c_ccc_defslvs - payload passed to DEFSLVS CCC
+ *
+ * @count: number of dev descriptors
+ * @master: descriptor describing the current master
+ * @slaves: array of descriptors describing slaves controlled by the
+ * current master
+ *
+ * Information passed to the broadcast DEFSLVS to propagate device
+ * information to all masters currently acting as slaves on the bus.
+ * This is only meaningful if you have more than one master.
+ */
+struct i3c_ccc_defslvs {
+ u8 count;
+ struct i3c_ccc_dev_desc master;
+ struct i3c_ccc_dev_desc slaves[0];
+} __packed;
+
+/**
+ * enum i3c_ccc_test_mode - enum listing all available test modes
+ *
+ * @I3C_CCC_EXIT_TEST_MODE: exit test mode
+ * @I3C_CCC_VENDOR_TEST_MODE: enter vendor test mode
+ */
+enum i3c_ccc_test_mode {
+ I3C_CCC_EXIT_TEST_MODE,
+ I3C_CCC_VENDOR_TEST_MODE,
+};
+
+/**
+ * struct i3c_ccc_enttm - payload passed to ENTTM CCC
+ *
+ * @mode: one of the &enum i3c_ccc_test_mode modes
+ *
+ * Information passed to the ENTTM CCC to instruct an I3C device to enter a
+ * specific test mode.
+ */
+struct i3c_ccc_enttm {
+ u8 mode;
+};
+
+/**
+ * struct i3c_ccc_setda - payload passed to SETNEWDA and SETDASA CCCs
+ *
+ * @addr: dynamic address to assign to an I3C device
+ *
+ * Information passed to the SETNEWDA and SETDASA CCCs to assign/change the
+ * dynamic address of an I3C device.
+ */
+struct i3c_ccc_setda {
+ u8 addr;
+};
+
+/**
+ * struct i3c_ccc_getpid - payload passed to GETPID CCC
+ *
+ * @pid: 48 bits PID in big endian
+ */
+struct i3c_ccc_getpid {
+ u8 pid[6];
+};
+
+/**
+ * struct i3c_ccc_getbcr - payload passed to GETBCR CCC
+ *
+ * @bcr: BCR (Bus Characteristic Register) value
+ */
+struct i3c_ccc_getbcr {
+ u8 bcr;
+};
+
+/**
+ * struct i3c_ccc_getdcr - payload passed to GETDCR CCC
+ *
+ * @dcr: DCR (Device Characteristic Register) value
+ */
+struct i3c_ccc_getdcr {
+ u8 dcr;
+};
+
+#define I3C_CCC_STATUS_PENDING_INT(status) ((status) & GENMASK(3, 0))
+#define I3C_CCC_STATUS_PROTOCOL_ERROR BIT(5)
+#define I3C_CCC_STATUS_ACTIVITY_MODE(status) \
+ (((status) & GENMASK(7, 6)) >> 6)
+
+/**
+ * struct i3c_ccc_getstatus - payload passed to GETSTATUS CCC
+ *
+ * @status: status of the I3C slave (see I3C_CCC_STATUS_xxx macros for more
+ * information).
+ */
+struct i3c_ccc_getstatus {
+ __be16 status;
+};
+
+/**
+ * struct i3c_ccc_getaccmst - payload passed to GETACCMST CCC
+ *
+ * @newmaster: address of the master taking bus ownership
+ */
+struct i3c_ccc_getaccmst {
+ u8 newmaster;
+};
+
+/**
+ * struct i3c_ccc_bridged_slave_desc - bridged slave descriptor
+ *
+ * @addr: dynamic address of the bridged device
+ * @id: ID of the slave device behind the bridge
+ */
+struct i3c_ccc_bridged_slave_desc {
+ u8 addr;
+ __be16 id;
+} __packed;
+
+/**
+ * struct i3c_ccc_setbrgtgt - payload passed to SETBRGTGT CCC
+ *
+ * @count: number of bridged slaves
+ * @bslaves: bridged slave descriptors
+ */
+struct i3c_ccc_setbrgtgt {
+ u8 count;
+ struct i3c_ccc_bridged_slave_desc bslaves[0];
+} __packed;
+
+/**
+ * enum i3c_sdr_max_data_rate - max data rate values for private SDR transfers
+ */
+enum i3c_sdr_max_data_rate {
+ I3C_SDR0_FSCL_MAX,
+ I3C_SDR1_FSCL_8MHZ,
+ I3C_SDR2_FSCL_6MHZ,
+ I3C_SDR3_FSCL_4MHZ,
+ I3C_SDR4_FSCL_2MHZ,
+};
+
+/**
+ * enum i3c_tsco - clock to data turn-around
+ */
+enum i3c_tsco {
+ I3C_TSCO_8NS,
+ I3C_TSCO_9NS,
+ I3C_TSCO_10NS,
+ I3C_TSCO_11NS,
+ I3C_TSCO_12NS,
+};
+
+#define I3C_CCC_MAX_SDR_FSCL_MASK GENMASK(2, 0)
+#define I3C_CCC_MAX_SDR_FSCL(x) ((x) & I3C_CCC_MAX_SDR_FSCL_MASK)
+
+/**
+ * struct i3c_ccc_getmxds - payload passed to GETMXDS CCC
+ *
+ * @maxwr: write limitations
+ * @maxrd: read limitations
+ * @maxrdturn: maximum read turn-around expressed in micro-seconds and
+ * little-endian formatted
+ */
+struct i3c_ccc_getmxds {
+ u8 maxwr;
+ u8 maxrd;
+ u8 maxrdturn[3];
+} __packed;
+
+#define I3C_CCC_HDR_MODE(mode) BIT(mode)
+
+/**
+ * struct i3c_ccc_gethdrcap - payload passed to GETHDRCAP CCC
+ *
+ * @modes: bitmap of supported HDR modes
+ */
+struct i3c_ccc_gethdrcap {
+ u8 modes;
+} __packed;
+
+/**
+ * enum i3c_ccc_setxtime_subcmd - SETXTIME sub-commands
+ */
+enum i3c_ccc_setxtime_subcmd {
+ I3C_CCC_SETXTIME_ST = 0x7f,
+ I3C_CCC_SETXTIME_DT = 0xbf,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE0 = 0xdf,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE1 = 0xef,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE2 = 0xf7,
+ I3C_CCC_SETXTIME_ENTER_ASYNC_MODE3 = 0xfb,
+ I3C_CCC_SETXTIME_ASYNC_TRIGGER = 0xfd,
+ I3C_CCC_SETXTIME_TPH = 0x3f,
+ I3C_CCC_SETXTIME_TU = 0x9f,
+ I3C_CCC_SETXTIME_ODR = 0x8f,
+};
+
+/**
+ * struct i3c_ccc_setxtime - payload passed to SETXTIME CCC
+ *
+ * @subcmd: one of the sub-commands defined in &enum i3c_ccc_setxtime_subcmd
+ * @data: sub-command payload. Amount of data is determined by
+ * &i3c_ccc_setxtime->subcmd
+ */
+struct i3c_ccc_setxtime {
+ u8 subcmd;
+ u8 data[0];
+} __packed;
+
+#define I3C_CCC_GETXTIME_SYNC_MODE BIT(0)
+#define I3C_CCC_GETXTIME_ASYNC_MODE(x) BIT((x) + 1)
+#define I3C_CCC_GETXTIME_OVERFLOW BIT(7)
+
+/**
+ * struct i3c_ccc_getxtime - payload retrieved from GETXTIME CCC
+ *
+ * @supported_modes: bitmap describing supported XTIME modes
+ * @state: current status (enabled mode and overflow status)
+ * @frequency: slave's internal oscillator frequency in 500KHz steps
+ * @inaccuracy: slave's internal oscillator inaccuracy in 0.1% steps
+ */
+struct i3c_ccc_getxtime {
+ u8 supported_modes;
+ u8 state;
+ u8 frequency;
+ u8 inaccuracy;
+} __packed;
+
+/**
+ * struct i3c_ccc_cmd_payload - CCC payload
+ *
+ * @len: payload length
+ * @data: payload data. This buffer must be DMA-able
+ */
+struct i3c_ccc_cmd_payload {
+ u16 len;
+ void *data;
+};
+
+/**
+ * struct i3c_ccc_cmd_dest - CCC command destination
+ *
+ * @addr: can be an I3C device address or the broadcast address if this is a
+ * broadcast CCC
+ * @payload: payload to be sent to this device or broadcasted
+ */
+struct i3c_ccc_cmd_dest {
+ u8 addr;
+ struct i3c_ccc_cmd_payload payload;
+};
+
+/**
+ * struct i3c_ccc_cmd - CCC command
+ *
+ * @rnw: true if the CCC should retrieve data from the device. Only valid for
+ * unicast commands
+ * @id: CCC command id
+ * @ndests: number of destinations. Should always be one for broadcast commands
+ * @dests: array of destinations and associated payload for this CCC. Most of
+ * the time, only one destination is provided
+ * @err: I3C error code
+ */
+struct i3c_ccc_cmd {
+ u8 rnw;
+ u8 id;
+ unsigned int ndests;
+ struct i3c_ccc_cmd_dest *dests;
+ enum i3c_error_code err;
+};
+
+#endif /* I3C_CCC_H */
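The structures above are what controller drivers receive when the core emits a CCC. As an illustration, a directed GETPID command could be assembled like this before being handed to the controller's CCC hook; the helper and variable names are hypothetical:

#include <linux/i3c/ccc.h>

static void example_build_getpid(struct i3c_ccc_cmd *cmd,
				 struct i3c_ccc_cmd_dest *dest,
				 struct i3c_ccc_getpid *pid, u8 addr)
{
	dest->addr = addr;			/* dynamic address of the target */
	dest->payload.len = sizeof(*pid);	/* 48-bit PID */
	dest->payload.data = pid;		/* must be DMA-able */

	cmd->rnw = true;			/* GETPID reads data back */
	cmd->id = I3C_CCC_GETPID;		/* unicast-only command */
	cmd->ndests = 1;
	cmd->dests = dest;
	cmd->err = I3C_ERROR_UNKNOWN;
}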
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
new file mode 100644
index 0000000..5ecb055
--- /dev/null
+++ b/include/linux/i3c/device.h
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_DEV_H
+#define I3C_DEV_H
+
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kconfig.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+
+/**
+ * enum i3c_error_code - I3C error codes
+ *
+ * These are the standard error codes as defined by the I3C specification.
+ * When -EIO is returned by the i3c_device_do_priv_xfers() or
+ * i3c_device_send_hdr_cmds() one can check the error code in
+ * &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
+ * what went wrong.
+ *
+ * @I3C_ERROR_UNKNOWN: unknown error, usually means the error is not I3C
+ * related
+ * @I3C_ERROR_M0: M0 error
+ * @I3C_ERROR_M1: M1 error
+ * @I3C_ERROR_M2: M2 error
+ */
+enum i3c_error_code {
+ I3C_ERROR_UNKNOWN = 0,
+ I3C_ERROR_M0 = 1,
+ I3C_ERROR_M1,
+ I3C_ERROR_M2,
+};
+
+/**
+ * enum i3c_hdr_mode - HDR mode ids
+ * @I3C_HDR_DDR: DDR mode
+ * @I3C_HDR_TSP: TSP mode
+ * @I3C_HDR_TSL: TSL mode
+ */
+enum i3c_hdr_mode {
+ I3C_HDR_DDR,
+ I3C_HDR_TSP,
+ I3C_HDR_TSL,
+};
+
+/**
+ * struct i3c_priv_xfer - I3C SDR private transfer
+ * @rnw: encodes the transfer direction. true for a read, false for a write
+ * @len: transfer length in bytes of the transfer
+ * @data: input/output buffer
+ * @data.in: input buffer. Must point to a DMA-able buffer
+ * @data.out: output buffer. Must point to a DMA-able buffer
+ * @err: I3C error code
+ */
+struct i3c_priv_xfer {
+ u8 rnw;
+ u16 len;
+ union {
+ void *in;
+ const void *out;
+ } data;
+ enum i3c_error_code err;
+};
+
+/**
+ * enum i3c_dcr - I3C DCR values
+ * @I3C_DCR_GENERIC_DEVICE: generic I3C device
+ */
+enum i3c_dcr {
+ I3C_DCR_GENERIC_DEVICE = 0,
+};
+
+#define I3C_PID_MANUF_ID(pid) (((pid) & GENMASK_ULL(47, 33)) >> 33)
+#define I3C_PID_RND_LOWER_32BITS(pid) (!!((pid) & BIT_ULL(32)))
+#define I3C_PID_RND_VAL(pid) ((pid) & GENMASK_ULL(31, 0))
+#define I3C_PID_PART_ID(pid) (((pid) & GENMASK_ULL(31, 16)) >> 16)
+#define I3C_PID_INSTANCE_ID(pid) (((pid) & GENMASK_ULL(15, 12)) >> 12)
+#define I3C_PID_EXTRA_INFO(pid) ((pid) & GENMASK_ULL(11, 0))
+
+#define I3C_BCR_DEVICE_ROLE(bcr) ((bcr) & GENMASK(7, 6))
+#define I3C_BCR_I3C_SLAVE (0 << 6)
+#define I3C_BCR_I3C_MASTER (1 << 6)
+#define I3C_BCR_HDR_CAP BIT(5)
+#define I3C_BCR_BRIDGE BIT(4)
+#define I3C_BCR_OFFLINE_CAP BIT(3)
+#define I3C_BCR_IBI_PAYLOAD BIT(2)
+#define I3C_BCR_IBI_REQ_CAP BIT(1)
+#define I3C_BCR_MAX_DATA_SPEED_LIM BIT(0)
+
+/**
+ * struct i3c_device_info - I3C device information
+ * @pid: Provisional ID
+ * @bcr: Bus Characteristic Register
+ * @dcr: Device Characteristic Register
+ * @static_addr: static/I2C address
+ * @dyn_addr: dynamic address
+ * @hdr_cap: supported HDR modes
+ * @max_read_ds: max read speed information
+ * @max_write_ds: max write speed information
+ * @max_ibi_len: max IBI payload length
+ * @max_read_turnaround: max read turn-around time in micro-seconds
+ * @max_read_len: max private SDR read length in bytes
+ * @max_write_len: max private SDR write length in bytes
+ *
+ * This is all basic information that should be advertised by an I3C device.
+ * Some of them are optional depending on the device type and device
+ * capabilities.
+ * For each I3C slave attached to a master with
+ * i3c_master_add_i3c_dev_locked(), the core will send the relevant CCC command
+ * to retrieve these data.
+ */
+struct i3c_device_info {
+ u64 pid;
+ u8 bcr;
+ u8 dcr;
+ u8 static_addr;
+ u8 dyn_addr;
+ u8 hdr_cap;
+ u8 max_read_ds;
+ u8 max_write_ds;
+ u8 max_ibi_len;
+ u32 max_read_turnaround;
+ u16 max_read_len;
+ u16 max_write_len;
+};
+
+/*
+ * I3C device internals are kept hidden from I3C device users. It's just
+ * simpler to refactor things when everything goes through getter/setters, and
+ * I3C device drivers should not have to worry about internal representation
+ * anyway.
+ */
+struct i3c_device;
+
+/* These macros should be used to fill i3c_device_id entries. */
+#define I3C_MATCH_MANUF_AND_PART (I3C_MATCH_MANUF | I3C_MATCH_PART)
+
+#define I3C_DEVICE(_manufid, _partid, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_MANUF_AND_PART, \
+ .manuf_id = _manufid, \
+ .part_id = _partid, \
+ .data = _drvdata, \
+ }
+
+#define I3C_DEVICE_EXTRA_INFO(_manufid, _partid, _info, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_MANUF_AND_PART | \
+ I3C_MATCH_EXTRA_INFO, \
+ .manuf_id = _manufid, \
+ .part_id = _partid, \
+ .extra_info = _info, \
+ .data = _drvdata, \
+ }
+
+#define I3C_CLASS(_dcr, _drvdata) \
+ { \
+ .match_flags = I3C_MATCH_DCR, \
+ .dcr = _dcr, \
+ }
+
+/**
+ * struct i3c_driver - I3C device driver
+ * @driver: inherit from device_driver
+ * @probe: I3C device probe method
+ * @remove: I3C device remove method
+ * @id_table: I3C device match table. Will be used by the framework to decide
+ * which device to bind to this driver
+ */
+struct i3c_driver {
+ struct device_driver driver;
+ int (*probe)(struct i3c_device *dev);
+ int (*remove)(struct i3c_device *dev);
+ const struct i3c_device_id *id_table;
+};
+
+static inline struct i3c_driver *drv_to_i3cdrv(struct device_driver *drv)
+{
+ return container_of(drv, struct i3c_driver, driver);
+}
+
+struct device *i3cdev_to_dev(struct i3c_device *i3cdev);
+struct i3c_device *dev_to_i3cdev(struct device *dev);
+
+static inline void i3cdev_set_drvdata(struct i3c_device *i3cdev,
+ void *data)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+
+ dev_set_drvdata(dev, data);
+}
+
+static inline void *i3cdev_get_drvdata(struct i3c_device *i3cdev)
+{
+ struct device *dev = i3cdev_to_dev(i3cdev);
+
+ return dev_get_drvdata(dev);
+}
+
+int i3c_driver_register_with_owner(struct i3c_driver *drv,
+ struct module *owner);
+void i3c_driver_unregister(struct i3c_driver *drv);
+
+#define i3c_driver_register(__drv) \
+ i3c_driver_register_with_owner(__drv, THIS_MODULE)
+
+/**
+ * module_i3c_driver() - Register a module providing an I3C driver
+ * @__drv: the I3C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * driver.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_driver(__drv) \
+ module_driver(__drv, i3c_driver_register, i3c_driver_unregister)
+
+/**
+ * i3c_i2c_driver_register() - Register an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to register
+ * @i2cdrv: the I2C driver to register
+ *
+ * This function registers both @i2cdrv and @i3cdrv, and fails if one of these
+ * registrations fails. This is mainly useful for devices that support both I2C
+ * and I3C modes.
+ * Note that when CONFIG_I3C is not enabled, this function only registers the
+ * I2C driver.
+ *
+ * Return: 0 if both registrations succeed, a negative error code otherwise.
+ */
+static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv,
+ struct i2c_driver *i2cdrv)
+{
+ int ret;
+
+ ret = i2c_add_driver(i2cdrv);
+ if (ret || !IS_ENABLED(CONFIG_I3C))
+ return ret;
+
+ ret = i3c_driver_register(i3cdrv);
+ if (ret)
+ i2c_del_driver(i2cdrv);
+
+ return ret;
+}
+
+/**
+ * i3c_i2c_driver_unregister() - Unregister an i2c and an i3c driver
+ * @i3cdrv: the I3C driver to unregister
+ * @i2cdrv: the I2C driver to unregister
+ *
+ * This function unregisters both @i3cdrv and @i2cdrv.
+ * Note that when CONFIG_I3C is not enabled, this function only unregisters the
+ * @i2cdrv.
+ */
+static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
+ struct i2c_driver *i2cdrv)
+{
+ if (IS_ENABLED(CONFIG_I3C))
+ i3c_driver_unregister(i3cdrv);
+
+ i2c_del_driver(i2cdrv);
+}
+
+/**
+ * module_i3c_i2c_driver() - Register a module providing an I3C and an I2C
+ * driver
+ * @__i3cdrv: the I3C driver to register
+ * @__i2cdrv: the I2C driver to register
+ *
+ * Provide generic init/exit functions that simply register/unregister an I3C
+ * and an I2C driver.
+ * This macro can be used even if CONFIG_I3C is disabled, in this case, only
+ * the I2C driver will be registered.
+ * Should be used by any driver that does not require extra init/cleanup steps.
+ */
+#define module_i3c_i2c_driver(__i3cdrv, __i2cdrv) \
+ module_driver(__i3cdrv, \
+ i3c_i2c_driver_register, \
+ i3c_i2c_driver_unregister)
+
+int i3c_device_do_priv_xfers(struct i3c_device *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+
+void i3c_device_get_info(struct i3c_device *dev, struct i3c_device_info *info);
+
+struct i3c_ibi_payload {
+ unsigned int len;
+ const void *data;
+};
+
+/**
+ * struct i3c_ibi_setup - IBI setup object
+ * @max_payload_len: maximum length of the payload associated to an IBI. If one
+ * IBI appears to have a payload that is bigger than this
+ * number, the IBI will be rejected.
+ * @num_slots: number of pre-allocated IBI slots. This should be chosen so that
+ * the system never runs out of IBI slots, otherwise you'll lose
+ * IBIs.
+ * @handler: IBI handler, every time an IBI is received. This handler is called
+ * in a workqueue context. It is allowed to sleep and send new
+ * messages on the bus, though it's recommended to keep the
+ * processing done there as fast as possible to avoid delaying
+ * processing of other IBIs queued on the same workqueue.
+ *
+ * Temporary structure used to pass information to i3c_device_request_ibi().
+ * This object can be allocated on the stack since i3c_device_request_ibi()
+ * copies every bit of information and does not use it after
+ * i3c_device_request_ibi() has returned.
+ */
+struct i3c_ibi_setup {
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+};
+
+int i3c_device_request_ibi(struct i3c_device *dev,
+ const struct i3c_ibi_setup *setup);
+void i3c_device_free_ibi(struct i3c_device *dev);
+int i3c_device_enable_ibi(struct i3c_device *dev);
+int i3c_device_disable_ibi(struct i3c_device *dev);
+
+#endif /* I3C_DEV_H */
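Putting the pieces of this header together, a minimal I3C client driver registers an id table and requests an IBI from probe, following the i3c_ibi_setup documentation above. The device IDs and handler below are placeholders, not a real part:

#include <linux/i3c/device.h>
#include <linux/module.h>

static void example_ibi_handler(struct i3c_device *dev,
				const struct i3c_ibi_payload *payload)
{
	/* Called from the controller workqueue; sleeping is allowed here. */
}

static int example_probe(struct i3c_device *dev)
{
	struct i3c_ibi_setup ibisetup = {
		.max_payload_len = 2,
		.num_slots = 4,
		.handler = example_ibi_handler,
	};
	int ret;

	ret = i3c_device_request_ibi(dev, &ibisetup);
	if (ret)
		return ret;

	return i3c_device_enable_ibi(dev);
}

static int example_remove(struct i3c_device *dev)
{
	i3c_device_disable_ibi(dev);
	i3c_device_free_ibi(dev);
	return 0;
}

static const struct i3c_device_id example_ids[] = {
	I3C_DEVICE(0x123, 0x456, NULL),		/* placeholder manuf/part IDs */
	{ /* sentinel */ },
};

static struct i3c_driver example_driver = {
	.driver		= { .name = "example-i3c" },
	.probe		= example_probe,
	.remove		= example_remove,
	.id_table	= example_ids,
};
module_i3c_driver(example_driver);

MODULE_LICENSE("GPL");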
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
new file mode 100644
index 0000000..f13fd8b
--- /dev/null
+++ b/include/linux/i3c/master.h
@@ -0,0 +1,648 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Cadence Design Systems Inc.
+ *
+ * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ */
+
+#ifndef I3C_MASTER_H
+#define I3C_MASTER_H
+
+#include <asm/bitsperlong.h>
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/i3c/ccc.h>
+#include <linux/i3c/device.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#define I3C_HOT_JOIN_ADDR 0x2
+#define I3C_BROADCAST_ADDR 0x7e
+#define I3C_MAX_ADDR GENMASK(6, 0)
+
+struct i3c_master_controller;
+struct i3c_bus;
+struct i2c_device;
+struct i3c_device;
+
+/**
+ * struct i3c_i2c_dev_desc - Common part of the I3C/I2C device descriptor
+ * @node: node element used to insert the slot into the I2C or I3C device
+ * list
+ * @master: I3C master that instantiated this device. Will be used to do
+ * I2C/I3C transfers
+ * @master_priv: master private data assigned to the device. Can be used to
+ * add master specific information
+ *
+ * This structure is describing common I3C/I2C dev information.
+ */
+struct i3c_i2c_dev_desc {
+ struct list_head node;
+ struct i3c_master_controller *master;
+ void *master_priv;
+};
+
+#define I3C_LVR_I2C_INDEX_MASK GENMASK(7, 5)
+#define I3C_LVR_I2C_INDEX(x) ((x) << 5)
+#define I3C_LVR_I2C_FM_MODE BIT(4)
+
+#define I2C_MAX_ADDR GENMASK(9, 0)
+
+/**
+ * struct i2c_dev_boardinfo - I2C device board information
+ * @node: used to insert the boardinfo object in the I2C boardinfo list
+ * @base: regular I2C board information
+ * @lvr: LVR (Legacy Virtual Register) needed by the I3C core to know about
+ * the I2C device limitations
+ *
+ * This structure is used to attach board-level information to an I2C device.
+ * Each I2C device connected on the I3C bus should have one.
+ */
+struct i2c_dev_boardinfo {
+ struct list_head node;
+ struct i2c_board_info base;
+ u8 lvr;
+};
+
+/**
+ * struct i2c_dev_desc - I2C device descriptor
+ * @common: common part of the I2C device descriptor
+ * @boardinfo: pointer to the boardinfo attached to this I2C device
+ * @dev: I2C device object registered to the I2C framework
+ *
+ * Each I2C device connected on the bus will have an i2c_dev_desc.
+ * This object is created by the core and later attached to the controller
+ * using &struct_i3c_master_controller->ops->attach_i2c_dev().
+ *
+ * &struct_i2c_dev_desc is the internal representation of an I2C device
+ * connected on an I3C bus. This object is also passed to all
+ * &struct_i3c_master_controller_ops hooks.
+ */
+struct i2c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ const struct i2c_dev_boardinfo *boardinfo;
+ struct i2c_client *dev;
+};
+
+/**
+ * struct i3c_ibi_slot - I3C IBI (In-Band Interrupt) slot
+ * @work: work associated to this slot. The IBI handler will be called from
+ * there
+ * @dev: the I3C device that has generated this IBI
+ * @len: length of the payload associated to this IBI
+ * @data: payload buffer
+ *
+ * An IBI slot is an object pre-allocated by the controller and used when an
+ * IBI comes in.
+ * Every time an IBI comes in, the I3C master driver should find a free IBI
+ * slot in its IBI slot pool, retrieve the IBI payload and queue the IBI using
+ * i3c_master_queue_ibi().
+ *
+ * How IBI slots are allocated is left to the I3C master driver, though, for
+ * simple kmalloc-based allocation, the generic IBI slot pool can be used.
+ */
+struct i3c_ibi_slot {
+ struct work_struct work;
+ struct i3c_dev_desc *dev;
+ unsigned int len;
+ void *data;
+};
+
+/**
+ * struct i3c_device_ibi_info - IBI information attached to a specific device
+ * @all_ibis_handled: used to be informed when no more IBIs are waiting to be
+ * processed. Used by i3c_device_disable_ibi() to wait for
+ * all IBIs to be dequeued
+ * @pending_ibis: count the number of pending IBIs. Each pending IBI has its
+ * work element queued to the controller workqueue
+ * @max_payload_len: maximum payload length for an IBI coming from this device.
+ * This value is specified when calling
+ * i3c_device_request_ibi() and should not change at run
+ * time. All IBIs exceeding this limit should be
+ * rejected by the master
+ * @num_slots: number of IBI slots reserved for this device
+ * @enabled: reflect the IBI status
+ * @handler: IBI handler specified at i3c_device_request_ibi() call time. This
+ * handler will be called from the controller workqueue, and as such
+ * is allowed to sleep (though it is recommended to process the IBI
+ * as fast as possible to not stall processing of other IBIs queued
+ * on the same workqueue).
+ * New I3C messages can be sent from the IBI handler
+ *
+ * The &struct_i3c_device_ibi_info object is allocated when
+ * i3c_device_request_ibi() is called and attached to a specific device. This
+ * object is here to manage IBIs coming from a specific I3C device.
+ *
+ * Note that this structure is the generic view of the IBI management
+ * infrastructure. I3C master drivers may have their own internal
+ * representation which they can associate to the device using
+ * controller-private data.
+ */
+struct i3c_device_ibi_info {
+ struct completion all_ibis_handled;
+ atomic_t pending_ibis;
+ unsigned int max_payload_len;
+ unsigned int num_slots;
+ unsigned int enabled;
+ void (*handler)(struct i3c_device *dev,
+ const struct i3c_ibi_payload *payload);
+};
+
+/**
+ * struct i3c_dev_boardinfo - I3C device board information
+ * @node: used to insert the boardinfo object in the I3C boardinfo list
+ * @init_dyn_addr: initial dynamic address requested by the FW. We provide no
+ * guarantee that the device will end up using this address,
+ * but try our best to assign this specific address to the
+ * device
+ * @static_addr: static address the I3C device listens on before it's been
+ * assigned a dynamic address by the master. Will be used during
+ * bus initialization to assign it a specific dynamic address
+ * before starting DAA (Dynamic Address Assignment)
+ * @pid: I3C Provisional ID exposed by the device. This is a unique identifier
+ * that may be used to attach boardinfo to i3c_dev_desc when the device
+ * does not have a static address
+ * @of_node: optional DT node in case the device has been described in the DT
+ *
+ * This structure is used to attach board-level information to an I3C device.
+ * Not all I3C devices connected on the bus will have a boardinfo. It's only
+ * needed if you want to attach extra resources to a device or assign it a
+ * specific dynamic address.
+ */
+struct i3c_dev_boardinfo {
+ struct list_head node;
+ u8 init_dyn_addr;
+ u8 static_addr;
+ u64 pid;
+ struct device_node *of_node;
+};
+
+/**
+ * struct i3c_dev_desc - I3C device descriptor
+ * @common: common part of the I3C device descriptor
+ * @info: I3C device information. Will be automatically filled when you create
+ * your device with i3c_master_add_i3c_dev_locked()
+ * @ibi_lock: lock used to protect the &struct_i3c_dev_desc->ibi
+ * @ibi: IBI info attached to a device. Should be NULL until
+ * i3c_device_request_ibi() is called
+ * @dev: pointer to the I3C device object exposed to I3C device drivers. This
+ * should never be accessed from I3C master controller drivers. Only core
+ * code should manipulate it when updating the dev <-> desc link or
+ * when propagating IBI events to the driver
+ * @boardinfo: pointer to the boardinfo attached to this I3C device
+ *
+ * Internal representation of an I3C device. This object is only used by the
+ * core and passed to I3C master controller drivers when they're requested to
+ * do some operations on the device.
+ * The core maintains the link between the internal I3C dev descriptor and the
+ * object exposed to the I3C device drivers (&struct_i3c_device).
+ */
+struct i3c_dev_desc {
+ struct i3c_i2c_dev_desc common;
+ struct i3c_device_info info;
+ struct mutex ibi_lock;
+ struct i3c_device_ibi_info *ibi;
+ struct i3c_device *dev;
+ const struct i3c_dev_boardinfo *boardinfo;
+};
+
+/**
+ * struct i3c_device - I3C device object
+ * @dev: device object to register the I3C dev to the device model
+ * @desc: pointer to an i3c device descriptor object. This link is updated
+ * every time the I3C device is rediscovered with a different dynamic
+ * address assigned
+ * @bus: I3C bus this device is attached to
+ *
+ * I3C device object exposed to I3C device drivers. The core takes care of
+ * linking this object to the relevant &struct_i3c_dev_desc one.
+ * All I3C devs on the I3C bus are represented, including I3C masters. For each
+ * of them, we have an instance of &struct i3c_device.
+ */
+struct i3c_device {
+ struct device dev;
+ struct i3c_dev_desc *desc;
+ struct i3c_bus *bus;
+};
+
+/*
+ * The I3C specification says the maximum number of devices connected on the
+ * bus is 11, but this number depends on external parameters like trace length,
+ * capacitive load per Device, and the types of Devices present on the Bus.
+ * I3C master can also have limitations, so this number is just here as a
+ * reference and should be adjusted on a per-controller/per-board basis.
+ */
+#define I3C_BUS_MAX_DEVS 11
+
+#define I3C_BUS_MAX_I3C_SCL_RATE 12900000
+#define I3C_BUS_TYP_I3C_SCL_RATE 12500000
+#define I3C_BUS_I2C_FM_PLUS_SCL_RATE 1000000
+#define I3C_BUS_I2C_FM_SCL_RATE 400000
+#define I3C_BUS_TLOW_OD_MIN_NS 200
+
+/**
+ * enum i3c_bus_mode - I3C bus mode
+ * @I3C_BUS_MODE_PURE: only I3C devices are connected to the bus. No limitation
+ * expected
+ * @I3C_BUS_MODE_MIXED_FAST: I2C devices with 50ns spike filter are present on
+ * the bus. The only impact in this mode is that the
+ * high SCL pulse has to stay below 50ns to trick I2C
+ * devices when transmitting I3C frames
+ * @I3C_BUS_MODE_MIXED_SLOW: I2C devices without 50ns spike filter are present
+ * on the bus
+ */
+enum i3c_bus_mode {
+ I3C_BUS_MODE_PURE,
+ I3C_BUS_MODE_MIXED_FAST,
+ I3C_BUS_MODE_MIXED_SLOW,
+};
+
+/**
+ * enum i3c_addr_slot_status - I3C address slot status
+ * @I3C_ADDR_SLOT_FREE: address is free
+ * @I3C_ADDR_SLOT_RSVD: address is reserved
+ * @I3C_ADDR_SLOT_I2C_DEV: address is assigned to an I2C device
+ * @I3C_ADDR_SLOT_I3C_DEV: address is assigned to an I3C device
+ * @I3C_ADDR_SLOT_STATUS_MASK: address slot mask
+ *
+ * On an I3C bus, addresses are assigned dynamically, and we need to know which
+ * addresses are free to use and which ones are already assigned.
+ *
+ * Addresses marked as reserved are those reserved by the I3C protocol
+ * (broadcast address, ...).
+ */
+enum i3c_addr_slot_status {
+ I3C_ADDR_SLOT_FREE,
+ I3C_ADDR_SLOT_RSVD,
+ I3C_ADDR_SLOT_I2C_DEV,
+ I3C_ADDR_SLOT_I3C_DEV,
+ I3C_ADDR_SLOT_STATUS_MASK = 3,
+};
+
+/**
+ * struct i3c_bus - I3C bus object
+ * @cur_master: I3C master currently driving the bus. Since I3C is multi-master
+ * this can change over the time. Will be used to let a master
+ * know whether it needs to request bus ownership before sending
+ * a frame or not
+ * @id: bus ID. Assigned by the framework when the bus is registered
+ * @addrslots: a bitmap with 2 bits per slot to encode the address status and
+ * ease the DAA (Dynamic Address Assignment) procedure (see
+ * &enum i3c_addr_slot_status)
+ * @mode: bus mode (see &enum i3c_bus_mode)
+ * @scl_rate.i3c: maximum rate for the clock signal when doing I3C SDR/priv
+ * transfers
+ * @scl_rate.i2c: maximum rate for the clock signal when doing I2C transfers
+ * @scl_rate: SCL signal rate for I3C and I2C mode
+ * @devs.i3c: contains a list of I3C device descriptors representing I3C
+ * devices connected on the bus and successfully attached to the
+ * I3C master
+ * @devs.i2c: contains a list of I2C device descriptors representing I2C
+ * devices connected on the bus and successfully attached to the
+ * I3C master
+ * @devs: 2 lists containing all I3C/I2C devices connected to the bus
+ * @lock: read/write lock on the bus. This is needed to protect against
+ * operations that have an impact on the whole bus and the devices
+ * connected to it. For example, when asking slaves to drop their
+ * dynamic address (RSTDAA CCC), we need to make sure no one is trying
+ * to send I3C frames to these devices.
+ * Note that this lock does not protect against concurrency between
+ * devices: several drivers can send different I3C/I2C frames through
+ * the same master in parallel. It is the responsibility of the
+ * master to guarantee that frames are actually sent sequentially and
+ * not interleaved
+ *
+ * The I3C bus is represented with its own object and not implicitly described
+ * by the I3C master to cope with the multi-master functionality, where one bus
+ * can be shared amongst several masters, each of them requesting bus ownership
+ * when they need to.
+ */
+struct i3c_bus {
+ struct i3c_dev_desc *cur_master;
+ int id;
+ unsigned long addrslots[((I2C_MAX_ADDR + 1) * 2) / BITS_PER_LONG];
+ enum i3c_bus_mode mode;
+ struct {
+ unsigned long i3c;
+ unsigned long i2c;
+ } scl_rate;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+ } devs;
+ struct rw_semaphore lock;
+};
+
+/**
+ * struct i3c_master_controller_ops - I3C master methods
+ * @bus_init: hook responsible for the I3C bus initialization. You should at
+ * least call master_set_info() from there and set the bus mode.
+ * You can also put controller specific initialization in there.
+ * This method is mandatory.
+ * @bus_cleanup: cleanup everything done in
+ * &i3c_master_controller_ops->bus_init().
+ * This method is optional.
+ * @attach_i3c_dev: called every time an I3C device is attached to the bus. It
+ * can be after a DAA or when a device is statically declared
+ * by the FW, in which case it will only have a static address
+ * and the dynamic address will be 0.
+ * When this function is called, device information has not
+ * been retrieved yet.
+ * This is a good place to attach master controller specific
+ * data to I3C devices.
+ * This method is optional.
+ * @reattach_i3c_dev: called every time an I3C device has its address
+ * changed. It can be because the device has been powered
+ * down and has lost its address, or it can happen when a
+ * device had a static address and has been assigned a
+ * dynamic address with SETDASA.
+ * This method is optional.
+ * @detach_i3c_dev: called when an I3C device is detached from the bus. Usually
+ * happens when the master device is unregistered.
+ * This method is optional.
+ * @do_daa: do a DAA (Dynamic Address Assignment) procedure. This procedure
+ * should send an ENTDAA CCC command and then add all devices
+ * discovered during the DAA using i3c_master_add_i3c_dev_locked().
+ * Devices added with i3c_master_add_i3c_dev_locked() will then be
+ * attached or re-attached to the controller.
+ * This method is mandatory.
+ * @supports_ccc_cmd: should return true if the CCC command is supported, false
+ * otherwise.
+ * This method is optional, if not provided the core assumes
+ * all CCC commands are supported.
+ * @send_ccc_cmd: send a CCC command
+ * This method is mandatory.
+ * @priv_xfers: do one or several private I3C SDR transfers
+ * This method is mandatory.
+ * @attach_i2c_dev: called every time an I2C device is attached to the bus.
+ * This is a good place to attach master controller specific
+ * data to I2C devices.
+ * This method is optional.
+ * @detach_i2c_dev: called when an I2C device is detached from the bus. Usually
+ * happens when the master device is unregistered.
+ * This method is optional.
+ * @i2c_xfers: do one or several I2C transfers. Note that, unlike i3c
+ * transfers, the core does not guarantee that buffers attached to
+ * the transfers are DMA-safe. If drivers want to have DMA-safe
+ * buffers, they should use the i2c_get_dma_safe_msg_buf()
+ * and i2c_put_dma_safe_msg_buf() helpers provided by the I2C
+ * framework.
+ * This method is mandatory.
+ * @i2c_funcs: expose the supported I2C functionalities.
+ * This method is mandatory.
+ * @request_ibi: attach an IBI handler to an I3C device. This implies defining
+ * an IBI handler and the constraints of the IBI (maximum payload
+ * length and number of pre-allocated slots).
+ * Some controllers support fewer IBI-capable devices than regular
+ * devices, so this method might return -%EBUSY if there's no
+ * more space for an extra IBI registration
+ * This method is optional.
+ * @free_ibi: free an IBI previously requested with ->request_ibi(). The IBI
+ * should have been disabled with ->disable_ibi() prior to that
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @enable_ibi: enable the IBI. Only valid if ->request_ibi() has been called
+ * prior to ->enable_ibi(). The controller should first enable
+ * the IBI on the controller end (for example, unmask the hardware
+ * IRQ) and then send the ENEC CCC command (with the IBI flag set)
+ * to the I3C device.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @disable_ibi: disable an IBI. First send the DISEC CCC command with the IBI
+ * flag set and then deactivate the hardware IRQ on the
+ * controller end.
+ * This method is mandatory only if ->request_ibi is not NULL.
+ * @recycle_ibi_slot: recycle an IBI slot. Called every time an IBI has been
+ * processed by its handler. The IBI slot should be put back
+ * in the IBI slot pool so that the controller can re-use it
+ * for a future IBI
+ * This method is mandatory only if ->request_ibi is not
+ * NULL.
+ */
+struct i3c_master_controller_ops {
+ int (*bus_init)(struct i3c_master_controller *master);
+ void (*bus_cleanup)(struct i3c_master_controller *master);
+ int (*attach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*reattach_i3c_dev)(struct i3c_dev_desc *dev, u8 old_dyn_addr);
+ void (*detach_i3c_dev)(struct i3c_dev_desc *dev);
+ int (*do_daa)(struct i3c_master_controller *master);
+ bool (*supports_ccc_cmd)(struct i3c_master_controller *master,
+ const struct i3c_ccc_cmd *cmd);
+ int (*send_ccc_cmd)(struct i3c_master_controller *master,
+ struct i3c_ccc_cmd *cmd);
+ int (*priv_xfers)(struct i3c_dev_desc *dev,
+ struct i3c_priv_xfer *xfers,
+ int nxfers);
+ int (*attach_i2c_dev)(struct i2c_dev_desc *dev);
+ void (*detach_i2c_dev)(struct i2c_dev_desc *dev);
+ int (*i2c_xfers)(struct i2c_dev_desc *dev,
+ const struct i2c_msg *xfers, int nxfers);
+ u32 (*i2c_funcs)(struct i3c_master_controller *master);
+ int (*request_ibi)(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+ void (*free_ibi)(struct i3c_dev_desc *dev);
+ int (*enable_ibi)(struct i3c_dev_desc *dev);
+ int (*disable_ibi)(struct i3c_dev_desc *dev);
+ void (*recycle_ibi_slot)(struct i3c_dev_desc *dev,
+ struct i3c_ibi_slot *slot);
+};
+
+/**
+ * struct i3c_master_controller - I3C master controller object
+ * @dev: device to be registered to the device-model
+ * @this: an I3C device object representing this master. This device will be
+ * added to the list of I3C devs available on the bus
+ * @i2c: I2C adapter used for backward compatibility. This adapter is
+ * registered to the I2C subsystem to be as transparent as possible to
+ * existing I2C drivers
+ * @ops: master operations. See &struct i3c_master_controller_ops
+ * @secondary: true if the master is a secondary master
+ * @init_done: true when the bus initialization is done
+ * @boardinfo.i3c: list of I3C boardinfo objects
+ * @boardinfo.i2c: list of I2C boardinfo objects
+ * @boardinfo: board-level information attached to devices connected on the bus
+ * @bus: I3C bus exposed by this master
+ * @wq: workqueue used to execute IBI handlers. Can also be used by master
+ * drivers if they need to postpone operations that need to take place
+ * in a thread context. A typical example is Hot Join processing, which
+ * requires taking the bus lock in maintenance mode, which in turn can
+ * only be done from a sleepable context
+ *
+ * A &struct i3c_master_controller has to be registered to the I3C subsystem
+ * through i3c_master_register(). None of &struct i3c_master_controller fields
+ * should be set manually, just pass appropriate values to
+ * i3c_master_register().
+ */
+struct i3c_master_controller {
+ struct device dev;
+ struct i3c_dev_desc *this;
+ struct i2c_adapter i2c;
+ const struct i3c_master_controller_ops *ops;
+ unsigned int secondary : 1;
+ unsigned int init_done : 1;
+ struct {
+ struct list_head i3c;
+ struct list_head i2c;
+ } boardinfo;
+ struct i3c_bus bus;
+ struct workqueue_struct *wq;
+};
+
+/**
+ * i3c_bus_for_each_i2cdev() - iterate over all I2C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I2C device descriptor pointer updated to point to the current slot
+ * at each iteration of the loop
+ *
+ * Iterate over all I2C devs present on the bus.
+ */
+#define i3c_bus_for_each_i2cdev(bus, dev) \
+ list_for_each_entry(dev, &(bus)->devs.i2c, common.node)
+
+/**
+ * i3c_bus_for_each_i3cdev() - iterate over all I3C devices present on the bus
+ * @bus: the I3C bus
+ * @dev: an I3C device descriptor pointer updated to point to the current slot
+ * at each iteration of the loop
+ *
+ * Iterate over all I3C devs present on the bus.
+ */
+#define i3c_bus_for_each_i3cdev(bus, dev) \
+ list_for_each_entry(dev, &(bus)->devs.i3c, common.node)
+
+int i3c_master_do_i2c_xfers(struct i3c_master_controller *master,
+ const struct i2c_msg *xfers,
+ int nxfers);
+
+int i3c_master_disec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+int i3c_master_enec_locked(struct i3c_master_controller *master, u8 addr,
+ u8 evts);
+int i3c_master_entdaa_locked(struct i3c_master_controller *master);
+int i3c_master_defslvs_locked(struct i3c_master_controller *master);
+
+int i3c_master_get_free_addr(struct i3c_master_controller *master,
+ u8 start_addr);
+
+int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master,
+ u8 addr);
+int i3c_master_do_daa(struct i3c_master_controller *master);
+
+int i3c_master_set_info(struct i3c_master_controller *master,
+ const struct i3c_device_info *info);
+
+int i3c_master_register(struct i3c_master_controller *master,
+ struct device *parent,
+ const struct i3c_master_controller_ops *ops,
+ bool secondary);
+int i3c_master_unregister(struct i3c_master_controller *master);
+
+/**
+ * i3c_dev_get_master_data() - get master private data attached to an I3C
+ * device descriptor
+ * @dev: the I3C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i3c_dev_set_master_data()
+ * or NULL if no data has been attached to the device.
+ */
+static inline void *i3c_dev_get_master_data(const struct i3c_dev_desc *dev)
+{
+ return dev->common.master_priv;
+}
+
+/**
+ * i3c_dev_set_master_data() - attach master private data to an I3C device
+ * descriptor
+ * @dev: the I3C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i3c_dev_get_master_data().
+ */
+static inline void i3c_dev_set_master_data(struct i3c_dev_desc *dev,
+ void *data)
+{
+ dev->common.master_priv = data;
+}
+
+/**
+ * i2c_dev_get_master_data() - get master private data attached to an I2C
+ * device descriptor
+ * @dev: the I2C device descriptor to get private data from
+ *
+ * Return: the private data previously attached with i2c_dev_set_master_data()
+ * or NULL if no data has been attached to the device.
+ */
+static inline void *i2c_dev_get_master_data(const struct i2c_dev_desc *dev)
+{
+ return dev->common.master_priv;
+}
+
+/**
+ * i2c_dev_set_master_data() - attach master private data to an I2C device
+ * descriptor
+ * @dev: the I2C device descriptor to attach private data to
+ * @data: private data
+ *
+ * This function allows a master controller to attach per-device private data
+ * which can then be retrieved with i2c_dev_get_master_data().
+ */
+static inline void i2c_dev_set_master_data(struct i2c_dev_desc *dev,
+ void *data)
+{
+ dev->common.master_priv = data;
+}
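/*
 * A minimal sketch (illustration only) of how a controller driver might use
 * the master_priv helpers above, keeping a per-device hardware slot index.
 * "struct demo_slot" and the hook names are made up for the example.
 */
#include <linux/i3c/master.h>
#include <linux/slab.h>

struct demo_slot {
	unsigned int hw_index;
};

static int demo_attach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct demo_slot *slot;

	slot = kzalloc(sizeof(*slot), GFP_KERNEL);
	if (!slot)
		return -ENOMEM;

	slot->hw_index = 0;	/* pick a free hardware slot here */
	i3c_dev_set_master_data(dev, slot);

	return 0;
}

static void demo_detach_i3c_dev(struct i3c_dev_desc *dev)
{
	struct demo_slot *slot = i3c_dev_get_master_data(dev);

	i3c_dev_set_master_data(dev, NULL);
	kfree(slot);
}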
+
+/**
+ * i3c_dev_get_master() - get master used to communicate with a device
+ * @dev: I3C dev
+ *
+ * Return: the master controller driving @dev
+ */
+static inline struct i3c_master_controller *
+i3c_dev_get_master(struct i3c_dev_desc *dev)
+{
+ return dev->common.master;
+}
+
+/**
+ * i2c_dev_get_master() - get master used to communicate with a device
+ * @dev: I2C dev
+ *
+ * Return: the master controller driving @dev
+ */
+static inline struct i3c_master_controller *
+i2c_dev_get_master(struct i2c_dev_desc *dev)
+{
+ return dev->common.master;
+}
+
+/**
+ * i3c_master_get_bus() - get the bus attached to a master
+ * @master: master object
+ *
+ * Return: the I3C bus @master is connected to
+ */
+static inline struct i3c_bus *
+i3c_master_get_bus(struct i3c_master_controller *master)
+{
+ return &master->bus;
+}
+
+struct i3c_generic_ibi_pool;
+
+struct i3c_generic_ibi_pool *
+i3c_generic_ibi_alloc_pool(struct i3c_dev_desc *dev,
+ const struct i3c_ibi_setup *req);
+void i3c_generic_ibi_free_pool(struct i3c_generic_ibi_pool *pool);
+
+struct i3c_ibi_slot *
+i3c_generic_ibi_get_free_slot(struct i3c_generic_ibi_pool *pool);
+void i3c_generic_ibi_recycle_slot(struct i3c_generic_ibi_pool *pool,
+ struct i3c_ibi_slot *slot);
+
+void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot);
+
+struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev);
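/*
 * A minimal sketch (illustration only) of the controller-side IBI flow using
 * the generic slot pool declared above. The helper names are made up, and it
 * is assumed the driver stored the pool as per-device master data.
 */
#include <linux/string.h>

static void demo_handle_ibi_irq(struct i3c_dev_desc *dev,
				struct i3c_generic_ibi_pool *pool,
				const void *payload, unsigned int len)
{
	struct i3c_ibi_slot *slot;

	slot = i3c_generic_ibi_get_free_slot(pool);
	if (!slot)
		return;	/* no free slot left: this IBI is lost */

	/* Copy the payload into the slot and hand it to the core. */
	memcpy(slot->data, payload, len);
	slot->len = len;
	i3c_master_queue_ibi(dev, slot);
}

static void demo_recycle_ibi_slot(struct i3c_dev_desc *dev,
				  struct i3c_ibi_slot *slot)
{
	/* ->recycle_ibi_slot() hook: put the slot back into the pool. */
	struct i3c_generic_ibi_pool *pool = i3c_dev_get_master_data(dev);

	i3c_generic_ibi_recycle_slot(pool, slot);
}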
+
+#endif /* I3C_MASTER_H */
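/*
 * A minimal registration sketch (illustration only) for a controller driver
 * built on this header. Only the mandatory hooks are stubbed; the "demo_"
 * names, the platform_device glue and the stub bodies are assumptions.
 */
#include <linux/i3c/master.h>
#include <linux/platform_device.h>

static int demo_bus_init(struct i3c_master_controller *m)
{
	struct i3c_device_info info = { };
	int ret;

	/* Pick a dynamic address for this master and advertise it. */
	ret = i3c_master_get_free_addr(m, 0);
	if (ret < 0)
		return ret;

	info.dyn_addr = ret;

	/* Controller register setup (timings, bus mode, ...) goes here. */
	return i3c_master_set_info(m, &info);
}

static int demo_do_daa(struct i3c_master_controller *m)
{
	/*
	 * Send ENTDAA here, then register every device that won a dynamic
	 * address with i3c_master_add_i3c_dev_locked(m, new_addr).
	 */
	return 0;
}

static int demo_priv_xfers(struct i3c_dev_desc *dev,
			   struct i3c_priv_xfer *xfers, int nxfers)
{
	return -EOPNOTSUPP;	/* issue the SDR transfers here */
}

static int demo_send_ccc_cmd(struct i3c_master_controller *m,
			     struct i3c_ccc_cmd *cmd)
{
	return -EOPNOTSUPP;	/* push the CCC frame to the hardware here */
}

static int demo_i2c_xfers(struct i2c_dev_desc *dev,
			  const struct i2c_msg *xfers, int nxfers)
{
	return -EOPNOTSUPP;	/* issue the I2C transfers here */
}

static u32 demo_i2c_funcs(struct i3c_master_controller *m)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i3c_master_controller_ops demo_ops = {
	.bus_init = demo_bus_init,
	.do_daa = demo_do_daa,
	.priv_xfers = demo_priv_xfers,
	.send_ccc_cmd = demo_send_ccc_cmd,
	.i2c_xfers = demo_i2c_xfers,
	.i2c_funcs = demo_i2c_funcs,
};

static int demo_probe(struct platform_device *pdev)
{
	struct i3c_master_controller *m;

	m = devm_kzalloc(&pdev->dev, sizeof(*m), GFP_KERNEL);
	if (!m)
		return -ENOMEM;

	platform_set_drvdata(pdev, m);

	/* Register as the primary (non-secondary) master on the bus. */
	return i3c_master_register(m, &pdev->dev, &demo_ops, false);
}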
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 9c03a7d..da3a837 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -3185,4 +3185,57 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb)
return true;
}
+struct element {
+ u8 id;
+ u8 datalen;
+ u8 data[];
+};
+
+/* element iteration helpers */
+#define for_each_element(element, _data, _datalen) \
+ for (element = (void *)(_data); \
+ (u8 *)(_data) + (_datalen) - (u8 *)element >= \
+ sizeof(*element) && \
+ (u8 *)(_data) + (_datalen) - (u8 *)element >= \
+ sizeof(*element) + element->datalen; \
+ element = (void *)(element->data + element->datalen))
+
+#define for_each_element_id(element, _id, data, datalen) \
+ for_each_element(element, data, datalen) \
+ if (element->id == (_id))
+
+#define for_each_element_extid(element, extid, data, datalen) \
+ for_each_element(element, data, datalen) \
+ if (element->id == WLAN_EID_EXTENSION && \
+ element->datalen > 0 && \
+ element->data[0] == (extid))
+
+#define for_each_subelement(sub, element) \
+ for_each_element(sub, (element)->data, (element)->datalen)
+
+#define for_each_subelement_id(sub, id, element) \
+ for_each_element_id(sub, id, (element)->data, (element)->datalen)
+
+#define for_each_subelement_extid(sub, extid, element) \
+ for_each_element_extid(sub, extid, (element)->data, (element)->datalen)
+
+/**
+ * for_each_element_completed - determine if element parsing consumed all data
+ * @element: element pointer after for_each_element() or friends
+ * @data: same data pointer as passed to for_each_element() or friends
+ * @datalen: same data length as passed to for_each_element() or friends
+ *
+ * This function returns %true if all the data was parsed or considered
+ * while walking the elements. Only use this if your for_each_element()
+ * loop cannot be broken out of, otherwise it always returns %false.
+ *
+ * If some data was malformed, this returns %false since the last parsed
+ * element will not fill the whole remaining data.
+ */
+static inline bool for_each_element_completed(const struct element *element,
+ const void *data, size_t datalen)
+{
+ return (u8 *)element == (u8 *)data + datalen;
+}
+
#endif /* LINUX_IEEE80211_H */
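/*
 * A minimal sketch (illustration only) of walking a buffer of information
 * elements with the helpers above; "ies"/"ies_len" are assumed to point at a
 * raw sequence of (id, len, data) triplets such as the IE part of a beacon.
 */
#include <linux/ieee80211.h>
#include <linux/printk.h>

static bool demo_dump_elements(const u8 *ies, size_t ies_len)
{
	const struct element *elem;

	for_each_element(elem, ies, ies_len)
		pr_debug("element id %u, %u byte(s) of data\n",
			 elem->id, elem->datalen);

	/* Only meaningful because the loop above is never broken out of. */
	return for_each_element_completed(elem, ies, ies_len);
}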
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c926698..a03d5e2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -694,7 +694,8 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
- void *data, int offset, unsigned long len);
+ void *data, unsigned int offset,
+ unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 97a020c..ce16efb 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1516,6 +1516,8 @@ union security_list_options {
size_t *len);
int (*inode_create)(struct inode *dir, struct dentry *dentry,
umode_t mode);
+ int (*inode_post_create)(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
int (*inode_link)(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int (*inode_unlink)(struct inode *dir, struct dentry *dentry);
@@ -1830,6 +1832,7 @@ struct security_hook_heads {
struct hlist_head inode_free_security;
struct hlist_head inode_init_security;
struct hlist_head inode_create;
+ struct hlist_head inode_post_create;
struct hlist_head inode_link;
struct hlist_head inode_unlink;
struct hlist_head inode_symlink;
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 88a041b..bbcfe2e 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1321,7 +1321,7 @@ enum {
static inline const struct cpumask *
mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
{
- return dev->priv.irq_info[vector].mask;
+ return dev->priv.irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask;
}
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 5957349..353bbc9 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -396,6 +396,7 @@ struct mmc_card {
struct notifier_block reboot_notify;
enum mmc_pon_type pon_type;
struct mmc_bkops_info bkops;
+ struct workqueue_struct *complete_wq; /* Private workqueue */
};
static inline bool mmc_large_sector(struct mmc_card *card)
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 1a852ff..12b42dd 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -448,6 +448,23 @@ struct pci_epf_device_id {
kernel_ulong_t driver_data;
};
+/* i3c */
+
+#define I3C_MATCH_DCR 0x1
+#define I3C_MATCH_MANUF 0x2
+#define I3C_MATCH_PART 0x4
+#define I3C_MATCH_EXTRA_INFO 0x8
+
+struct i3c_device_id {
+ __u8 match_flags;
+ __u8 dcr;
+ __u16 manuf_id;
+ __u16 part_id;
+ __u16 extra_info;
+
+ const void *data;
+};
+
/* spi */
#define SPI_NAME_SIZE 32
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 2b2a6dc..4c76fe2 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -11,6 +11,8 @@
#define _LINUX_NETDEV_FEATURES_H
#include <linux/types.h>
+#include <linux/bitops.h>
+#include <asm/byteorder.h>
typedef u64 netdev_features_t;
@@ -154,8 +156,26 @@ enum {
#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
-#define for_each_netdev_feature(mask_addr, bit) \
- for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
+/* Finds the next feature with the highest number in the range from start down to 0.
+ */
+static inline int find_next_netdev_feature(u64 feature, unsigned long start)
+{
+ /* like BITMAP_LAST_WORD_MASK() for u64
+ * this sets the most significant 64 - start bits to 0.
+ */
+ feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
+
+ return fls64(feature) - 1;
+}
+
+/* This iterates from the MSB to the LSB through the set feature bits;
+ * mask_addr should be a u64 and bit an int.
+ */
+#define for_each_netdev_feature(mask_addr, bit) \
+ for ((bit) = find_next_netdev_feature((mask_addr), \
+ NETDEV_FEATURE_COUNT); \
+ (bit) >= 0; \
+ (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
/* Features valid for ethtool to change */
/* = all defined minus driver/device-class-related */
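/*
 * A minimal sketch (illustration only) of the MSB-to-LSB walk: with the
 * made-up mask below the loop prints NETIF_F_HIGHDMA_BIT first and then
 * NETIF_F_HW_CSUM_BIT before terminating.
 */
#include <linux/netdev_features.h>
#include <linux/printk.h>

static void demo_walk_features(void)
{
	netdev_features_t mask = NETIF_F_HIGHDMA | NETIF_F_HW_CSUM;
	int bit;

	for_each_netdev_feature(mask, bit)
		pr_info("feature bit %d is set\n", bit);
}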
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 1214cab..8634250 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -32,6 +32,7 @@ static inline int of_irq_parse_oldworld(struct device_node *device, int index,
}
#endif /* CONFIG_PPC32 && CONFIG_PPC_PMAC */
+extern int of_irq_domain_map(const struct irq_fwspec *in, struct irq_fwspec *out);
extern int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq);
extern int of_irq_parse_one(struct device_node *device, int index,
struct of_phandle_args *out_irq);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4f3baec..c337495 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -449,6 +449,11 @@ struct pmu {
* Filter events for PMU-specific reasons.
*/
int (*filter_match) (struct perf_event *event); /* optional */
+
+ /*
+ * Check period value for PERF_EVENT_IOC_PERIOD ioctl.
+ */
+ int (*check_period) (struct perf_event *event, u64 value); /* optional */
};
enum perf_addr_filter_action_t {
diff --git a/include/linux/psi.h b/include/linux/psi.h
index b0daf05..7006008 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -1,12 +1,16 @@
#ifndef _LINUX_PSI_H
#define _LINUX_PSI_H
+#include <linux/jump_label.h>
#include <linux/psi_types.h>
#include <linux/sched.h>
+struct seq_file;
+struct css_set;
+
#ifdef CONFIG_PSI
-extern bool psi_disabled;
+extern struct static_key_false psi_disabled;
void psi_init(void);
@@ -16,6 +20,14 @@ void psi_memstall_tick(struct task_struct *task, int cpu);
void psi_memstall_enter(unsigned long *flags);
void psi_memstall_leave(unsigned long *flags);
+int psi_show(struct seq_file *s, struct psi_group *group, enum psi_res res);
+
+#ifdef CONFIG_CGROUPS
+int psi_cgroup_alloc(struct cgroup *cgrp);
+void psi_cgroup_free(struct cgroup *cgrp);
+void cgroup_move_task(struct task_struct *p, struct css_set *to);
+#endif
+
#else /* CONFIG_PSI */
static inline void psi_init(void) {}
@@ -23,6 +35,20 @@ static inline void psi_init(void) {}
static inline void psi_memstall_enter(unsigned long *flags) {}
static inline void psi_memstall_leave(unsigned long *flags) {}
+#ifdef CONFIG_CGROUPS
+static inline int psi_cgroup_alloc(struct cgroup *cgrp)
+{
+ return 0;
+}
+static inline void psi_cgroup_free(struct cgroup *cgrp)
+{
+}
+static inline void cgroup_move_task(struct task_struct *p, struct css_set *to)
+{
+ rcu_assign_pointer(p->cgroups, to);
+}
+#endif
+
#endif /* CONFIG_PSI */
#endif /* _LINUX_PSI_H */
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 56b803d..9df8d9b 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -31,6 +31,7 @@ enum se_protocol_types {
/**
* struct geni_se_rsc - GENI Serial Engine Resource
* @ctrl_dev: Pointer to the controller device.
* @wrapper_dev: Pointer to the parent QUPv3 core.
* @se_clk: Handle to the core serial engine clock.
* @m_ahb_clk: Handle to the primary AHB clock.
@@ -44,6 +45,7 @@ enum se_protocol_types {
* @geni_gpi_sleep: Handle to the sleep pinctrl state.
*/
struct se_geni_rsc {
+ struct device *ctrl_dev;
struct device *wrapper_dev;
struct clk *se_clk;
struct clk *m_ahb_clk;
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h
index 59ddf9a..2dd0a9e 100644
--- a/include/linux/qed/qed_chain.h
+++ b/include/linux/qed/qed_chain.h
@@ -663,6 +663,37 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain)
static inline void qed_chain_set_prod(struct qed_chain *p_chain,
u32 prod_idx, void *p_prod_elem)
{
+ if (p_chain->mode == QED_CHAIN_MODE_PBL) {
+ u32 cur_prod, page_mask, page_cnt, page_diff;
+
+ cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx :
+ p_chain->u.chain32.prod_idx;
+
+ /* Assume that number of elements in a page is power of 2 */
+ page_mask = ~p_chain->elem_per_page_mask;
+
+ /* Use "cur_prod - 1" and "prod_idx - 1" since producer index
+ * reaches the first element of next page before the page index
+ * is incremented. See qed_chain_produce().
+ * Index wrap around is not a problem because the difference
+ * between current and given producer indices is always
+ * positive and lower than the chain's capacity.
+ */
+ page_diff = (((cur_prod - 1) & page_mask) -
+ ((prod_idx - 1) & page_mask)) /
+ p_chain->elem_per_page;
+
+ page_cnt = qed_chain_get_page_cnt(p_chain);
+ if (is_chain_u16(p_chain))
+ p_chain->pbl.c.u16.prod_page_idx =
+ (p_chain->pbl.c.u16.prod_page_idx -
+ page_diff + page_cnt) % page_cnt;
+ else
+ p_chain->pbl.c.u32.prod_page_idx =
+ (p_chain->pbl.c.u32.prod_page_idx -
+ page_diff + page_cnt) % page_cnt;
+ }
+
if (is_chain_u16(p_chain))
p_chain->u.chain16.prod_idx = (u16) prod_idx;
else
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 5981923..229b7c2 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -33,9 +33,6 @@ extern unsigned int sysctl_sched_child_runs_first;
extern unsigned int sysctl_sched_capacity_margin_up[MAX_MARGIN_LEVELS];
extern unsigned int sysctl_sched_capacity_margin_down[MAX_MARGIN_LEVELS];
#ifdef CONFIG_SCHED_WALT
-extern unsigned int sysctl_sched_use_walt_cpu_util;
-extern unsigned int sysctl_sched_use_walt_task_util;
-extern unsigned int sysctl_sched_walt_init_task_load_pct;
extern unsigned int sysctl_sched_cpu_high_irqload;
extern unsigned int sysctl_sched_boost;
extern unsigned int sysctl_sched_group_upmigrate_pct;
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index abe28d5..3423919 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -13,6 +13,8 @@ extern void generic_sched_clock_init(void);
extern void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate);
+extern int sched_clock_suspend(void);
+extern void sched_clock_resume(void);
#else
static inline void generic_sched_clock_init(void) { }
@@ -20,6 +22,8 @@ static inline void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate)
{
}
+static inline int sched_clock_suspend(void) { return 0; }
+static inline void sched_clock_resume(void) { }
#endif
#endif
diff --git a/include/linux/security.h b/include/linux/security.h
index 75f4156..c115dd2 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -31,6 +31,7 @@
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
+#include <linux/bio.h>
struct linux_binprm;
struct cred;
@@ -283,6 +284,8 @@ int security_old_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr, const char **name,
void **value, size_t *len);
int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode);
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry);
int security_inode_unlink(struct inode *dir, struct dentry *dentry);
@@ -671,6 +674,13 @@ static inline int security_inode_create(struct inode *dir,
return 0;
}
+static inline int security_inode_post_create(struct inode *dir,
+ struct dentry *dentry,
+ umode_t mode)
+{
+ return 0;
+}
+
static inline int security_inode_link(struct dentry *old_dentry,
struct inode *dir,
struct dentry *new_dentry)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5d69e20..a404d47 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2392,7 +2392,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
skb_set_transport_header(skb, keys.control.thoff);
- else
+ else if (offset_hint >= 0)
skb_set_transport_header(skb, offset_hint);
}
diff --git a/include/linux/soc/qcom/irq.h b/include/linux/soc/qcom/irq.h
new file mode 100644
index 0000000..0010148
--- /dev/null
+++ b/include/linux/soc/qcom/irq.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __QCOM_IRQ_H
+#define __QCOM_IRQ_H
+
+#include <linux/irqdomain.h>
+
+/**
+ * struct qcom_irq_fwspec - qcom specific irq fwspec wrapper
+ * @fwspec: irq fwspec
+ * @mask: if true, keep the irq masked in the gpio controller
+ *
+ * Use this structure to communicate from the parent irq chip, MPM or PDC,
+ * to the gpio chip, TLMM, that the gpio is being allocated in the parent
+ * and whether the gpio chip should keep the line masked because the parent
+ * irq chip is handling everything about the irq line.
+ */
+struct qcom_irq_fwspec {
+ struct irq_fwspec fwspec;
+ bool mask;
+};
+
+#endif
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 336fd1a..f30bf50 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -443,6 +443,11 @@ static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt)
return test_and_set_bit(XPRT_CONNECTING, &xprt->state);
}
+static inline int xprt_close_wait(struct rpc_xprt *xprt)
+{
+ return test_bit(XPRT_CLOSE_WAIT, &xprt->state);
+}
+
static inline void xprt_set_bound(struct rpc_xprt *xprt)
{
test_and_set_bit(XPRT_BOUND, &xprt->state);
diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h
new file mode 100644
index 0000000..8def143
--- /dev/null
+++ b/include/linux/usb/f_mtp.h
@@ -0,0 +1,53 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_MTP_H
+#define __LINUX_USB_F_MTP_H
+
+#include <uapi/linux/usb/f_mtp.h>
+#include <linux/ioctl.h>
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+#endif
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_COMPAT
+struct __compat_mtp_file_range {
+ compat_int_t fd;
+ compat_loff_t offset;
+ int64_t length;
+ uint16_t command;
+ uint32_t transaction_id;
+};
+
+struct __compat_mtp_event {
+ compat_size_t length;
+ compat_caddr_t data;
+};
+
+#define COMPAT_MTP_SEND_FILE _IOW('M', 0, \
+ struct __compat_mtp_file_range)
+#define COMPAT_MTP_RECEIVE_FILE _IOW('M', 1, \
+ struct __compat_mtp_file_range)
+#define COMPAT_MTP_SEND_EVENT _IOW('M', 3, \
+ struct __compat_mtp_event)
+#define COMPAT_MTP_SEND_FILE_WITH_HEADER _IOW('M', 4, \
+ struct __compat_mtp_file_range)
+#endif
+#endif
+#endif /* __LINUX_USB_F_MTP_H */
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index cb462f9..e0348cb 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
if (!skb_partial_csum_set(skb, start, off))
return -EINVAL;
+ } else {
+ /* gso packets without NEEDS_CSUM do not set transport_offset.
+ * probe and drop if it does not match one of the above types.
+ */
+ if (gso_type && skb->network_header) {
+ if (!skb->protocol)
+ virtio_net_hdr_set_proto(skb, hdr);
+retry:
+ skb_probe_transport_header(skb, -1);
+ if (!skb_transport_header_was_set(skb)) {
+ /* UFO does not specify ipv4 or 6: try both */
+ if (gso_type & SKB_GSO_UDP &&
+ skb->protocol == htons(ETH_P_IP)) {
+ skb->protocol = htons(ETH_P_IPV6);
+ goto retry;
+ }
+ return -EINVAL;
+ }
+ }
}
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 3f9aea8..8b7eb46 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -201,6 +201,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
void __ax25_put_route(ax25_route *ax25_rt);
+extern rwlock_t ax25_route_lock;
+
+static inline void ax25_route_lock_use(void)
+{
+ read_lock(&ax25_route_lock);
+}
+
+static inline void ax25_route_lock_unuse(void)
+{
+ read_unlock(&ax25_route_lock);
+}
+
static inline void ax25_put_route(ax25_route *ax25_rt)
{
if (refcount_dec_and_test(&ax25_rt->refcount))
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 80e2183..3c56de5a 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -6,7 +6,7 @@
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018-2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -2008,6 +2008,8 @@ struct cfg80211_bss_ies {
* @signal: signal strength value (type depends on the wiphy's signal_type)
* @chains: bitmask for filled values in @chain_signal.
* @chain_signal: per-chain signal strength of last received BSS in dBm.
+ * @bssid_index: index in the multiple BSS set
+ * @max_bssid_indicator: max number of members in the BSS set
* @priv: private area for driver use, has at least wiphy->bss_priv_size bytes
*/
struct cfg80211_bss {
@@ -2019,6 +2021,8 @@ struct cfg80211_bss {
const struct cfg80211_bss_ies __rcu *proberesp_ies;
struct cfg80211_bss *hidden_beacon_bss;
+ struct cfg80211_bss *transmitted_bss;
+ struct list_head nontrans_list;
s32 signal;
@@ -2029,19 +2033,36 @@ struct cfg80211_bss {
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
+ u8 bssid_index;
+ u8 max_bssid_indicator;
+
u8 priv[0] __aligned(sizeof(void *));
};
/**
- * ieee80211_bss_get_ie - find IE with given ID
+ * ieee80211_bss_get_elem - find element with given ID
* @bss: the bss to search
- * @ie: the IE ID
+ * @id: the element ID
*
* Note that the return value is an RCU-protected pointer, so
* rcu_read_lock() must be held when calling this function.
* Return: %NULL if not found.
*/
-const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie);
+const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id);
+
+/**
+ * ieee80211_bss_get_ie - find IE with given ID
+ * @bss: the bss to search
+ * @id: the element ID
+ *
+ * Note that the return value is an RCU-protected pointer, so
+ * rcu_read_lock() must be held when calling this function.
+ * Return: %NULL if not found.
+ */
+static inline const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 id)
+{
+ return (void *)ieee80211_bss_get_elem(bss, id);
+}
/**
@@ -3999,6 +4020,12 @@ struct wiphy_iftype_ext_capab {
* @txq_limit: configuration of internal TX queue frame limit
* @txq_memory_limit: configuration internal TX queue memory limit
* @txq_quantum: configuration of internal TX queue scheduler quantum
+ *
+ * @support_mbssid: can HW support association with nontransmitted AP
+ * @support_only_he_mbssid: don't parse MBSSID elements if it is not an
+ * HE AP, in order to avoid compatibility issues.
+ * @support_mbssid must be set for this to have any effect.
+ *
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -4137,6 +4164,9 @@ struct wiphy {
u32 txq_memory_limit;
u32 txq_quantum;
+ u8 support_mbssid:1,
+ support_only_he_mbssid:1;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -4677,6 +4707,33 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
struct cfg80211_qos_map *qos_map);
/**
+ * cfg80211_find_elem_match - match information element and byte array in data
+ *
+ * @eid: element ID
+ * @ies: data consisting of IEs
+ * @len: length of data
+ * @match: byte array to match
+ * @match_len: number of bytes in the match array
+ * @match_offset: offset in the IE data where the byte array should match.
+ * Note the difference to cfg80211_find_ie_match() which considers
+ * the offset to start from the element ID byte, but here we take
+ * the data portion instead.
+ *
+ * Return: %NULL if the element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match; otherwise return the
+ * requested element struct.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data and being large enough for the
+ * byte array to match.
+ */
+const struct element *
+cfg80211_find_elem_match(u8 eid, const u8 *ies, unsigned int len,
+ const u8 *match, unsigned int match_len,
+ unsigned int match_offset);
+
+/**
* cfg80211_find_ie_match - match information element and byte array in data
*
* @eid: element ID
@@ -4700,9 +4757,44 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
* having to fit into the given data and being large enough for the
* byte array to match.
*/
-const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
- const u8 *match, int match_len,
- int match_offset);
+static inline const u8 *
+cfg80211_find_ie_match(u8 eid, const u8 *ies, unsigned int len,
+ const u8 *match, unsigned int match_len,
+ unsigned int match_offset)
+{
+ /* match_offset can't be smaller than 2, unless match_len is
+ * zero, in which case match_offset must be zero as well.
+ */
+ if (WARN_ON((match_len && match_offset < 2) ||
+ (!match_len && match_offset)))
+ return NULL;
+
+ return (void *)cfg80211_find_elem_match(eid, ies, len,
+ match, match_len,
+ match_offset ?
+ match_offset - 2 : 0);
+}
+
+/**
+ * cfg80211_find_elem - find information element in data
+ *
+ * @eid: element ID
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the element ID could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match; otherwise return the
+ * requested element struct.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data.
+ */
+static inline const struct element *
+cfg80211_find_elem(u8 eid, const u8 *ies, int len)
+{
+ return cfg80211_find_elem_match(eid, ies, len, NULL, 0, 0);
+}
/**
* cfg80211_find_ie - find information element in data
@@ -4725,6 +4817,28 @@ static inline const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len)
}
/**
+ * cfg80211_find_ext_elem - find information element with EID Extension in data
+ *
+ * @ext_eid: element ID Extension
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the extended element could not be found or if
+ * the element is invalid (claims to be longer than the given
+ * data) or if the byte array doesn't match; otherwise return the
+ * requested element struct.
+ *
+ * Note: There are no checks on the element length other than
+ * having to fit into the given data.
+ */
+static inline const struct element *
+cfg80211_find_ext_elem(u8 ext_eid, const u8 *ies, int len)
+{
+ return cfg80211_find_elem_match(WLAN_EID_EXTENSION, ies, len,
+ &ext_eid, 1, 0);
+}
+
+/**
* cfg80211_find_ext_ie - find information element with EID Extension in data
*
* @ext_eid: element ID Extension
@@ -4746,6 +4860,25 @@ static inline const u8 *cfg80211_find_ext_ie(u8 ext_eid, const u8 *ies, int len)
}
/**
+ * cfg80211_find_vendor_elem - find vendor specific information element in data
+ *
+ * @oui: vendor OUI
+ * @oui_type: vendor-specific OUI type (must be < 0xff), negative means any
+ * @ies: data consisting of IEs
+ * @len: length of data
+ *
+ * Return: %NULL if the vendor specific element ID could not be found or if the
+ * element is invalid (claims to be longer than the given data); otherwise
+ * return the element structure for the requested element.
+ *
+ * Note: There are no checks on the element length other than having to fit into
+ * the given data.
+ */
+const struct element *cfg80211_find_vendor_elem(unsigned int oui, int oui_type,
+ const u8 *ies,
+ unsigned int len);
+
+/**
* cfg80211_find_vendor_ie - find vendor specific information element in data
*
* @oui: vendor OUI
@@ -4761,8 +4894,12 @@ static inline const u8 *cfg80211_find_ext_ie(u8 ext_eid, const u8 *ies, int len)
* Note: There are no checks on the element length other than having to fit into
* the given data.
*/
-const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
- const u8 *ies, int len);
+static inline const u8 *
+cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
+ const u8 *ies, unsigned int len)
+{
+ return (void *)cfg80211_find_vendor_elem(oui, oui_type, ies, len);
+}
/**
* DOC: Regulatory enforcement infrastructure
@@ -4997,6 +5134,29 @@ cfg80211_inform_bss_frame(struct wiphy *wiphy,
}
/**
+ * cfg80211_gen_new_bssid - generate a nontransmitted BSSID for multi-BSSID
+ * @bssid: transmitter BSSID
+ * @max_bssid: max BSSID indicator, taken from Multiple BSSID element
+ * @mbssid_index: BSSID index, taken from Multiple BSSID index element
+ * @new_bssid_addr: address of the resulting BSSID
+ */
+static inline void cfg80211_gen_new_bssid(const u8 *bssid, u8 max_bssid,
+ u8 mbssid_index, u8 *new_bssid_addr)
+{
+ u64 bssid_tmp, new_bssid;
+ u64 lsb_n;
+
+ bssid_tmp = ether_addr_to_u64(bssid);
+
+ lsb_n = bssid_tmp & ((1 << max_bssid) - 1);
+ new_bssid = bssid_tmp;
+ new_bssid &= ~((1 << max_bssid) - 1);
+ new_bssid |= (lsb_n + mbssid_index) % (1 << max_bssid);
+
+ u64_to_ether_addr(new_bssid, new_bssid_addr);
+}
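/*
 * A quick worked example (illustration only) of the derivation above: with
 * max_bssid = 4 only the low four address bits change, modulo 16. The
 * addresses are made up.
 */
static void demo_gen_bssid(void)
{
	static const u8 tx_bssid[ETH_ALEN] = {
		0x00, 0x11, 0x22, 0x33, 0x44, 0x50 };
	u8 bssid[ETH_ALEN];

	/* lsb_n = 0x0, index 3  -> low nibble 0x3 -> 00:11:22:33:44:53 */
	cfg80211_gen_new_bssid(tx_bssid, 4, 3, bssid);

	/* lsb_n = 0x0, index 18 -> 18 % 16 = 2   -> 00:11:22:33:44:52 */
	cfg80211_gen_new_bssid(tx_bssid, 4, 18, bssid);
}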
+
+/**
* enum cfg80211_bss_frame_type - frame type that the BSS data came from
* @CFG80211_BSS_FTYPE_UNKNOWN: driver doesn't know whether the data is
* from a beacon or probe response
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
index 93cb55d..922e661 100644
--- a/include/net/cnss2.h
+++ b/include/net/cnss2.h
@@ -160,6 +160,7 @@ extern int cnss_get_fw_files_for_target(struct device *dev,
extern int cnss_get_platform_cap(struct device *dev,
struct cnss_platform_cap *cap);
extern struct dma_iommu_mapping *cnss_smmu_get_mapping(struct device *dev);
+extern struct iommu_domain *cnss_smmu_get_domain(struct device *dev);
extern int cnss_smmu_map(struct device *dev,
phys_addr_t paddr, uint32_t *iova_addr, size_t size);
extern int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info);
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 00b5e78..74ff688 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -39,6 +39,7 @@ struct inet_peer {
u32 metrics[RTAX_MAX];
u32 rate_tokens; /* rate limiting for ICMP */
+ u32 n_redirects;
unsigned long rate_last;
/*
* Once inet_peer is queued for deletion (refcnt == 0), following field
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 0e355f4..0a3de10 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -84,7 +84,6 @@ struct flow_offload {
struct nf_flow_route {
struct {
struct dst_entry *dst;
- int ifindex;
} tuple[FLOW_OFFLOAD_DIR_MAX];
};
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 4c6289d..fe73e74 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -648,6 +648,9 @@ struct Scsi_Host {
/* The controller does not support WRITE SAME */
unsigned no_write_same:1;
+ /* Inline encryption support? */
+ unsigned inlinecrypt_support:1;
+
unsigned use_blk_mq:1;
unsigned use_cmd_list:1;
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 8e52178..b52d4a0 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -177,7 +177,11 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream)
if (snd_BUG_ON(!stream))
return;
- stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ if (stream->direction == SND_COMPRESS_PLAYBACK)
+ stream->runtime->state = SNDRV_PCM_STATE_SETUP;
+ else
+ stream->runtime->state = SNDRV_PCM_STATE_PREPARED;
+
wake_up(&stream->runtime->sleep);
}
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 305576e..872a60f 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -205,11 +205,11 @@ TRACE_EVENT(sched_switch,
TP_fast_assign(
memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
__entry->prev_pid = prev->pid;
- __entry->prev_prio = prev->prio;
+ __entry->prev_prio = prev->prio == -1 ? 150 : prev->prio;
__entry->prev_state = __trace_sched_switch_state(preempt, prev);
memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
- __entry->next_prio = next->prio;
+ __entry->next_prio = next->prio == -1 ? 150 : next->prio;
/* XXX SCHED_DEADLINE */
),
@@ -940,10 +940,7 @@ TRACE_EVENT(sched_load_rt_rq,
);
#ifdef CONFIG_SCHED_WALT
-extern unsigned int sysctl_sched_use_walt_cpu_util;
-extern unsigned int sysctl_sched_use_walt_task_util;
extern unsigned int sched_ravg_window;
-extern unsigned int walt_disabled;
#endif
/*
@@ -973,8 +970,7 @@ TRACE_EVENT(sched_load_avg_cpu,
__entry->util_avg_walt = div64_ul(cpu_rq(cpu)->prev_runnable_sum,
sched_ravg_window >> SCHED_CAPACITY_SHIFT);
- if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
- __entry->util_avg = __entry->util_avg_walt;
+ __entry->util_avg = __entry->util_avg_walt;
#endif
),
@@ -1002,8 +998,6 @@ TRACE_EVENT(sched_load_se,
__field( unsigned long, load )
__field( unsigned long, rbl_load )
__field( unsigned long, util )
- __field( unsigned long, util_pelt )
- __field( u32, util_walt )
),
TP_fast_assign(
@@ -1020,22 +1014,11 @@ TRACE_EVENT(sched_load_se,
__entry->load = se->avg.load_avg;
__entry->rbl_load = se->avg.runnable_load_avg;
__entry->util = se->avg.util_avg;
- __entry->util_pelt = __entry->util;
- __entry->util_walt = 0;
-#ifdef CONFIG_SCHED_WALT
- if (!se->my_q) {
- struct task_struct *p = container_of(se, struct task_struct, se);
- __entry->util_walt = p->ravg.demand / (sched_ravg_window >> SCHED_CAPACITY_SHIFT);
- if (!walt_disabled && sysctl_sched_use_walt_task_util)
- __entry->util = __entry->util_walt;
- }
-#endif
),
- TP_printk("cpu=%d path=%s comm=%s pid=%d load=%lu rbl_load=%lu util=%lu util_pelt=%lu util_walt=%u",
+ TP_printk("cpu=%d path=%s comm=%s pid=%d load=%lu rbl_load=%lu util=%lu",
__entry->cpu, __get_str(path), __entry->comm, __entry->pid,
- __entry->load, __entry->rbl_load, __entry->util,
- __entry->util_pelt, __entry->util_walt)
+ __entry->load, __entry->rbl_load, __entry->util)
);
/*
diff --git a/include/trace/events/walt.h b/include/trace/events/walt.h
index 2c10a02..c31c67a 100644
--- a/include/trace/events/walt.h
+++ b/include/trace/events/walt.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#ifdef CONFIG_SCHED_WALT
@@ -491,57 +491,6 @@ TRACE_EVENT(sched_load_balance_skip_tasks,
__entry->affinity, __entry->task_util, __entry->h_load)
);
-DECLARE_EVENT_CLASS(sched_cpu_load,
-
- TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),
-
- TP_ARGS(rq, idle, irqload, power_cost),
-
- TP_STRUCT__entry(
- __field(unsigned int, cpu)
- __field(unsigned int, idle)
- __field(unsigned int, nr_running)
- __field(unsigned int, nr_big_tasks)
- __field(unsigned int, load_scale_factor)
- __field(unsigned int, capacity)
- __field(u64, cumulative_runnable_avg)
- __field(u64, irqload)
- __field(unsigned int, max_freq)
- __field(unsigned int, power_cost)
- __field(int, cstate)
- __field(int, dstate)
- ),
-
- TP_fast_assign(
- __entry->cpu = rq->cpu;
- __entry->idle = idle;
- __entry->nr_running = rq->nr_running;
- __entry->nr_big_tasks = rq->walt_stats.nr_big_tasks;
- __entry->load_scale_factor =
- cpu_load_scale_factor(rq->cpu);
- __entry->capacity = cpu_capacity(rq->cpu);
- __entry->cumulative_runnable_avg =
- rq->walt_stats.cumulative_runnable_avg_scaled;
- __entry->irqload = irqload;
- __entry->max_freq = cpu_max_freq(rq->cpu);
- __entry->power_cost = power_cost;
- __entry->cstate = rq->cstate;
- __entry->dstate = rq->cluster->dstate;
- ),
-
- TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d",
- __entry->cpu, __entry->idle, __entry->nr_running,
- __entry->nr_big_tasks, __entry->load_scale_factor,
- __entry->capacity, __entry->cumulative_runnable_avg,
- __entry->irqload, __entry->max_freq, __entry->power_cost,
- __entry->cstate, __entry->dstate)
-);
-
-DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
- TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost),
- TP_ARGS(rq, idle, irqload, power_cost)
-);
-
TRACE_EVENT(sched_load_to_gov,
TP_PROTO(struct rq *rq, u64 aggr_grp_load, u32 tt_load,
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 086e7ee..245f38c 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -283,6 +283,7 @@ struct fsxattr {
#define FS_ENCRYPTION_MODE_SPECK128_256_XTS 7 /* Removed, do not use. */
#define FS_ENCRYPTION_MODE_SPECK128_256_CTS 8 /* Removed, do not use. */
#define FS_ENCRYPTION_MODE_ADIANTUM 9
+#define FS_ENCRYPTION_MODE_PRIVATE 127
struct fscrypt_policy {
__u8 version;
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h
index 14565d7..e8baca8 100644
--- a/include/uapi/linux/inet_diag.h
+++ b/include/uapi/linux/inet_diag.h
@@ -137,15 +137,21 @@ enum {
INET_DIAG_TCLASS,
INET_DIAG_SKMEMINFO,
INET_DIAG_SHUTDOWN,
- INET_DIAG_DCTCPINFO,
- INET_DIAG_PROTOCOL, /* response attribute only */
+
+ /*
+ * Next extensions cannot be requested in struct inet_diag_req_v2:
+ * its field idiag_ext has only 8 bits.
+ */
+
+ INET_DIAG_DCTCPINFO, /* request as INET_DIAG_VEGASINFO */
+ INET_DIAG_PROTOCOL, /* response attribute only */
INET_DIAG_SKV6ONLY,
INET_DIAG_LOCALS,
INET_DIAG_PEERS,
INET_DIAG_PAD,
- INET_DIAG_MARK,
- INET_DIAG_BBRINFO,
- INET_DIAG_CLASS_ID,
+ INET_DIAG_MARK, /* only with CAP_NET_ADMIN */
+ INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */
+ INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */
INET_DIAG_MD5SIG,
__INET_DIAG_MAX,
};
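
As a usage illustration only (not part of this patch), a dump request selects these extensions through idiag_ext bits; the attributes annotated above, such as INET_DIAG_BBRINFO or INET_DIAG_DCTCPINFO, cannot be named there directly and are returned when the indicated lower-numbered bit is set. A minimal userspace sketch, assuming an IPv4/TCP dump over NETLINK_SOCK_DIAG with the netlink plumbing omitted:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/inet_diag.h>

/* Fill a SOCK_DIAG_BY_FAMILY dump request covering all TCP states. */
static void fill_tcp_diag_req(struct inet_diag_req_v2 *req)
{
	memset(req, 0, sizeof(*req));
	req->sdiag_family = AF_INET;
	req->sdiag_protocol = IPPROTO_TCP;
	req->idiag_states = ~0U;
	/* TCLASS also yields CLASS_ID; VEGASINFO also yields DCTCP/BBR info */
	req->idiag_ext = (1 << (INET_DIAG_TCLASS - 1)) |
			 (1 << (INET_DIAG_VEGASINFO - 1));
}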
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index fbbaa5b..54e2915 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -41,6 +41,11 @@
#define IPA_DFLT_RT_TBL_NAME "ipa_dflt_rt"
/**
+ * Default value denoting an invalid NAT protocol
+ */
+#define IPAHAL_NAT_INVALID_PROTOCOL 0xFF
+
+/**
* commands supported by IPA driver
*/
#define IPA_IOCTL_ADD_HDR 0
@@ -362,10 +367,12 @@ enum ipa_client_type {
#define IPA_CLIENT_WIGIG2_CONS IPA_CLIENT_WIGIG2_CONS
#define IPA_CLIENT_WIGIG3_CONS IPA_CLIENT_WIGIG3_CONS
#define IPA_CLIENT_WIGIG4_CONS IPA_CLIENT_WIGIG4_CONS
+#define IPA_CLIENT_APPS_WAN_COAL_CONS IPA_CLIENT_APPS_WAN_COAL_CONS
#define IPA_CLIENT_IS_APPS_CONS(client) \
((client) == IPA_CLIENT_APPS_LAN_CONS || \
- (client) == IPA_CLIENT_APPS_WAN_CONS)
+ (client) == IPA_CLIENT_APPS_WAN_CONS || \
+ (client) == IPA_CLIENT_APPS_WAN_COAL_CONS)
#define IPA_CLIENT_IS_USB_CONS(client) \
((client) == IPA_CLIENT_USB_CONS || \
diff --git a/include/uapi/linux/usb/f_mtp.h b/include/uapi/linux/usb/f_mtp.h
new file mode 100644
index 0000000..5032918
--- /dev/null
+++ b/include/uapi/linux/usb/f_mtp.h
@@ -0,0 +1,61 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_USB_F_MTP_H
+#define _UAPI_LINUX_USB_F_MTP_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct mtp_file_range {
+ /* file descriptor for file to transfer */
+ int fd;
+ /* offset in file for start of transfer */
+ loff_t offset;
+ /* number of bytes to transfer */
+ int64_t length;
+ /* MTP command ID for data header,
+ * used only for MTP_SEND_FILE_WITH_HEADER
+ */
+ uint16_t command;
+ /* MTP transaction ID for data header,
+ * used only for MTP_SEND_FILE_WITH_HEADER
+ */
+ uint32_t transaction_id;
+};
+
+struct mtp_event {
+ /* size of the event */
+ size_t length;
+ /* event data to send */
+ void *data;
+};
+
+/* Sends the specified file range to the host */
+#define MTP_SEND_FILE _IOW('M', 0, struct mtp_file_range)
+/* Receives data from the host and writes it to a file.
+ * The file is created if it does not exist.
+ */
+#define MTP_RECEIVE_FILE _IOW('M', 1, struct mtp_file_range)
+/* Sends an event to the host via the interrupt endpoint */
+#define MTP_SEND_EVENT _IOW('M', 3, struct mtp_event)
+/* Sends the specified file range to the host,
+ * with a 12 byte MTP data packet header at the beginning.
+ */
+#define MTP_SEND_FILE_WITH_HEADER _IOW('M', 4, struct mtp_file_range)
+
+#endif /* _UAPI_LINUX_USB_F_MTP_H */
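
For illustration only, a userspace caller drives these ioctls roughly as in the sketch below; the MTP gadget device node path and both file descriptors are assumptions, not something this header defines.

#define _GNU_SOURCE		/* for loff_t in userspace */
#include <stdint.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <linux/usb/f_mtp.h>

/* mtp_fd: open fd of the MTP gadget device node (platform-specific path);
 * data_fd: open fd of the object whose bytes are streamed to the host. */
static int mtp_send_object(int mtp_fd, int data_fd, loff_t offset,
			   int64_t length)
{
	struct mtp_file_range mfr = {
		.fd = data_fd,
		.offset = offset,
		.length = length,
	};

	return ioctl(mtp_fd, MTP_SEND_FILE, &mfr);
}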
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index 3abd327..7d09e54 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -36,12 +36,9 @@ struct dlfb_data {
struct usb_device *udev;
struct fb_info *info;
struct urb_list urbs;
- struct kref kref;
char *backing_buffer;
int fb_count;
bool virtualized; /* true when physical usb device not present */
- struct delayed_work init_framebuffer_work;
- struct delayed_work free_framebuffer_work;
atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */
atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */
char *edid; /* null until we read edid from hw or get from sysfs */
diff --git a/init/Kconfig b/init/Kconfig
index 516ce72..0729071 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -515,10 +515,23 @@
the share of walltime in which some or all tasks in the system are
delayed due to contention of the respective resource.
+ In kernels with cgroup support, cgroups (cgroup2 only) will
+ have cpu.pressure, memory.pressure, and io.pressure files,
+ which aggregate pressure stalls for the grouped tasks only.
+
For more details see Documentation/accounting/psi.txt.
Say N if unsure.
+config PSI_DEFAULT_DISABLED
+ bool "Require boot parameter to enable pressure stall information tracking"
+ default n
+ depends on PSI
+ help
+ If set, pressure stall information tracking will be disabled
+ by default, but can be enabled by passing psi=1
+ on the kernel command line during boot.
+
endmenu # "CPU/Task time and stats accounting"
config CPU_ISOLATION
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 8061a43..6a32933 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -180,11 +180,14 @@ static inline int stack_map_parse_build_id(void *page_addr,
if (nhdr->n_type == BPF_BUILD_ID &&
nhdr->n_namesz == sizeof("GNU") &&
- nhdr->n_descsz == BPF_BUILD_ID_SIZE) {
+ nhdr->n_descsz > 0 &&
+ nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
memcpy(build_id,
note_start + note_offs +
ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
- BPF_BUILD_ID_SIZE);
+ nhdr->n_descsz);
+ memset(build_id + nhdr->n_descsz, 0,
+ BPF_BUILD_ID_SIZE - nhdr->n_descsz);
return 0;
}
new_offs = note_offs + sizeof(Elf32_Nhdr) +
@@ -260,7 +263,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
return -EFAULT; /* page not mapped */
ret = -EINVAL;
- page_addr = page_address(page);
+ page_addr = kmap_atomic(page);
ehdr = (Elf32_Ehdr *)page_addr;
/* compare magic x7f "ELF" */
@@ -276,6 +279,7 @@ static int stack_map_get_build_id(struct vm_area_struct *vma,
else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
ret = stack_map_get_build_id_64(page_addr, build_id);
out:
+ kunmap_atomic(page_addr);
put_page(page);
return ret;
}
@@ -310,6 +314,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
for (i = 0; i < trace_nr; i++) {
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
id_offs[i].ip = ips[i];
+ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
}
return;
}
@@ -320,6 +325,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
/* per entry fall back to ips */
id_offs[i].status = BPF_STACK_BUILD_ID_IP;
id_offs[i].ip = ips[i];
+ memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
continue;
}
id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
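
Viewed in isolation, the fix above replaces a fixed-size memcpy with a copy bounded by the note's descriptor size plus zero-padding of the tail; a standalone sketch of that pattern (names hypothetical, outside any BPF context):

#include <string.h>

/* Copy an ELF note descriptor of descsz bytes into a fixed-size build-ID
 * buffer, zero-filling the remainder so short IDs compare deterministically. */
static void copy_build_id(unsigned char *dst, size_t dst_size,
			  const void *desc, size_t descsz)
{
	size_t n = descsz < dst_size ? descsz : dst_size;

	memcpy(dst, desc, n);
	memset(dst + n, 0, dst_size - n);
}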
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 1aa5179..498c6bc 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -55,6 +55,7 @@
#include <linux/nsproxy.h>
#include <linux/file.h>
#include <linux/sched/cputime.h>
+#include <linux/psi.h>
#include <net/sock.h>
#define CREATE_TRACE_POINTS
@@ -832,7 +833,7 @@ static void css_set_move_task(struct task_struct *task,
*/
WARN_ON_ONCE(task->flags & PF_EXITING);
- rcu_assign_pointer(task->cgroups, to_cset);
+ cgroup_move_task(task, to_cset);
list_add_tail(&task->cg_list, use_mg_tasks ? &to_cset->mg_tasks :
&to_cset->tasks);
}
@@ -1743,7 +1744,7 @@ static int parse_cgroup_root_flags(char *data, unsigned int *root_flags)
*root_flags = 0;
- if (!data)
+ if (!data || *data == '\0')
return 0;
while ((token = strsep(&data, ",")) != NULL) {
@@ -3416,6 +3417,21 @@ static int cpu_stat_show(struct seq_file *seq, void *v)
return ret;
}
+#ifdef CONFIG_PSI
+static int cgroup_io_pressure_show(struct seq_file *seq, void *v)
+{
+ return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_IO);
+}
+static int cgroup_memory_pressure_show(struct seq_file *seq, void *v)
+{
+ return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_MEM);
+}
+static int cgroup_cpu_pressure_show(struct seq_file *seq, void *v)
+{
+ return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_CPU);
+}
+#endif
+
static int cgroup_file_open(struct kernfs_open_file *of)
{
struct cftype *cft = of->kn->priv;
@@ -4551,6 +4567,23 @@ static struct cftype cgroup_base_files[] = {
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cpu_stat_show,
},
+#ifdef CONFIG_PSI
+ {
+ .name = "io.pressure",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = cgroup_io_pressure_show,
+ },
+ {
+ .name = "memory.pressure",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = cgroup_memory_pressure_show,
+ },
+ {
+ .name = "cpu.pressure",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .seq_show = cgroup_cpu_pressure_show,
+ },
+#endif
{ } /* terminate */
};
@@ -4611,6 +4644,7 @@ static void css_free_rwork_fn(struct work_struct *work)
*/
cgroup_put(cgroup_parent(cgrp));
kernfs_put(cgrp->kn);
+ psi_cgroup_free(cgrp);
if (cgroup_on_dfl(cgrp))
cgroup_rstat_exit(cgrp);
kfree(cgrp);
@@ -4867,10 +4901,15 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
cgrp->self.parent = &parent->self;
cgrp->root = root;
cgrp->level = level;
- ret = cgroup_bpf_inherit(cgrp);
+
+ ret = psi_cgroup_alloc(cgrp);
if (ret)
goto out_idr_free;
+ ret = cgroup_bpf_inherit(cgrp);
+ if (ret)
+ goto out_psi_free;
+
for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;
@@ -4908,6 +4947,8 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
return cgrp;
+out_psi_free:
+ psi_cgroup_free(cgrp);
out_idr_free:
cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
out_stat_exit:
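
The cpu.pressure, memory.pressure and io.pressure files added above follow the same text format as the system-wide /proc/pressure/* interface; a minimal reader, where the cgroup2 mount point and group name are assumptions:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/fs/cgroup/mygroup/cpu.pressure", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "some avg10=0.00 ... total=0" */
	fclose(f);
	return 0;
}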
diff --git a/kernel/cpu.c b/kernel/cpu.c
index fae0fb4..c40e62a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -358,9 +358,6 @@ void __weak arch_smt_update(void) { }
#ifdef CONFIG_HOTPLUG_SMT
enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
-EXPORT_SYMBOL_GPL(cpu_smt_control);
-
-static bool cpu_smt_available __read_mostly;
void __init cpu_smt_disable(bool force)
{
@@ -378,25 +375,11 @@ void __init cpu_smt_disable(bool force)
/*
* The decision whether SMT is supported can only be done after the full
- * CPU identification. Called from architecture code before non boot CPUs
- * are brought up.
- */
-void __init cpu_smt_check_topology_early(void)
-{
- if (!topology_smt_supported())
- cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
-}
-
-/*
- * If SMT was disabled by BIOS, detect it here, after the CPUs have been
- * brought online. This ensures the smt/l1tf sysfs entries are consistent
- * with reality. cpu_smt_available is set to true during the bringup of non
- * boot CPUs when a SMT sibling is detected. Note, this may overwrite
- * cpu_smt_control's previous setting.
+ * CPU identification. Called from architecture code.
*/
void __init cpu_smt_check_topology(void)
{
- if (!cpu_smt_available)
+ if (!topology_smt_supported())
cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
}
@@ -409,18 +392,10 @@ early_param("nosmt", smt_cmdline_disable);
static inline bool cpu_smt_allowed(unsigned int cpu)
{
- if (topology_is_primary_thread(cpu))
+ if (cpu_smt_control == CPU_SMT_ENABLED)
return true;
- /*
- * If the CPU is not a 'primary' thread and the booted_once bit is
- * set then the processor has SMT support. Store this information
- * for the late check of SMT support in cpu_smt_check_topology().
- */
- if (per_cpu(cpuhp_state, cpu).booted_once)
- cpu_smt_available = true;
-
- if (cpu_smt_control == CPU_SMT_ENABLED)
+ if (topology_is_primary_thread(cpu))
return true;
/*
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 65c0f13..94aa9ae 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -535,6 +535,8 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
arch_kgdb_ops.correct_hw_break();
if (trace_on)
tracing_on();
+ kgdb_info[cpu].debuggerinfo = NULL;
+ kgdb_info[cpu].task = NULL;
kgdb_info[cpu].exception_state &=
~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
kgdb_info[cpu].enter_kgdb--;
@@ -667,6 +669,8 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
if (trace_on)
tracing_on();
+ kgdb_info[cpu].debuggerinfo = NULL;
+ kgdb_info[cpu].task = NULL;
kgdb_info[cpu].exception_state &=
~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
kgdb_info[cpu].enter_kgdb--;
diff --git a/kernel/debug/kdb/kdb_bt.c b/kernel/debug/kdb/kdb_bt.c
index 7921ae4..7e2379a 100644
--- a/kernel/debug/kdb/kdb_bt.c
+++ b/kernel/debug/kdb/kdb_bt.c
@@ -186,7 +186,16 @@ kdb_bt(int argc, const char **argv)
kdb_printf("btc: cpu status: ");
kdb_parse("cpu\n");
for_each_online_cpu(cpu) {
- sprintf(buf, "btt 0x%px\n", KDB_TSK(cpu));
+ void *kdb_tsk = KDB_TSK(cpu);
+
+ /* If a CPU failed to round up we could be here */
+ if (!kdb_tsk) {
+ kdb_printf("WARNING: no task for cpu %ld\n",
+ cpu);
+ continue;
+ }
+
+ sprintf(buf, "btt 0x%px\n", kdb_tsk);
kdb_parse(buf);
touch_nmi_watchdog();
}
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index 15e1a7a..53a0df6 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -118,13 +118,6 @@ int kdb_stub(struct kgdb_state *ks)
kdb_bp_remove();
KDB_STATE_CLEAR(DOING_SS);
KDB_STATE_SET(PAGER);
- /* zero out any offline cpu data */
- for_each_present_cpu(i) {
- if (!cpu_online(i)) {
- kgdb_info[i].debuggerinfo = NULL;
- kgdb_info[i].task = NULL;
- }
- }
if (ks->err_code == DIE_OOPS || reason == KDB_REASON_OOPS) {
ks->pass_exception = 1;
KDB_FLAG_SET(CATASTROPHIC);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6f257fa..0709f85 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5136,6 +5136,11 @@ static void __perf_event_period(struct perf_event *event,
}
}
+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+ return event->pmu->check_period(event, value);
+}
+
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
u64 value;
@@ -5152,6 +5157,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
if (event->attr.freq && value > sysctl_perf_event_sample_rate)
return -EINVAL;
+ if (perf_event_check_period(event, value))
+ return -EINVAL;
+
event_function_call(event, __perf_event_period, &value);
return 0;
@@ -9539,6 +9547,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
return 0;
}
+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+ return 0;
+}
+
static DEFINE_PER_CPU(unsigned int, nop_txn_flags);
static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9839,6 +9852,9 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
pmu->pmu_disable = perf_pmu_nop_void;
}
+ if (!pmu->check_period)
+ pmu->check_period = perf_event_nop_int;
+
if (!pmu->event_idx)
pmu->event_idx = perf_event_idx_default;
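
To show how the new hook is meant to be used, a hypothetical PMU driver might validate fixed sample periods as below; the driver name and minimum period are assumptions, and drivers that leave .check_period unset get the no-op default installed above.

#include <linux/errno.h>
#include <linux/perf_event.h>

#define MY_PMU_MIN_PERIOD	128	/* assumed hardware limit */

static int my_pmu_check_period(struct perf_event *event, u64 value)
{
	/* Reject raw sample periods the counter cannot program;
	 * frequency mode is left to the core. */
	if (!event->attr.freq && value < MY_PMU_MIN_PERIOD)
		return -EINVAL;
	return 0;
}

/* wired up before registration, e.g.:
 *	my_pmu.check_period = my_pmu_check_period;
 *	perf_pmu_register(&my_pmu, "my_pmu", -1);
 */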
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 5d3cf40..5631af9 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -724,6 +724,9 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
size = sizeof(struct ring_buffer);
size += nr_pages * sizeof(void *);
+ if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
+ goto fail;
+
rb = kzalloc(size, GFP_KERNEL);
if (!rb)
goto fail;
diff --git a/kernel/futex.c b/kernel/futex.c
index f89abca..d7c465f 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2850,35 +2850,39 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
* and BUG when futex_unlock_pi() interleaves with this.
*
* Therefore acquire wait_lock while holding hb->lock, but drop the
- * latter before calling rt_mutex_start_proxy_lock(). This still fully
- * serializes against futex_unlock_pi() as that does the exact same
- * lock handoff sequence.
+ * latter before calling __rt_mutex_start_proxy_lock(). This
+ * interleaves with futex_unlock_pi() -- which does a similar lock
+ * handoff -- such that the latter can observe the futex_q::pi_state
+ * before __rt_mutex_start_proxy_lock() is done.
*/
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
spin_unlock(q.lock_ptr);
+ /*
+ * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
+ * such that futex_unlock_pi() is guaranteed to observe the waiter when
+ * it sees the futex_q::pi_state.
+ */
ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);
if (ret) {
if (ret == 1)
ret = 0;
-
- spin_lock(q.lock_ptr);
- goto no_block;
+ goto cleanup;
}
-
if (unlikely(to))
hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);
+cleanup:
spin_lock(q.lock_ptr);
/*
- * If we failed to acquire the lock (signal/timeout), we must
+ * If we failed to acquire the lock (deadlock/signal/timeout), we must
* first acquire the hb->lock before removing the lock from the
- * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
- * wait lists consistent.
+ * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
+ * lists consistent.
*
* In particular; it is important that futex_unlock_pi() can not
* observe this inconsistency.
@@ -3002,6 +3006,10 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
* there is no point where we hold neither; and therefore
* wake_futex_pi() must observe a state consistent with what we
* observed.
+ *
+ * In particular; this forces __rt_mutex_start_proxy_lock() to
+ * complete such that we're guaranteed to observe the
+ * rt_waiter. Also see the WARN in wake_futex_pi().
*/
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
spin_unlock(&hb->lock);
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index b9132d1..9eca237 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -33,7 +33,7 @@ int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
* is disabled during the critical section. It also controls the size of
* the RCU grace period. So it needs to be upper-bound.
*/
-#define HUNG_TASK_BATCHING 1024
+#define HUNG_TASK_LOCK_BREAK (HZ / 10)
/*
* Zero means infinite timeout - no checking done:
@@ -111,8 +111,11 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
trace_sched_process_hang(t);
- if (!sysctl_hung_task_warnings && !sysctl_hung_task_panic)
- return;
+ if (sysctl_hung_task_panic) {
+ console_verbose();
+ hung_task_show_lock = true;
+ hung_task_call_panic = true;
+ }
/*
* Ok, the task did not get scheduled for more than 2 minutes,
@@ -134,11 +137,6 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
}
touch_nmi_watchdog();
-
- if (sysctl_hung_task_panic) {
- hung_task_show_lock = true;
- hung_task_call_panic = true;
- }
}
/*
@@ -172,7 +170,7 @@ static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
static void check_hung_uninterruptible_tasks(unsigned long timeout)
{
int max_count = sysctl_hung_task_check_count;
- int batch_count = HUNG_TASK_BATCHING;
+ unsigned long last_break = jiffies;
struct task_struct *g, *t;
/*
@@ -187,10 +185,10 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
for_each_process_thread(g, t) {
if (!max_count--)
goto unlock;
- if (!--batch_count) {
- batch_count = HUNG_TASK_BATCHING;
+ if (time_after(jiffies, last_break + HUNG_TASK_LOCK_BREAK)) {
if (!rcu_lock_break(g, t))
goto unlock;
+ last_break = jiffies;
}
/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
if (t->state == TASK_UNINTERRUPTIBLE)
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index f4f29b9..e12cdf6 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -117,12 +117,11 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
*/
if (numvecs <= nodes) {
for_each_node_mask(n, nodemsk) {
- cpumask_copy(masks + curvec, node_to_cpumask[n]);
- if (++done == numvecs)
- break;
+ cpumask_or(masks + curvec, masks + curvec, node_to_cpumask[n]);
if (++curvec == last_affv)
curvec = affd->pre_vectors;
}
+ done = numvecs;
goto out;
}
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 97959d7..c2277db 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -112,7 +112,7 @@ void notrace __sanitizer_cov_trace_pc(void)
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
-static void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
+static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
struct task_struct *t;
u64 *area;
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 2823d41..9562aaa 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -1726,12 +1726,33 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
rt_mutex_set_owner(lock, NULL);
}
+/**
+ * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock: the rt_mutex to take
+ * @waiter: the pre-initialized rt_mutex_waiter
+ * @task: the task to prepare
+ *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: does _NOT_ remove the @waiter on failure; must either call
+ * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
+ *
+ * Returns:
+ * 0 - task blocked on lock
+ * 1 - acquired the lock for task, caller should wake it up
+ * <0 - error
+ *
+ * Special API call for PI-futex support.
+ */
int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task)
{
int ret;
+ lockdep_assert_held(&lock->wait_lock);
+
if (try_to_take_rt_mutex(lock, task, NULL))
return 1;
@@ -1749,9 +1770,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
ret = 0;
}
- if (unlikely(ret))
- remove_waiter(lock, waiter);
-
debug_rt_mutex_print_deadlock(waiter);
return ret;
@@ -1763,12 +1781,18 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
* @waiter: the pre-initialized rt_mutex_waiter
* @task: the task to prepare
*
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
+ * on failure.
+ *
* Returns:
* 0 - task blocked on lock
* 1 - acquired the lock for task, caller should wake it up
* <0 - error
*
- * Special API call for FUTEX_REQUEUE_PI support.
+ * Special API call for PI-futex support.
*/
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
@@ -1778,6 +1802,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
raw_spin_lock_irq(&lock->wait_lock);
ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
+ if (unlikely(ret))
+ remove_waiter(lock, waiter);
raw_spin_unlock_irq(&lock->wait_lock);
return ret;
@@ -1845,7 +1871,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
* @lock: the rt_mutex we were woken on
* @waiter: the pre-initialized rt_mutex_waiter
*
- * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
+ * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
+ * rt_mutex_wait_proxy_lock().
*
* Unless we acquired the lock; we're still enqueued on the wait-list and can
* in fact still be granted ownership until we're removed. Therefore we can
diff --git a/kernel/module.c b/kernel/module.c
index f896873..e179303 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1207,8 +1207,10 @@ static ssize_t store_uevent(struct module_attribute *mattr,
struct module_kobject *mk,
const char *buffer, size_t count)
{
- kobject_synth_uevent(&mk->kobj, buffer, count);
- return count;
+ int rc;
+
+ rc = kobject_synth_uevent(&mk->kobj, buffer, count);
+ return rc ? rc : count;
}
struct module_attribute module_uevent =
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 5031645..46c1b5e 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -124,7 +124,8 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
const struct sched_dl_entity *dl_se = &p->dl;
if (later_mask &&
- cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
+ cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed) &&
+ cpumask_and(later_mask, later_mask, cpu_active_mask)) {
return 1;
} else {
int best_cpu = cpudl_maximum(cp);
@@ -132,6 +133,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) &&
+ cpumask_test_cpu(best_cpu, cpu_active_mask) &&
dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
if (later_mask)
cpumask_set_cpu(best_cpu, later_mask);
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index a12940b..9677ef3 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -127,7 +127,7 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
static inline bool use_pelt(void)
{
#ifdef CONFIG_SCHED_WALT
- return (!sysctl_sched_use_walt_cpu_util || walt_disabled);
+ return false;
#else
return true;
#endif
@@ -180,7 +180,7 @@ static void sugov_track_cycles(struct sugov_policy *sg_policy,
{
u64 delta_ns, cycles;
- if (unlikely(!sysctl_sched_use_walt_cpu_util))
+ if (use_pelt())
return;
/* Track cycles in current window */
@@ -198,7 +198,7 @@ static void sugov_calc_avg_cap(struct sugov_policy *sg_policy, u64 curr_ws,
u64 last_ws = sg_policy->last_ws;
unsigned int avg_freq;
- if (unlikely(!sysctl_sched_use_walt_cpu_util))
+ if (use_pelt())
return;
BUG_ON(curr_ws < last_ws);
@@ -571,7 +571,7 @@ static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
unsigned long cpu_util = sg_cpu->util;
bool is_hiload;
- if (unlikely(!sysctl_sched_use_walt_cpu_util))
+ if (use_pelt())
return;
is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 7ab94c0..6a45c8a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -135,8 +135,6 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
DEFINE_PER_CPU_READ_MOSTLY(int, sched_load_boost);
#ifdef CONFIG_SCHED_WALT
-unsigned int sysctl_sched_use_walt_cpu_util = 1;
-unsigned int sysctl_sched_use_walt_task_util = 1;
__read_mostly unsigned int sysctl_sched_walt_cpu_high_irqload =
(10 * NSEC_PER_MSEC);
#endif
@@ -3708,8 +3706,7 @@ static inline unsigned long _task_util_est(struct task_struct *p)
static inline unsigned long task_util_est(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT
- if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
- return p->ravg.demand_scaled;
+ return p->ravg.demand_scaled;
#endif
return max(task_util(p), _task_util_est(p));
}
@@ -6295,6 +6292,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p
#ifdef CONFIG_SCHED_SMT
DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+EXPORT_SYMBOL_GPL(sched_smt_present);
static inline void set_idle_cores(int cpu, int val)
{
@@ -6553,8 +6551,7 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p)
* utilization from cpu utilization. Instead just use
* cpu_util for this case.
*/
- if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util) &&
- p->state == TASK_WAKING)
+ if (p->state == TASK_WAKING)
return cpu_util(cpu);
#endif
@@ -7436,8 +7433,7 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
}
#ifdef CONFIG_SCHED_WALT
- if (!walt_disabled && sysctl_sched_use_walt_cpu_util &&
- p->state == TASK_WAKING)
+ if (p->state == TASK_WAKING)
delta = task_util(p);
#endif
if (task_placement_boost_enabled(p) || need_idle || boosted ||
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 5954145..0e97ca9 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -124,6 +124,7 @@
* sampling of the aggregate task states would be.
*/
+#include "../workqueue_internal.h"
#include <linux/sched/loadavg.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
@@ -136,8 +137,18 @@
static int psi_bug __read_mostly;
-bool psi_disabled __read_mostly;
-core_param(psi_disabled, psi_disabled, bool, 0644);
+DEFINE_STATIC_KEY_FALSE(psi_disabled);
+
+#ifdef CONFIG_PSI_DEFAULT_DISABLED
+bool psi_enable;
+#else
+bool psi_enable = true;
+#endif
+static int __init setup_psi(char *str)
+{
+ return kstrtobool(str, &psi_enable) == 0;
+}
+__setup("psi=", setup_psi);
/* Running averages - we need to be higher-res than loadavg */
#define PSI_FREQ (2*HZ+1) /* 2 sec intervals */
@@ -169,8 +180,10 @@ static void group_init(struct psi_group *group)
void __init psi_init(void)
{
- if (psi_disabled)
+ if (!psi_enable) {
+ static_branch_enable(&psi_disabled);
return;
+ }
psi_period = jiffies_to_nsecs(PSI_FREQ);
group_init(&psi_system);
@@ -309,7 +322,7 @@ static bool update_stats(struct psi_group *group)
expires = group->next_update;
if (now < expires)
goto out;
- if (now - expires > psi_period)
+ if (now - expires >= psi_period)
missed_periods = div_u64(now - expires, psi_period);
/*
@@ -468,14 +481,38 @@ static void psi_group_change(struct psi_group *group, int cpu,
groupc->tasks[t]++;
write_seqcount_end(&groupc->seq);
+}
- if (!delayed_work_pending(&group->clock_work))
- schedule_delayed_work(&group->clock_work, PSI_FREQ);
+static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
+{
+#ifdef CONFIG_CGROUPS
+ struct cgroup *cgroup = NULL;
+
+ if (!*iter)
+ cgroup = task->cgroups->dfl_cgrp;
+ else if (*iter == &psi_system)
+ return NULL;
+ else
+ cgroup = cgroup_parent(*iter);
+
+ if (cgroup && cgroup_parent(cgroup)) {
+ *iter = cgroup;
+ return cgroup_psi(cgroup);
+ }
+#else
+ if (*iter)
+ return NULL;
+#endif
+ *iter = &psi_system;
+ return &psi_system;
}
void psi_task_change(struct task_struct *task, int clear, int set)
{
int cpu = task_cpu(task);
+ struct psi_group *group;
+ bool wake_clock = true;
+ void *iter = NULL;
if (!task->pid)
return;
@@ -492,17 +529,37 @@ void psi_task_change(struct task_struct *task, int clear, int set)
task->psi_flags &= ~clear;
task->psi_flags |= set;
- psi_group_change(&psi_system, cpu, clear, set);
+ /*
+ * Periodic aggregation shuts off if there is a period of no
+ * task changes, so we wake it back up if necessary. However,
+ * don't do this if the task change is the aggregation worker
+ * itself going to sleep, or we'll ping-pong forever.
+ */
+ if (unlikely((clear & TSK_RUNNING) &&
+ (task->flags & PF_WQ_WORKER) &&
+ wq_worker_last_func(task) == psi_update_work))
+ wake_clock = false;
+
+ while ((group = iterate_groups(task, &iter))) {
+ psi_group_change(group, cpu, clear, set);
+ if (wake_clock && !delayed_work_pending(&group->clock_work))
+ schedule_delayed_work(&group->clock_work, PSI_FREQ);
+ }
}
void psi_memstall_tick(struct task_struct *task, int cpu)
{
- struct psi_group_cpu *groupc;
+ struct psi_group *group;
+ void *iter = NULL;
- groupc = per_cpu_ptr(psi_system.pcpu, cpu);
- write_seqcount_begin(&groupc->seq);
- record_times(groupc, cpu, true);
- write_seqcount_end(&groupc->seq);
+ while ((group = iterate_groups(task, &iter))) {
+ struct psi_group_cpu *groupc;
+
+ groupc = per_cpu_ptr(group->pcpu, cpu);
+ write_seqcount_begin(&groupc->seq);
+ record_times(groupc, cpu, true);
+ write_seqcount_end(&groupc->seq);
+ }
}
/**
@@ -517,7 +574,7 @@ void psi_memstall_enter(unsigned long *flags)
struct rq_flags rf;
struct rq *rq;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
*flags = current->flags & PF_MEMSTALL;
@@ -547,7 +604,7 @@ void psi_memstall_leave(unsigned long *flags)
struct rq_flags rf;
struct rq *rq;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (*flags)
@@ -565,12 +622,83 @@ void psi_memstall_leave(unsigned long *flags)
rq_unlock_irq(rq, &rf);
}
-static int psi_show(struct seq_file *m, struct psi_group *group,
- enum psi_res res)
+#ifdef CONFIG_CGROUPS
+int psi_cgroup_alloc(struct cgroup *cgroup)
+{
+ if (static_branch_likely(&psi_disabled))
+ return 0;
+
+ cgroup->psi.pcpu = alloc_percpu(struct psi_group_cpu);
+ if (!cgroup->psi.pcpu)
+ return -ENOMEM;
+ group_init(&cgroup->psi);
+ return 0;
+}
+
+void psi_cgroup_free(struct cgroup *cgroup)
+{
+ if (static_branch_likely(&psi_disabled))
+ return;
+
+ cancel_delayed_work_sync(&cgroup->psi.clock_work);
+ free_percpu(cgroup->psi.pcpu);
+}
+
+/**
+ * cgroup_move_task - move task to a different cgroup
+ * @task: the task
+ * @to: the target css_set
+ *
+ * Move task to a new cgroup and safely migrate its associated stall
+ * state between the different groups.
+ *
+ * This function acquires the task's rq lock to lock out concurrent
+ * changes to the task's scheduling state and - in case the task is
+ * running - concurrent changes to its stall state.
+ */
+void cgroup_move_task(struct task_struct *task, struct css_set *to)
+{
+ unsigned int task_flags = 0;
+ struct rq_flags rf;
+ struct rq *rq;
+
+ if (static_branch_likely(&psi_disabled)) {
+ /*
+ * Lame to do this here, but the scheduler cannot be locked
+ * from the outside, so we move cgroups from inside sched/.
+ */
+ rcu_assign_pointer(task->cgroups, to);
+ return;
+ }
+
+ rq = task_rq_lock(task, &rf);
+
+ if (task_on_rq_queued(task))
+ task_flags = TSK_RUNNING;
+ else if (task->in_iowait)
+ task_flags = TSK_IOWAIT;
+
+ if (task->flags & PF_MEMSTALL)
+ task_flags |= TSK_MEMSTALL;
+
+ if (task_flags)
+ psi_task_change(task, task_flags, 0);
+
+ /* See comment above */
+ rcu_assign_pointer(task->cgroups, to);
+
+ if (task_flags)
+ psi_task_change(task, 0, task_flags);
+
+ task_rq_unlock(rq, task, &rf);
+}
+#endif /* CONFIG_CGROUPS */
+
+int psi_show(struct seq_file *m, struct psi_group *group, enum psi_res res)
{
int full;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return -EOPNOTSUPP;
update_stats(group);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9eff4e9..a0b7281 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -142,17 +142,9 @@ struct sched_cluster {
unsigned int cur_freq, max_freq, max_mitigated_freq, min_freq;
unsigned int max_possible_freq;
bool freq_init_done;
- int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
- unsigned int static_cluster_pwr_cost;
- int notifier_sent;
- bool wake_up_idle;
u64 aggr_grp_load;
u64 coloc_boost_load;
};
-
-extern unsigned int sched_disable_window_stats;
-
-extern struct timer_list sched_grp_timer;
#endif /* CONFIG_SCHED_WALT */
/* task_struct::on_rq states: */
@@ -2069,14 +2061,10 @@ static inline unsigned long capacity_orig_of(int cpu)
return cpu_rq(cpu)->cpu_capacity_orig;
}
-extern unsigned int sysctl_sched_use_walt_cpu_util;
-extern unsigned int walt_disabled;
-
static inline unsigned long task_util(struct task_struct *p)
{
#ifdef CONFIG_SCHED_WALT
- if (likely(!walt_disabled && sysctl_sched_use_walt_task_util))
- return p->ravg.demand_scaled;
+ return p->ravg.demand_scaled;
#endif
return READ_ONCE(p->se.avg.util_avg);
}
@@ -2125,13 +2113,10 @@ static inline unsigned long cpu_util(int cpu)
unsigned int util;
#ifdef CONFIG_SCHED_WALT
- if (likely(!walt_disabled && sysctl_sched_use_walt_cpu_util)) {
- u64 walt_cpu_util =
- cpu_rq(cpu)->walt_stats.cumulative_runnable_avg_scaled;
+ u64 walt_cpu_util =
+ cpu_rq(cpu)->walt_stats.cumulative_runnable_avg_scaled;
- return min_t(unsigned long, walt_cpu_util,
- capacity_orig_of(cpu));
- }
+ return min_t(unsigned long, walt_cpu_util, capacity_orig_of(cpu));
#endif
cfs_rq = &cpu_rq(cpu)->cfs;
@@ -2156,8 +2141,7 @@ static inline unsigned long cpu_util_cum(int cpu, int delta)
unsigned long capacity = capacity_orig_of(cpu);
#ifdef CONFIG_SCHED_WALT
- if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
- util = cpu_rq(cpu)->cum_window_demand_scaled;
+ util = cpu_rq(cpu)->cum_window_demand_scaled;
#endif
delta += util;
if (delta < 0)
@@ -2184,9 +2168,6 @@ cpu_util_freq_walt(int cpu, struct sched_walt_cpu_load *walt_load)
unsigned long capacity = capacity_orig_of(cpu);
int boost;
- if (walt_disabled || !sysctl_sched_use_walt_cpu_util)
- return cpu_util(cpu);
-
boost = per_cpu(sched_load_boost, cpu);
util_unboosted = util = freq_policy_load(rq);
util = div64_u64(util * (100 + boost),
@@ -2231,7 +2212,6 @@ cpu_util_freq(int cpu, struct sched_walt_cpu_load *walt_load)
}
#define sched_ravg_window TICK_NSEC
-#define sysctl_sched_use_walt_cpu_util 0
#endif /* CONFIG_SCHED_WALT */
@@ -2718,26 +2698,12 @@ struct related_thread_group {
u64 last_update;
};
-extern struct list_head cluster_head;
extern struct sched_cluster *sched_cluster[NR_CPUS];
-#define for_each_sched_cluster(cluster) \
- list_for_each_entry_rcu(cluster, &cluster_head, list)
-
-#define WINDOW_STATS_RECENT 0
-#define WINDOW_STATS_MAX 1
-#define WINDOW_STATS_MAX_RECENT_AVG 2
-#define WINDOW_STATS_AVG 3
-#define WINDOW_STATS_INVALID_POLICY 4
-
-#define SCHED_UPMIGRATE_MIN_NICE 15
-#define EXITING_TASK_MARKER 0xdeaddead
-
#define UP_MIGRATION 1
#define DOWN_MIGRATION 2
#define IRQLOAD_MIGRATION 3
-extern struct mutex policy_mutex;
extern unsigned int sched_disable_window_stats;
extern unsigned int max_possible_freq;
extern unsigned int min_max_freq;
@@ -2745,23 +2711,10 @@ extern unsigned int max_possible_efficiency;
extern unsigned int min_possible_efficiency;
extern unsigned int max_capacity;
extern unsigned int min_capacity;
-extern unsigned int max_load_scale_factor;
extern unsigned int max_possible_capacity;
extern unsigned int min_max_possible_capacity;
extern unsigned int max_power_cost;
extern unsigned int __read_mostly sched_init_task_load_windows;
-extern unsigned int up_down_migrate_scale_factor;
-extern unsigned int sysctl_sched_restrict_cluster_spill;
-extern unsigned int sched_pred_alert_load;
-extern struct sched_cluster init_cluster;
-extern unsigned int __read_mostly sched_short_sleep_task_threshold;
-extern unsigned int __read_mostly sched_long_cpu_selection_threshold;
-extern unsigned int __read_mostly sched_big_waker_task_load;
-extern unsigned int __read_mostly sched_small_wakee_task_load;
-extern unsigned int __read_mostly sched_spill_load;
-extern unsigned int __read_mostly sched_upmigrate;
-extern unsigned int __read_mostly sched_downmigrate;
-extern unsigned int __read_mostly sysctl_sched_spill_nr_run;
extern unsigned int __read_mostly sched_load_granule;
extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
@@ -2794,16 +2747,6 @@ static inline int cpu_load_scale_factor(int cpu)
return cpu_rq(cpu)->cluster->load_scale_factor;
}
-static inline int cpu_efficiency(int cpu)
-{
- return cpu_rq(cpu)->cluster->efficiency;
-}
-
-static inline unsigned int cpu_min_freq(int cpu)
-{
- return cpu_rq(cpu)->cluster->min_freq;
-}
-
static inline unsigned int cluster_max_freq(struct sched_cluster *cluster)
{
/*
@@ -2883,16 +2826,6 @@ static inline int compute_load_scale_factor(struct sched_cluster *cluster)
return load_scale;
}
-static inline int cpu_max_power_cost(int cpu)
-{
- return cpu_rq(cpu)->cluster->max_power_cost;
-}
-
-static inline int cpu_min_power_cost(int cpu)
-{
- return cpu_rq(cpu)->cluster->min_power_cost;
-}
-
static inline bool hmp_capable(void)
{
return max_possible_capacity != min_max_possible_capacity;
@@ -3246,11 +3179,6 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
static inline void clear_reserved(int cpu) { }
static inline int alloc_related_thread_groups(void) { return 0; }
-#define trace_sched_cpu_load(...)
-#define trace_sched_cpu_load_lb(...)
-#define trace_sched_cpu_load_cgroup(...)
-#define trace_sched_cpu_load_wakeup(...)
-
static inline void walt_fixup_cum_window_demand(struct rq *rq,
s64 scaled_delta) { }
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index 80e2e38..41e6e2d 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2012, 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, 2015-2019, The Linux Foundation. All rights reserved.
*/
/*
@@ -173,11 +173,8 @@ unsigned int sched_get_cpu_util(int cpu)
capacity = capacity_orig_of(cpu);
#ifdef CONFIG_SCHED_WALT
- if (!walt_disabled && sysctl_sched_use_walt_cpu_util) {
- util = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
- util = div64_u64(util,
- sched_ravg_window >> SCHED_CAPACITY_SHIFT);
- }
+ util = rq->prev_runnable_sum + rq->grp_time.prev_runnable_sum;
+ util = div64_u64(util, sched_ravg_window >> SCHED_CAPACITY_SHIFT);
#endif
raw_spin_unlock_irqrestore(&rq->lock, flags);
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 4904c46..aa0de24 100644
--- a/kernel/sched/stats.h
+++ b/kernel/sched/stats.h
@@ -66,7 +66,7 @@ static inline void psi_enqueue(struct task_struct *p, bool wakeup)
{
int clear = 0, set = TSK_RUNNING;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (!wakeup || p->sched_psi_wake_requeue) {
@@ -86,7 +86,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
{
int clear = TSK_RUNNING, set = 0;
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (!sleep) {
@@ -102,7 +102,7 @@ static inline void psi_dequeue(struct task_struct *p, bool sleep)
static inline void psi_ttwu_dequeue(struct task_struct *p)
{
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
/*
* Is the task being migrated during a wakeup? Make sure to
@@ -128,7 +128,7 @@ static inline void psi_ttwu_dequeue(struct task_struct *p)
static inline void psi_task_tick(struct rq *rq)
{
- if (psi_disabled)
+ if (static_branch_likely(&psi_disabled))
return;
if (unlikely(rq->curr->flags & PF_MEMSTALL))
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index f1d7af6..f015033 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -109,9 +109,6 @@ static void release_rq_locks_irqrestore(const cpumask_t *cpus,
/* Max window size (in ns) = 1s */
#define MAX_SCHED_RAVG_WINDOW 1000000000
-/* 1 -> use PELT based load stats, 0 -> use window-based load stats */
-unsigned int __read_mostly walt_disabled = 0;
-
__read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
unsigned int sysctl_sched_walt_rotate_big_tasks;
@@ -2137,9 +2134,6 @@ static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
cluster->max_mitigated_freq = UINT_MAX;
cluster->min_freq = 1;
cluster->max_possible_freq = 1;
- cluster->dstate = 0;
- cluster->dstate_wakeup_energy = 0;
- cluster->dstate_wakeup_latency = 0;
cluster->freq_init_done = false;
raw_spin_lock_init(&cluster->load_lock);
@@ -2151,7 +2145,6 @@ static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
if (cluster->efficiency < min_possible_efficiency)
min_possible_efficiency = cluster->efficiency;
- cluster->notifier_sent = 0;
return cluster;
}
@@ -2323,12 +2316,7 @@ struct sched_cluster init_cluster = {
.max_mitigated_freq = UINT_MAX,
.min_freq = 1,
.max_possible_freq = 1,
- .dstate = 0,
- .dstate_wakeup_energy = 0,
- .dstate_wakeup_latency = 0,
.exec_scale_factor = 1024,
- .notifier_sent = 0,
- .wake_up_idle = 0,
.aggr_grp_load = 0,
.coloc_boost_load = 0,
};
@@ -2644,8 +2632,6 @@ int update_preferred_cluster(struct related_thread_group *grp,
return 0;
}
-DEFINE_MUTEX(policy_mutex);
-
#define pct_to_real(tunable) \
(div64_u64((u64)tunable * (u64)max_task_load(), 100))
diff --git a/kernel/sched/walt.h b/kernel/sched/walt.h
index 44d1277..fe971da 100644
--- a/kernel/sched/walt.h
+++ b/kernel/sched/walt.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __WALT_H
@@ -31,8 +31,6 @@ extern unsigned int sched_ravg_window;
extern unsigned int max_possible_efficiency;
extern unsigned int min_possible_efficiency;
extern unsigned int max_possible_freq;
-extern unsigned int sched_major_task_runtime;
-extern unsigned int __read_mostly sched_init_task_load_windows;
extern unsigned int __read_mostly sched_load_granule;
extern struct mutex cluster_lock;
diff --git a/kernel/signal.c b/kernel/signal.c
index 03f4fa9..e3bb6c1 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -683,6 +683,48 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
return signr;
}
+static int dequeue_synchronous_signal(siginfo_t *info)
+{
+ struct task_struct *tsk = current;
+ struct sigpending *pending = &tsk->pending;
+ struct sigqueue *q, *sync = NULL;
+
+ /*
+ * Might a synchronous signal be in the queue?
+ */
+ if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
+ return 0;
+
+ /*
+ * Return the first synchronous signal in the queue.
+ */
+ list_for_each_entry(q, &pending->list, list) {
+ /* Synchronous signals have a positive si_code */
+ if ((q->info.si_code > SI_USER) &&
+ (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
+ sync = q;
+ goto next;
+ }
+ }
+ return 0;
+next:
+ /*
+ * Check if there is another siginfo for the same signal.
+ */
+ list_for_each_entry_continue(q, &pending->list, list) {
+ if (q->info.si_signo == sync->info.si_signo)
+ goto still_pending;
+ }
+
+ sigdelset(&pending->signal, sync->info.si_signo);
+ recalc_sigpending();
+still_pending:
+ list_del_init(&sync->list);
+ copy_siginfo(info, &sync->info);
+ __sigqueue_free(sync);
+ return info->si_signo;
+}
+
/*
* Tell a process that it has a new active signal..
*
@@ -2395,6 +2437,14 @@ bool get_signal(struct ksignal *ksig)
goto relock;
}
+ /* Has this task already been marked for death? */
+ if (signal_group_exit(signal)) {
+ ksig->info.si_signo = signr = SIGKILL;
+ sigdelset(&current->pending.signal, SIGKILL);
+ recalc_sigpending();
+ goto fatal;
+ }
+
for (;;) {
struct k_sigaction *ka;
@@ -2408,7 +2458,15 @@ bool get_signal(struct ksignal *ksig)
goto relock;
}
- signr = dequeue_signal(current, &current->blocked, &ksig->info);
+ /*
+ * Signals generated by the execution of an instruction
+ * need to be delivered before any other pending signals
+ * so that the instruction pointer in the signal stack
+ * frame points to the faulting instruction.
+ */
+ signr = dequeue_synchronous_signal(&ksig->info);
+ if (!signr)
+ signr = dequeue_signal(current, &current->blocked, &ksig->info);
if (!signr)
break; /* will return 0 */
@@ -2490,6 +2548,7 @@ bool get_signal(struct ksignal *ksig)
continue;
}
+ fatal:
spin_unlock_irq(&sighand->siglock);
/*
diff --git a/kernel/smp.c b/kernel/smp.c
index d19a663..6e5cb7e 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -19,6 +19,7 @@
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/hypervisor.h>
+#include <linux/suspend.h>
#include "smpboot.h"
@@ -615,8 +616,6 @@ void __init smp_init(void)
num_nodes, (num_nodes > 1 ? "s" : ""),
num_cpus, (num_cpus > 1 ? "s" : ""));
- /* Final decision about SMT support */
- cpu_smt_check_topology();
/* Any cleanup work */
smp_cpus_done(setup_max_cpus);
}
@@ -771,8 +770,8 @@ void wake_up_all_idle_cpus(void)
for_each_online_cpu(cpu) {
if (cpu == smp_processor_id())
continue;
-
- if (!cpu_isolated(cpu))
+ if (s2idle_state == S2IDLE_STATE_ENTER ||
+ !cpu_isolated(cpu))
wake_up_if_idle(cpu);
}
preempt_enable();
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 9de8648..2b331b5 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2982,6 +2982,8 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
bool neg;
left -= proc_skip_spaces(&p);
+ if (!left)
+ break;
err = proc_get_long(&p, &left, &val, &neg,
proc_wspace_sep,
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 5446510..145e0c7 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -278,7 +278,7 @@ static u64 notrace suspended_sched_clock_read(void)
return cd.read_data[seq & 1].epoch_cyc;
}
-static int sched_clock_suspend(void)
+int sched_clock_suspend(void)
{
struct clock_read_data *rd = &cd.read_data[0];
@@ -294,7 +294,7 @@ static int sched_clock_suspend(void)
return 0;
}
-static void sched_clock_resume(void)
+void sched_clock_resume(void)
{
struct clock_read_data *rd = &cd.read_data[0];
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 14de372..de7ebe5 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -18,6 +18,7 @@
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
+#include <linux/sched_clock.h>
#include <linux/module.h>
#include <trace/events/power.h>
@@ -491,6 +492,7 @@ void tick_freeze(void)
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), true);
system_state = SYSTEM_SUSPEND;
+ sched_clock_suspend();
timekeeping_suspend();
} else {
tick_suspend_local();
@@ -515,6 +517,7 @@ void tick_unfreeze(void)
if (tick_freeze_depth == num_online_cpus()) {
timekeeping_resume();
system_state = SYSTEM_RUNNING;
+ sched_clock_resume();
trace_suspend_resume(TPS("timekeeping_freeze"),
smp_processor_id(), false);
} else {
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index f3b22f4..7846ce2 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -50,7 +50,9 @@ enum timekeeping_adv_mode {
static struct {
seqcount_t seq;
struct timekeeper timekeeper;
-} tk_core ____cacheline_aligned;
+} tk_core ____cacheline_aligned = {
+ .seq = SEQCNT_ZERO(tk_core.seq),
+};
static DEFINE_RAW_SPINLOCK(timekeeper_lock);
static struct timekeeper shadow_timekeeper;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c98179f..c2ed6a7 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3386,6 +3386,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
const char tgid_space[] = " ";
const char space[] = " ";
+ print_event_info(buf, m);
+
seq_printf(m, "# %s _-----=> irqs-off\n",
tgid ? tgid_space : space);
seq_printf(m, "# %s / _----=> need-resched\n",
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 3061901..1df8e38 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -5,7 +5,7 @@
* Copyright (C) IBM Corporation, 2010-2012
* Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
*/
-#define pr_fmt(fmt) "trace_kprobe: " fmt
+#define pr_fmt(fmt) "trace_uprobe: " fmt
#include <linux/module.h>
#include <linux/uaccess.h>
@@ -141,7 +141,14 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
ret = strncpy_from_user(dst, src, maxlen);
if (ret == maxlen)
- dst[--ret] = '\0';
+ dst[ret - 1] = '\0';
+ else if (ret >= 0)
+ /*
+ * Include the terminating null byte. In this case it
+ * was copied by strncpy_from_user but not accounted
+ * for in ret.
+ */
+ ret++;
if (ret < 0) { /* Failed to fetch string */
((u8 *)get_rloc_data(dest))[0] = '\0';
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3c76867..9ef9ece 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -912,6 +912,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task)
}
/**
+ * wq_worker_last_func - retrieve worker's last work function
+ *
+ * Determine the last function a worker executed. This is called from
+ * the scheduler to get a worker's last known identity.
+ *
+ * CONTEXT:
+ * spin_lock_irq(rq->lock)
+ *
+ * Return:
+ * The last work function %current executed as a worker, NULL if it
+ * hasn't executed any work yet.
+ */
+work_func_t wq_worker_last_func(struct task_struct *task)
+{
+ struct worker *worker = kthread_data(task);
+
+ return worker->last_func;
+}
+
+/**
* worker_set_flags - set worker flags and adjust nr_running accordingly
* @worker: self
* @flags: flags to set
@@ -2192,6 +2212,9 @@ __acquires(&pool->lock)
if (unlikely(cpu_intensive))
worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
+ /* tag the worker for identification in schedule() */
+ worker->last_func = worker->current_func;
+
/* we're done with it, release */
hash_del(&worker->hentry);
worker->current_work = NULL;
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 66fbb5a..cb68b03 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -53,6 +53,9 @@ struct worker {
/* used only by rescuers to point to the target workqueue */
struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */
+
+ /* used by the scheduler to determine a worker's last known identity */
+ work_func_t last_func;
};
/**
@@ -67,9 +70,10 @@ static inline struct worker *current_wq_worker(void)
/*
* Scheduler hooks for concurrency managed workqueue. Only to be used from
- * sched/core.c and workqueue.c.
+ * sched/ and workqueue.c.
*/
void wq_worker_waking_up(struct task_struct *task, int cpu);
struct task_struct *wq_worker_sleeping(struct task_struct *task);
+work_func_t wq_worker_last_func(struct task_struct *task);
#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3eba254..2cf6663 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1114,7 +1114,6 @@
select DEBUG_MUTEXES
select DEBUG_RT_MUTEXES if RT_MUTEXES
select DEBUG_RWSEMS if RWSEM_SPIN_ON_OWNER
- select DEBUG_WW_MUTEX_SLOWPATH
select DEBUG_LOCK_ALLOC
select TRACE_IRQFLAGS
default n
diff --git a/lib/seq_buf.c b/lib/seq_buf.c
index 11f2ae0..6aabb60 100644
--- a/lib/seq_buf.c
+++ b/lib/seq_buf.c
@@ -144,9 +144,13 @@ int seq_buf_puts(struct seq_buf *s, const char *str)
WARN_ON(s->size == 0);
+ /* Add 1 to len for the trailing null byte which must be there */
+ len += 1;
+
if (seq_buf_can_fit(s, len)) {
memcpy(s->buffer + s->len, str, len);
- s->len += len;
+ /* Don't count the trailing null byte against the capacity */
+ s->len += len - 1;
return 0;
}
seq_buf_set_overflow(s);
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index 82ac39c..aecc099 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -541,38 +541,45 @@ static unsigned int __init print_ht(struct rhltable *rhlt)
static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects,
int cnt, bool slow)
{
- struct rhltable rhlt;
+ struct rhltable *rhlt;
unsigned int i, ret;
const char *key;
int err = 0;
- err = rhltable_init(&rhlt, &test_rht_params_dup);
- if (WARN_ON(err))
+ rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL);
+ if (WARN_ON(!rhlt))
+ return -EINVAL;
+
+ err = rhltable_init(rhlt, &test_rht_params_dup);
+ if (WARN_ON(err)) {
+ kfree(rhlt);
return err;
+ }
for (i = 0; i < cnt; i++) {
rhl_test_objects[i].value.tid = i;
- key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead);
+ key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead);
key += test_rht_params_dup.key_offset;
if (slow) {
- err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key,
+ err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key,
&rhl_test_objects[i].list_node.rhead));
if (err == -EAGAIN)
err = 0;
} else
- err = rhltable_insert(&rhlt,
+ err = rhltable_insert(rhlt,
&rhl_test_objects[i].list_node,
test_rht_params_dup);
if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast"))
goto skip_print;
}
- ret = print_ht(&rhlt);
+ ret = print_ht(rhlt);
WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast");
skip_print:
- rhltable_destroy(&rhlt);
+ rhltable_destroy(rhlt);
+ kfree(rhlt);
return 0;
}
diff --git a/mm/filemap.c b/mm/filemap.c
index a90a180..cbb202b 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2536,11 +2536,6 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
} else if (!page) {
/* No page in the page cache at all */
- struct address_space *mapping = file->f_mapping;
-
- if (mapping && (mapping->gfp_mask & __GFP_MOVABLE))
- mapping->gfp_mask |= __GFP_CMA;
-
do_sync_mmap_readahead(vmf->vma, ra, file, offset);
count_vm_event(PGMAJFAULT);
count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 106ae83..1cfb9f8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1301,7 +1301,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
nodemask_t *nodes)
{
unsigned long copy = ALIGN(maxnode-1, 64) / 8;
- const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
+ unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long);
if (copy > nbytes) {
if (copy > PAGE_SIZE)
@@ -1478,7 +1478,7 @@ static int kernel_get_mempolicy(int __user *policy,
int uninitialized_var(pval);
nodemask_t nodes;
- if (nmask != NULL && maxnode < MAX_NUMNODES)
+ if (nmask != NULL && maxnode < nr_node_ids)
return -EINVAL;
err = do_get_mempolicy(&pval, &nodes, addr, flags);
@@ -1514,7 +1514,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
unsigned long nr_bits, alloc_size;
DECLARE_BITMAP(bm, MAX_NUMNODES);
- nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
+ nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids);
alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
if (nmask)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c34ead7..c24fc87 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -308,6 +308,32 @@ EXPORT_SYMBOL(nr_online_nodes);
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+/*
+ * During boot we initialize deferred pages on-demand, as needed, but once
+ * page_alloc_init_late() has finished, the deferred pages are all initialized,
+ * and we can permanently disable that path.
+ */
+static DEFINE_STATIC_KEY_TRUE(deferred_pages);
+
+/*
+ * Call kasan_free_pages() only after deferred memory initialization
+ * has completed. Poisoning pages during deferred memory init will greatly
+ * lengthen the process and cause problems on large memory systems, as
+ * deferred page initialization is done with interrupts disabled.
+ *
+ * Assuming that there will be no reference to those newly initialized
+ * pages before they are ever allocated, this should have no effect on
+ * KASAN memory tracking as the poison will be properly inserted at page
+ * allocation time. The only corner case is when pages are allocated by
+ * on-demand allocation and then freed again before the deferred pages
+ * initialization is done, but this is not likely to happen.
+ */
+static inline void kasan_free_nondeferred_pages(struct page *page, int order)
+{
+ if (!static_branch_unlikely(&deferred_pages))
+ kasan_free_pages(page, order);
+}
+
/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
@@ -340,6 +366,8 @@ static inline bool update_defer_init(pg_data_t *pgdat,
return true;
}
#else
+#define kasan_free_nondeferred_pages(p, o) kasan_free_pages(p, o)
+
static inline bool early_page_uninitialised(unsigned long pfn)
{
return false;
@@ -1044,7 +1072,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
arch_free_page(page, order);
kernel_poison_pages(page, 1 << order, 0);
kernel_map_pages(page, 1 << order, 0);
- kasan_free_pages(page, order);
+ kasan_free_nondeferred_pages(page, order);
return true;
}
@@ -1608,13 +1636,6 @@ static int __init deferred_init_memmap(void *data)
}
/*
- * During boot we initialize deferred pages on-demand, as needed, but once
- * page_alloc_init_late() has finished, the deferred pages are all initialized,
- * and we can permanently disable that path.
- */
-static DEFINE_STATIC_KEY_TRUE(deferred_pages);
-
-/*
* If this zone has deferred pages, try to grow it by initializing enough
* deferred pages to satisfy the allocation specified by order, rounded up to
* the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index 38de70a..0f643dc 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -50,6 +50,7 @@ static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
const int nr_pages = pcpu_group_sizes[0] >> PAGE_SHIFT;
struct pcpu_chunk *chunk;
struct page *pages;
+ unsigned long flags;
int i;
chunk = pcpu_alloc_chunk(gfp);
@@ -68,9 +69,9 @@ static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp)
chunk->data = pages;
chunk->base_addr = page_address(pages) - pcpu_group_offsets[0];
- spin_lock_irq(&pcpu_lock);
+ spin_lock_irqsave(&pcpu_lock, flags);
pcpu_chunk_populated(chunk, 0, nr_pages, false);
- spin_unlock_irq(&pcpu_lock);
+ spin_unlock_irqrestore(&pcpu_lock, flags);
pcpu_stats_chunk_alloc();
trace_percpu_create_chunk(chunk->base_addr);
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 51411f9..ac85aeb 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -167,6 +167,8 @@ static inline void check_page_span(const void *ptr, unsigned long n,
const void *end = ptr + n - 1;
struct page *endpage;
bool is_reserved, is_cma;
+ const void * const stack = task_stack_page(current);
+ const void * const stackend = stack + THREAD_SIZE;
/*
* Sometimes the kernel data regions are not marked Reserved (see
@@ -191,6 +193,10 @@ static inline void check_page_span(const void *ptr, unsigned long n,
end <= (const void *)__bss_stop)
return;
+ /* Allow stack region to span multiple pages */
+ if (ptr >= stack && end <= stackend)
+ return;
+
/* Is the object wholly within one base page? */
if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) ==
((unsigned long)end & (unsigned long)PAGE_MASK)))
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 085477d..261ff7b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -483,16 +483,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
delta *= 4;
do_div(delta, shrinker->seeks);
- /*
- * Make sure we apply some minimal pressure on default priority
- * even on small cgroups. Stale objects are not only consuming memory
- * by themselves, but can also hold a reference to a dying cgroup,
- * preventing it from being reclaimed. A dying cgroup with all
- * corresponding structures like per-cpu stats and kmem caches
- * can be really big, so it may lead to a significant waste of memory.
- */
- delta = max_t(unsigned long long, delta, min(freeable, batch_size));
-
total_scan += delta;
if (total_scan < 0) {
pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
index 70417e9..314bbc8 100644
--- a/net/ax25/ax25_ip.c
+++ b/net/ax25/ax25_ip.c
@@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
dst = (ax25_address *)(bp + 1);
src = (ax25_address *)(bp + 8);
+ ax25_route_lock_use();
route = ax25_get_route(dst, NULL);
if (route) {
digipeat = route->digipeat;
@@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
ax25_queue_xmit(skb, dev);
put:
- if (route)
- ax25_put_route(route);
+ ax25_route_lock_unuse();
return NETDEV_TX_OK;
}
diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
index a0eff32..66f74c8 100644
--- a/net/ax25/ax25_route.c
+++ b/net/ax25/ax25_route.c
@@ -40,7 +40,7 @@
#include <linux/export.h>
static ax25_route *ax25_route_list;
-static DEFINE_RWLOCK(ax25_route_lock);
+DEFINE_RWLOCK(ax25_route_lock);
void ax25_rt_device_down(struct net_device *dev)
{
@@ -335,6 +335,7 @@ const struct seq_operations ax25_rt_seqops = {
* Find AX.25 route
*
* Only routes with a reference count of zero can be destroyed.
+ * Must be called with ax25_route_lock read locked.
*/
ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
{
@@ -342,7 +343,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
ax25_route *ax25_def_rt = NULL;
ax25_route *ax25_rt;
- read_lock(&ax25_route_lock);
/*
* Bind to the physical interface we heard them on, or the default
* route if none is found;
@@ -365,11 +365,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
if (ax25_spe_rt != NULL)
ax25_rt = ax25_spe_rt;
- if (ax25_rt != NULL)
- ax25_hold_route(ax25_rt);
-
- read_unlock(&ax25_route_lock);
-
return ax25_rt;
}
@@ -400,9 +395,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
ax25_route *ax25_rt;
int err = 0;
- if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
+ ax25_route_lock_use();
+ ax25_rt = ax25_get_route(addr, NULL);
+ if (!ax25_rt) {
+ ax25_route_lock_unuse();
return -EHOSTUNREACH;
-
+ }
if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
err = -EHOSTUNREACH;
goto put;
@@ -437,8 +435,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
}
put:
- ax25_put_route(ax25_rt);
-
+ ax25_route_lock_unuse();
return err;
}
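
The ax25_route changes above move locking out of ax25_get_route(): callers now pin the route table with ax25_route_lock_use()/_unuse() for as long as they dereference the returned entry, instead of relying on a per-route refcount. A rough userspace analogue of that calling convention, using a pthread rwlock and invented names (build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_rwlock_t table_lock = PTHREAD_RWLOCK_INITIALIZER;

struct route { const char *dest; int metric; };

static struct route routes[] = {
	{ "alpha", 1 },
	{ "beta",  2 },
};

static void table_lock_use(void)   { pthread_rwlock_rdlock(&table_lock); }
static void table_lock_unuse(void) { pthread_rwlock_unlock(&table_lock); }

/* Must be called with table_lock read-held; result valid until unuse. */
static struct route *route_lookup(const char *dest)
{
	for (size_t i = 0; i < sizeof(routes) / sizeof(routes[0]); i++)
		if (!strcmp(routes[i].dest, dest))
			return &routes[i];
	return NULL;
}

int main(void)
{
	table_lock_use();
	struct route *r = route_lookup("beta");
	if (r)
		printf("%s -> metric %d\n", r->dest, r->metric);
	table_lock_unuse();
	return 0;
}
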
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 2f0d42f..08690d0 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -20,7 +20,6 @@
#include "main.h"
#include <linux/atomic.h>
-#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/errno.h>
#include <linux/gfp.h>
@@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev)
parent_dev = __dev_get_by_index((struct net *)parent_net,
dev_get_iflink(net_dev));
/* if we got a NULL parent_dev there is something broken.. */
- if (WARN(!parent_dev, "Cannot find parent device"))
+ if (!parent_dev) {
+ pr_err("Cannot find parent device\n");
return false;
+ }
if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net))
return false;
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 626ddca..a2976ad 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -221,10 +221,14 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
netif_trans_update(soft_iface);
vid = batadv_get_vid(skb, 0);
+
+ skb_reset_mac_header(skb);
ethhdr = eth_hdr(skb);
switch (ntohs(ethhdr->h_proto)) {
case ETH_P_8021Q:
+ if (!pskb_may_pull(skb, sizeof(*vhdr)))
+ goto dropped;
vhdr = vlan_eth_hdr(skb);
/* drop batman-in-batman packets to prevent loops */
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 502f663..4d4b9b5 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -1088,6 +1088,8 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
err = -ENOMEM;
goto err_unlock;
}
+ if (swdev_notify)
+ fdb->added_by_user = 1;
fdb->added_by_external_learn = 1;
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
} else {
@@ -1107,6 +1109,9 @@ int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
modified = true;
}
+ if (swdev_notify)
+ fdb->added_by_user = 1;
+
if (modified)
fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 6dec8e9..20ed7ad 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1420,14 +1420,7 @@ static void br_multicast_query_received(struct net_bridge *br,
return;
br_multicast_update_query_timer(br, query, max_delay);
-
- /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules,
- * the arrival port for IGMP Queries where the source address
- * is 0.0.0.0 should not be added to router port list.
- */
- if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) ||
- saddr->proto == htons(ETH_P_IPV6))
- br_multicast_mark_router(br, port);
+ br_multicast_mark_router(br, port);
}
static void br_ip4_multicast_query(struct net_bridge *br,
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 9a1c27c..f7d7f32 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2091,6 +2091,8 @@ static int process_connect(struct ceph_connection *con)
dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
if (con->auth) {
+ int len = le32_to_cpu(con->in_reply.authorizer_len);
+
/*
* Any connection that defines ->get_authorizer()
* should also define ->add_authorizer_challenge() and
@@ -2100,8 +2102,7 @@ static int process_connect(struct ceph_connection *con)
*/
if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) {
ret = con->ops->add_authorizer_challenge(
- con, con->auth->authorizer_reply_buf,
- le32_to_cpu(con->in_reply.authorizer_len));
+ con, con->auth->authorizer_reply_buf, len);
if (ret < 0)
return ret;
@@ -2111,10 +2112,12 @@ static int process_connect(struct ceph_connection *con)
return 0;
}
- ret = con->ops->verify_authorizer_reply(con);
- if (ret < 0) {
- con->error_msg = "bad authorize reply";
- return ret;
+ if (len) {
+ ret = con->ops->verify_authorizer_reply(con);
+ if (ret < 0) {
+ con->error_msg = "bad authorize reply";
+ return ret;
+ }
}
}
@@ -3240,9 +3243,10 @@ void ceph_con_keepalive(struct ceph_connection *con)
dout("con_keepalive %p\n", con);
mutex_lock(&con->mutex);
clear_standby(con);
+ con_flag_set(con, CON_FLAG_KEEPALIVE_PENDING);
mutex_unlock(&con->mutex);
- if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
- con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
+
+ if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
diff --git a/net/core/dev.c b/net/core/dev.c
index 6ac9603..35ea93b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -8069,7 +8069,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
netdev_features_t feature;
int feature_bit;
- for_each_netdev_feature(&upper_disables, feature_bit) {
+ for_each_netdev_feature(upper_disables, feature_bit) {
feature = __NETIF_F_BIT(feature_bit);
if (!(upper->wanted_features & feature)
&& (features & feature)) {
@@ -8089,7 +8089,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
netdev_features_t feature;
int feature_bit;
- for_each_netdev_feature(&upper_disables, feature_bit) {
+ for_each_netdev_feature(upper_disables, feature_bit) {
feature = __NETIF_F_BIT(feature_bit);
if (!(features & feature) && (lower->features & feature)) {
netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
diff --git a/net/core/filter.c b/net/core/filter.c
index 8c2411f..fb0080e 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3930,7 +3930,10 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
sk->sk_rcvlowat = val ? : 1;
break;
case SO_MARK:
- sk->sk_mark = val;
+ if (sk->sk_mark != val) {
+ sk->sk_mark = val;
+ sk_dst_reset(sk);
+ }
break;
default:
ret = -EINVAL;
@@ -4001,7 +4004,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
/* Only some options are supported */
switch (optname) {
case TCP_BPF_IW:
- if (val <= 0 || tp->data_segs_out > 0)
+ if (val <= 0 || tp->data_segs_out > tp->syn_data)
ret = -EINVAL;
else
tp->snd_cwnd = val;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 589ec5b..8656b1e 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -353,6 +353,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
*/
void *netdev_alloc_frag(unsigned int fragsz)
{
+ fragsz = SKB_DATA_ALIGN(fragsz);
+
return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(netdev_alloc_frag);
@@ -366,6 +368,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
void *napi_alloc_frag(unsigned int fragsz)
{
+ fragsz = SKB_DATA_ALIGN(fragsz);
+
return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);
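
The skbuff hunks above round the requested fragment size up to an aligned boundary before carving it out of the shared frag page. A tiny standalone sketch of that rounding; the 64-byte constant stands in for a cache-line-sized alignment and is only illustrative:

#include <stdio.h>

#define DEMO_ALIGN_TO	64UL	/* illustrative stand-in for SMP_CACHE_BYTES */
#define DEMO_ALIGN(x)	(((x) + DEMO_ALIGN_TO - 1) & ~(DEMO_ALIGN_TO - 1))

int main(void)
{
	unsigned long sizes[] = { 1, 60, 64, 65, 1500 };

	for (int i = 0; i < 5; i++)
		printf("%lu -> %lu\n", sizes[i], DEMO_ALIGN(sizes[i]));
	return 0;	/* prints 64, 64, 64, 128, 1536 */
}
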
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h
index 6eb837a..baaaeb2 100644
--- a/net/dccp/ccid.h
+++ b/net/dccp/ccid.h
@@ -202,7 +202,7 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk,
static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
u8 pkt, u8 opt, u8 *val, u8 len)
{
- if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL)
+ if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options)
return 0;
return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len);
}
@@ -214,7 +214,7 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk,
static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk,
u8 pkt, u8 opt, u8 *val, u8 len)
{
- if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL)
+ if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options)
return 0;
return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len);
}
diff --git a/net/dsa/master.c b/net/dsa/master.c
index c90ee32..aae478d 100644
--- a/net/dsa/master.c
+++ b/net/dsa/master.c
@@ -158,6 +158,8 @@ static void dsa_master_ethtool_teardown(struct net_device *dev)
cpu_dp->orig_ethtool_ops = NULL;
}
+static struct lock_class_key dsa_master_addr_list_lock_key;
+
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
{
/* If we use a tagging format that doesn't have an ethertype
@@ -167,6 +169,8 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp)
wmb();
dev->dsa_ptr = cpu_dp;
+ lockdep_set_class(&dev->addr_list_lock,
+ &dsa_master_addr_list_lock_key);
return dsa_master_ethtool_setup(dev);
}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 1c45c1d..b39720d 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -140,11 +140,14 @@ static int dsa_slave_close(struct net_device *dev)
static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
struct net_device *master = dsa_slave_to_master(dev);
-
- if (change & IFF_ALLMULTI)
- dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
- if (change & IFF_PROMISC)
- dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1);
+ if (dev->flags & IFF_UP) {
+ if (change & IFF_ALLMULTI)
+ dev_set_allmulti(master,
+ dev->flags & IFF_ALLMULTI ? 1 : -1);
+ if (change & IFF_PROMISC)
+ dev_set_promiscuity(master,
+ dev->flags & IFF_PROMISC ? 1 : -1);
+ }
}
static void dsa_slave_set_rx_mode(struct net_device *dev)
@@ -639,7 +642,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
int ret;
/* Port's PHY and MAC both need to be EEE capable */
- if (!dev->phydev && !dp->pl)
+ if (!dev->phydev || !dp->pl)
return -ENODEV;
if (!ds->ops->set_mac_eee)
@@ -659,7 +662,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
int ret;
/* Port's PHY and MAC both need to be EEE capable */
- if (!dev->phydev && !dp->pl)
+ if (!dev->phydev || !dp->pl)
return -ENODEV;
if (!ds->ops->get_mac_eee)
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 1a4e9ff..5731670 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk,
+ nla_total_size(1) /* INET_DIAG_TOS */
+ nla_total_size(1) /* INET_DIAG_TCLASS */
+ nla_total_size(4) /* INET_DIAG_MARK */
+ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
+ nla_total_size(sizeof(struct inet_diag_meminfo))
+ nla_total_size(sizeof(struct inet_diag_msg))
+ nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
@@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
goto errout;
}
- if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) {
+ if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
+ ext & (1 << (INET_DIAG_TCLASS - 1))) {
u32 classid = 0;
#ifdef CONFIG_SOCK_CGROUP_DATA
classid = sock_cgroup_classid(&sk->sk_cgrp_data);
#endif
+ /* Fall back to socket priority if class id isn't set.
+ * Classful qdiscs use it as direct reference to class.
+ * For cgroup2 classid is always zero.
+ */
+ if (!classid)
+ classid = sk->sk_priority;
if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
goto errout;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d757b96..be77859 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -216,6 +216,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
atomic_set(&p->rid, 0);
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
p->rate_tokens = 0;
+ p->n_redirects = 0;
/* 60*HZ is arbitrary, but chosen enough high so that the first
* calculation of tokens is at its maximum.
*/
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index fb1e7f2..3cd237b 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -56,7 +56,7 @@ struct clusterip_config {
#endif
enum clusterip_hashmode hash_mode; /* which hashing mode */
u_int32_t hash_initval; /* hash initialization */
- struct rcu_head rcu;
+ struct rcu_head rcu; /* for call_rcu_bh */
struct net *net; /* netns for pernet list */
char ifname[IFNAMSIZ]; /* device ifname */
};
@@ -72,6 +72,8 @@ struct clusterip_net {
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *procdir;
+ /* mutex protects the config->pde */
+ struct mutex mutex;
#endif
};
@@ -118,17 +120,18 @@ clusterip_config_entry_put(struct clusterip_config *c)
local_bh_disable();
if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
+ list_del_rcu(&c->list);
+ spin_unlock(&cn->lock);
+ local_bh_enable();
/* In case anyone still accesses the file, the open/close
* functions are also incrementing the refcount on their own,
* so it's safe to remove the entry even if it's in use. */
#ifdef CONFIG_PROC_FS
+ mutex_lock(&cn->mutex);
if (cn->procdir)
proc_remove(c->pde);
+ mutex_unlock(&cn->mutex);
#endif
- list_del_rcu(&c->list);
- spin_unlock(&cn->lock);
- local_bh_enable();
-
return;
}
local_bh_enable();
@@ -278,9 +281,11 @@ clusterip_config_init(struct net *net, const struct ipt_clusterip_tgt_info *i,
/* create proc dir entry */
sprintf(buffer, "%pI4", &ip);
+ mutex_lock(&cn->mutex);
c->pde = proc_create_data(buffer, 0600,
cn->procdir,
&clusterip_proc_fops, c);
+ mutex_unlock(&cn->mutex);
if (!c->pde) {
err = -ENOMEM;
goto err;
@@ -833,6 +838,7 @@ static int clusterip_net_init(struct net *net)
pr_err("Unable to proc dir entry\n");
return -ENOMEM;
}
+ mutex_init(&cn->mutex);
#endif /* CONFIG_PROC_FS */
return 0;
@@ -841,9 +847,12 @@ static int clusterip_net_init(struct net *net)
static void clusterip_net_exit(struct net *net)
{
struct clusterip_net *cn = clusterip_pernet(net);
+
#ifdef CONFIG_PROC_FS
+ mutex_lock(&cn->mutex);
proc_remove(cn->procdir);
cn->procdir = NULL;
+ mutex_unlock(&cn->mutex);
#endif
nf_unregister_net_hook(net, &cip_arp_ops);
}
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
index ac110c1..481437f 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
@@ -104,6 +104,8 @@ static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)
int snmp_version(void *context, size_t hdrlen, unsigned char tag,
const void *data, size_t datalen)
{
+ if (datalen != 1)
+ return -EINVAL;
if (*(unsigned char *)data > 1)
return -ENOTSUPP;
return 1;
@@ -113,8 +115,11 @@ int snmp_helper(void *context, size_t hdrlen, unsigned char tag,
const void *data, size_t datalen)
{
struct snmp_ctx *ctx = (struct snmp_ctx *)context;
- __be32 *pdata = (__be32 *)data;
+ __be32 *pdata;
+ if (datalen != 4)
+ return -EINVAL;
+ pdata = (__be32 *)data;
if (*pdata == ctx->from) {
pr_debug("%s: %pI4 to %pI4\n", __func__,
(void *)&ctx->from, (void *)&ctx->to);
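
The snmp_version()/snmp_helper() changes above validate the reported element length before dereferencing the payload as a fixed-width value. A small standalone sketch of that defensive pattern; the handle_addr() callback and its framing are hypothetical, not the SNMP helper API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Callback for a TLV element claimed to carry a 32-bit address. */
static int handle_addr(const void *data, size_t datalen, uint32_t *out)
{
	uint32_t v;

	if (datalen != sizeof(v))	/* reject malformed elements */
		return -1;

	memcpy(&v, data, sizeof(v));	/* no oversized/unaligned deref */
	*out = v;
	return 0;
}

int main(void)
{
	uint8_t good[4] = { 0xc0, 0xa8, 0x00, 0x01 };
	uint8_t bad[1]  = { 0x7f };
	uint32_t addr;

	printf("good: %d\n", handle_addr(good, sizeof(good), &addr));
	printf("bad:  %d\n", handle_addr(bad, sizeof(bad), &addr));
	return 0;
}
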
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 8501554..436b46c 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -887,13 +887,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
/* No redirected packets during ip_rt_redirect_silence;
* reset the algorithm.
*/
- if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
+ if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
peer->rate_tokens = 0;
+ peer->n_redirects = 0;
+ }
/* Too many ignored redirects; do not send anything
* set dst.rate_last to the last seen redirected packet.
*/
- if (peer->rate_tokens >= ip_rt_redirect_number) {
+ if (peer->n_redirects >= ip_rt_redirect_number) {
peer->rate_last = jiffies;
goto out_put_peer;
}
@@ -910,6 +912,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
peer->rate_last = jiffies;
++peer->rate_tokens;
+ ++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
if (log_martians &&
peer->rate_tokens == ip_rt_redirect_number)

diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index e5b6c23..d561464 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2519,6 +2519,7 @@ void tcp_write_queue_purge(struct sock *sk)
sk_mem_reclaim(sk);
tcp_clear_all_retrans_hints(tcp_sk(sk));
tcp_sk(sk)->packets_out = 0;
+ inet_csk(sk)->icsk_backoff = 0;
}
int tcp_disconnect(struct sock *sk, int flags)
@@ -2567,7 +2568,6 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->write_seq += tp->max_window + 2;
if (tp->write_seq == 0)
tp->write_seq = 1;
- icsk->icsk_backoff = 0;
tp->snd_cwnd = 2;
icsk->icsk_probes_out = 0;
tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3147931..5f880b0 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -535,14 +535,15 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
if (sock_owned_by_user(sk))
break;
+ skb = tcp_rtx_queue_head(sk);
+ if (WARN_ON_ONCE(!skb))
+ break;
+
icsk->icsk_backoff--;
icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
TCP_TIMEOUT_INIT;
icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
- skb = tcp_rtx_queue_head(sk);
- BUG_ON(!skb);
-
tcp_mstamp_refresh(tp);
delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
remaining = icsk->icsk_rto -
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 37ac864..6bd1ba10 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1150,7 +1150,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
list_for_each_entry(ifa, &idev->addr_list, if_list) {
if (ifa == ifp)
continue;
- if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
+ if (ifa->prefix_len != ifp->prefix_len ||
+ !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
ifp->prefix_len))
continue;
if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index be04877..faed98d 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1735,6 +1735,24 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
+static void ip6erspan_set_version(struct nlattr *data[],
+ struct __ip6_tnl_parm *parms)
+{
+ parms->erspan_ver = 1;
+ if (data[IFLA_GRE_ERSPAN_VER])
+ parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
+
+ if (parms->erspan_ver == 1) {
+ if (data[IFLA_GRE_ERSPAN_INDEX])
+ parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
+ } else if (parms->erspan_ver == 2) {
+ if (data[IFLA_GRE_ERSPAN_DIR])
+ parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
+ if (data[IFLA_GRE_ERSPAN_HWID])
+ parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
+ }
+}
+
static void ip6gre_netlink_parms(struct nlattr *data[],
struct __ip6_tnl_parm *parms)
{
@@ -1783,20 +1801,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
if (data[IFLA_GRE_COLLECT_METADATA])
parms->collect_md = true;
-
- parms->erspan_ver = 1;
- if (data[IFLA_GRE_ERSPAN_VER])
- parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
-
- if (parms->erspan_ver == 1) {
- if (data[IFLA_GRE_ERSPAN_INDEX])
- parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
- } else if (parms->erspan_ver == 2) {
- if (data[IFLA_GRE_ERSPAN_DIR])
- parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
- if (data[IFLA_GRE_ERSPAN_HWID])
- parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
- }
}
static int ip6gre_tap_init(struct net_device *dev)
@@ -2225,6 +2229,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
int err;
ip6gre_netlink_parms(data, &nt->parms);
+ ip6erspan_set_version(data, &nt->parms);
ign = net_generic(net, ip6gre_net_id);
if (nt->parms.collect_md) {
@@ -2270,6 +2275,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
if (IS_ERR(t))
return PTR_ERR(t);
+ ip6erspan_set_version(data, &p);
ip6gre_tunnel_unlink_md(ign, t);
ip6gre_tunnel_unlink(ign, t);
ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 8b075f0..6d0b1f3 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb)
struct sock *sk = sk_to_full_sk(skb->sk);
unsigned int hh_len;
struct dst_entry *dst;
+ int strict = (ipv6_addr_type(&iph->daddr) &
+ (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL));
struct flowi6 fl6 = {
.flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if :
- rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0,
+ strict ? skb_dst(skb)->dev->ifindex : 0,
.flowi6_mark = skb->mark,
.flowi6_uid = sock_net_uid(net, sk),
.daddr = iph->daddr,
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index 8d0ba75..9b2f272 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info)
rcu_read_unlock();
genlmsg_end(msg, hdr);
- genlmsg_reply(msg, info);
-
- return 0;
+ return genlmsg_reply(msg, info);
nla_put_failure:
rcu_read_unlock();
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index eb162bd..da6d5a3 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
}
err = 0;
- if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
+ if (__in6_dev_get(skb->dev) &&
+ !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len))
goto out;
if (t->parms.iph.daddr == 0)
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index f13382e..1efce16 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1347,10 +1347,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc6.opt = opt;
fl6.flowi6_proto = sk->sk_protocol;
- if (!ipv6_addr_any(daddr))
- fl6.daddr = *daddr;
- else
- fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+ fl6.daddr = *daddr;
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
fl6.fl6_sport = inet->inet_sport;
@@ -1378,6 +1375,9 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
}
+ if (ipv6_addr_any(&fl6.daddr))
+ fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */
+
final_p = fl6_update_dst(&fl6, opt, &final);
if (final_p)
connected = false;
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c
index 4a46df8..f5b4feb 100644
--- a/net/ipv6/xfrm6_tunnel.c
+++ b/net/ipv6/xfrm6_tunnel.c
@@ -144,6 +144,9 @@ static u32 __xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr)
index = __xfrm6_tunnel_spi_check(net, spi);
if (index >= 0)
goto alloc_spi;
+
+ if (spi == XFRM6_TUNNEL_SPI_MAX)
+ break;
}
for (spi = XFRM6_TUNNEL_SPI_MIN; spi < xfrm6_tn->spi; spi++) {
index = __xfrm6_tunnel_spi_check(net, spi);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 5d22eda..c2abe9d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -887,6 +887,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
BSS_CHANGED_P2P_PS |
BSS_CHANGED_TXPOWER;
int err;
+ int prev_beacon_int;
old = sdata_dereference(sdata->u.ap.beacon, sdata);
if (old)
@@ -909,6 +910,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
sdata->needed_rx_chains = sdata->local->rx_chains;
+ prev_beacon_int = sdata->vif.bss_conf.beacon_int;
sdata->vif.bss_conf.beacon_int = params->beacon_interval;
mutex_lock(&local->mtx);
@@ -917,8 +919,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (!err)
ieee80211_vif_copy_chanctx_to_vlans(sdata, false);
mutex_unlock(&local->mtx);
- if (err)
+ if (err) {
+ sdata->vif.bss_conf.beacon_int = prev_beacon_int;
return err;
+ }
/*
* Apply control port protocol, this allows us to
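
The ieee80211_start_ap() hunk above remembers the previous beacon interval before overwriting it and restores it if channel-context assignment fails. A minimal sketch of that save/restore-on-error pattern, with an invented config struct and step function for illustration:

#include <stdio.h>

struct config { int interval; };

static int apply_step(struct config *cfg, int should_fail)
{
	(void)cfg;
	return should_fail ? -1 : 0;
}

static int update_interval(struct config *cfg, int new_val, int should_fail)
{
	int prev = cfg->interval;	/* save before overwriting */
	int err;

	cfg->interval = new_val;
	err = apply_step(cfg, should_fail);
	if (err) {
		cfg->interval = prev;	/* roll back on failure */
		return err;
	}
	return 0;
}

int main(void)
{
	struct config cfg = { .interval = 100 };

	update_interval(&cfg, 200, 1);	/* fails, rolls back */
	printf("after failed update: %d\n", cfg.interval);	/* 100 */
	update_interval(&cfg, 200, 0);	/* succeeds */
	printf("after good update:   %d\n", cfg.interval);	/* 200 */
	return 0;
}
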
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 2152663..e84103b 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags {
* @dst: mesh path destination mac address
* @mpp: mesh proxy mac address
* @rhash: rhashtable list pointer
+ * @walk_list: linked list containing all mesh_path objects.
* @gate_list: list pointer for known gates list
* @sdata: mesh subif
* @next_hop: mesh neighbor to which frames for this destination will be
@@ -105,6 +106,7 @@ struct mesh_path {
u8 dst[ETH_ALEN];
u8 mpp[ETH_ALEN]; /* used for MPP or MAP */
struct rhash_head rhash;
+ struct hlist_node walk_list;
struct hlist_node gate_list;
struct ieee80211_sub_if_data *sdata;
struct sta_info __rcu *next_hop;
@@ -133,12 +135,16 @@ struct mesh_path {
* gate's mpath may or may not be resolved and active.
* @gates_lock: protects updates to known_gates
* @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr
+ * @walk_head: linked list containing all mesh_path objects
+ * @walk_lock: lock protecting walk_head
* @entries: number of entries in the table
*/
struct mesh_table {
struct hlist_head known_gates;
spinlock_t gates_lock;
struct rhashtable rhead;
+ struct hlist_head walk_head;
+ spinlock_t walk_lock;
atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */
};
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index a512562..c3a7396 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void)
return NULL;
INIT_HLIST_HEAD(&newtbl->known_gates);
+ INIT_HLIST_HEAD(&newtbl->walk_head);
atomic_set(&newtbl->entries, 0);
spin_lock_init(&newtbl->gates_lock);
+ spin_lock_init(&newtbl->walk_lock);
return newtbl;
}
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
- int i = 0, ret;
- struct mesh_path *mpath = NULL;
- struct rhashtable_iter iter;
+ int i = 0;
+ struct mesh_path *mpath;
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return NULL;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
if (i++ == idx)
break;
}
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
- if (IS_ERR(mpath) || !mpath)
+ if (!mpath)
return NULL;
if (mpath_expired(mpath)) {
@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
return ERR_PTR(-ENOMEM);
tbl = sdata->u.mesh.mesh_paths;
+ spin_lock_bh(&tbl->walk_lock);
do {
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
&new_mpath->rhash,
@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
mpath = rhashtable_lookup_fast(&tbl->rhead,
dst,
mesh_rht_params);
-
+ else if (!ret)
+ hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
} while (unlikely(ret == -EEXIST && !mpath));
+ spin_unlock_bh(&tbl->walk_lock);
- if (ret && ret != -EEXIST)
- return ERR_PTR(ret);
-
- /* At this point either new_mpath was added, or we found a
- * matching entry already in the table; in the latter case
- * free the unnecessary new entry.
- */
- if (ret == -EEXIST) {
+ if (ret) {
kfree(new_mpath);
+
+ if (ret != -EEXIST)
+ return ERR_PTR(ret);
+
new_mpath = mpath;
}
+
sdata->u.mesh.mesh_paths_generation++;
return new_mpath;
}
@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata,
memcpy(new_mpath->mpp, mpp, ETH_ALEN);
tbl = sdata->u.mesh.mpp_paths;
+
+ spin_lock_bh(&tbl->walk_lock);
ret = rhashtable_lookup_insert_fast(&tbl->rhead,
&new_mpath->rhash,
mesh_rht_params);
+ if (!ret)
+ hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
+ spin_unlock_bh(&tbl->walk_lock);
+
+ if (ret)
+ kfree(new_mpath);
sdata->u.mesh.mpp_paths_generation++;
return ret;
@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta)
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
if (rcu_access_pointer(mpath->next_hop) == sta &&
mpath->flags & MESH_PATH_ACTIVE &&
!(mpath->flags & MESH_PATH_FIXED)) {
@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta)
WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
}
}
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ rcu_read_unlock();
}
static void mesh_path_free_rcu(struct mesh_table *tbl,
@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl,
static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
+ hlist_del_rcu(&mpath->walk_list);
rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
mesh_path_free_rcu(tbl, mpath);
}
@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta)
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct mesh_table *tbl = sdata->u.mesh.mesh_paths;
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
+ struct hlist_node *n;
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
-
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if (rcu_access_pointer(mpath->next_hop) == sta)
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
{
struct mesh_table *tbl = sdata->u.mesh.mpp_paths;
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
+ struct hlist_node *n;
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
-
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if (ether_addr_equal(mpath->mpp, proxy))
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}
static void table_flush_by_iface(struct mesh_table *tbl)
{
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
+ struct hlist_node *n;
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}
/**
@@ -675,7 +624,7 @@ static int table_path_del(struct mesh_table *tbl,
{
struct mesh_path *mpath;
- rcu_read_lock();
+ spin_lock_bh(&tbl->walk_lock);
mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
if (!mpath) {
rcu_read_unlock();
@@ -683,7 +632,7 @@ static int table_path_del(struct mesh_table *tbl,
}
__mesh_path_del(tbl, mpath);
- rcu_read_unlock();
+ spin_unlock_bh(&tbl->walk_lock);
return 0;
}
@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
struct mesh_table *tbl)
{
struct mesh_path *mpath;
- struct rhashtable_iter iter;
- int ret;
+ struct hlist_node *n;
- ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL);
- if (ret)
- return;
-
- rhashtable_walk_start(&iter);
-
- while ((mpath = rhashtable_walk_next(&iter))) {
- if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
- continue;
- if (IS_ERR(mpath))
- break;
+ spin_lock_bh(&tbl->walk_lock);
+ hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
(!(mpath->flags & MESH_PATH_FIXED)) &&
time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
__mesh_path_del(tbl, mpath);
}
-
- rhashtable_walk_stop(&iter);
- rhashtable_walk_exit(&iter);
+ spin_unlock_bh(&tbl->walk_lock);
}
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 5e2b4a4..828348b 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -142,6 +142,9 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
/* allocate extra bitmaps */
if (status->chains)
len += 4 * hweight8(status->chains);
+ /* vendor presence bitmap */
+ if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA)
+ len += 4;
if (ieee80211_have_rx_timestamp(status)) {
len = ALIGN(len, 8);
@@ -197,8 +200,6 @@ ieee80211_rx_radiotap_hdrlen(struct ieee80211_local *local,
if (status->flag & RX_FLAG_RADIOTAP_VENDOR_DATA) {
struct ieee80211_vendor_radiotap *rtap = (void *)skb->data;
- /* vendor presence bitmap */
- len += 4;
/* alignment for fixed 6-byte vendor data header */
len = ALIGN(len, 2);
/* vendor data header */
@@ -2597,6 +2598,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
struct ieee80211_sub_if_data *sdata = rx->sdata;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
u16 ac, q, hdrlen;
+ int tailroom = 0;
hdr = (struct ieee80211_hdr *) skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -2683,8 +2685,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (!ifmsh->mshcfg.dot11MeshForwarding)
goto out;
+ if (sdata->crypto_tx_tailroom_needed_cnt)
+ tailroom = IEEE80211_ENCRYPT_TAILROOM;
+
fwd_skb = skb_copy_expand(skb, local->tx_headroom +
- sdata->encrypt_headroom, 0, GFP_ATOMIC);
+ sdata->encrypt_headroom,
+ tailroom, GFP_ATOMIC);
if (!fwd_skb)
goto out;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 995a491..c7ccd7b 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1913,9 +1913,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
int head_need, bool may_encrypt)
{
struct ieee80211_local *local = sdata->local;
+ struct ieee80211_hdr *hdr;
+ bool enc_tailroom;
int tail_need = 0;
- if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) {
+ hdr = (struct ieee80211_hdr *) skb->data;
+ enc_tailroom = may_encrypt &&
+ (sdata->crypto_tx_tailroom_needed_cnt ||
+ ieee80211_is_mgmt(hdr->frame_control));
+
+ if (enc_tailroom) {
tail_need = IEEE80211_ENCRYPT_TAILROOM;
tail_need -= skb_tailroom(skb);
tail_need = max_t(int, tail_need, 0);
@@ -1923,8 +1930,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata,
if (skb_cloned(skb) &&
(!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) ||
- !skb_clone_writable(skb, ETH_HLEN) ||
- (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt)))
+ !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom))
I802_DEBUG_INC(local->tx_expand_skb_head_cloned);
else if (head_need || tail_need)
I802_DEBUG_INC(local->tx_expand_skb_head);
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index d812561..e1537ac 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -28,6 +28,7 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
{
struct flow_offload_tuple *ft = &flow->tuplehash[dir].tuple;
struct nf_conntrack_tuple *ctt = &ct->tuplehash[dir].tuple;
+ struct dst_entry *other_dst = route->tuple[!dir].dst;
struct dst_entry *dst = route->tuple[dir].dst;
ft->dir = dir;
@@ -50,8 +51,8 @@ flow_offload_fill_dir(struct flow_offload *flow, struct nf_conn *ct,
ft->src_port = ctt->src.u.tcp.port;
ft->dst_port = ctt->dst.u.tcp.port;
- ft->iifidx = route->tuple[dir].ifindex;
- ft->oifidx = route->tuple[!dir].ifindex;
+ ft->iifidx = other_dst->dev->ifindex;
+ ft->oifidx = dst->dev->ifindex;
ft->dst_cache = dst;
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index ed9af46..7d424fd 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -291,6 +291,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx)
int err;
list_for_each_entry(rule, &ctx->chain->rules, list) {
+ if (!nft_is_active_next(ctx->net, rule))
+ continue;
+
err = nft_delrule(ctx, rule);
if (err < 0)
return err;
@@ -4439,6 +4442,8 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
err5:
kfree(trans);
err4:
+ if (obj)
+ obj->use--;
kfree(elem.priv);
err3:
if (nla[NFTA_SET_ELEM_DATA] != NULL)
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c
index 00db27d..b0bc130 100644
--- a/net/netfilter/nfnetlink_osf.c
+++ b/net/netfilter/nfnetlink_osf.c
@@ -71,6 +71,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
int ttl_check,
struct nf_osf_hdr_ctx *ctx)
{
+ const __u8 *optpinit = ctx->optp;
unsigned int check_WSS = 0;
int fmatch = FMATCH_WRONG;
int foptsize, optnum;
@@ -160,6 +161,9 @@ static bool nf_osf_match_one(const struct sk_buff *skb,
}
}
+ if (fmatch != FMATCH_OK)
+ ctx->optp = optpinit;
+
return fmatch == FMATCH_OK;
}
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 29d6fc7..38da1f5 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -282,6 +282,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
{
struct xt_target *target = expr->ops->data;
void *info = nft_expr_priv(expr);
+ struct module *me = target->me;
struct xt_tgdtor_param par;
par.net = ctx->net;
@@ -292,7 +293,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
par.target->destroy(&par);
if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
- module_put(target->me);
+ module_put(me);
}
static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 5fd4c57..436cc14 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -12,6 +12,7 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <net/netfilter/nf_flow_table.h>
+#include <net/netfilter/nf_conntrack_helper.h>
struct nft_flow_offload {
struct nft_flowtable *flowtable;
@@ -29,10 +30,12 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
memset(&fl, 0, sizeof(fl));
switch (nft_pf(pkt)) {
case NFPROTO_IPV4:
- fl.u.ip4.daddr = ct->tuplehash[!dir].tuple.dst.u3.ip;
+ fl.u.ip4.daddr = ct->tuplehash[dir].tuple.src.u3.ip;
+ fl.u.ip4.flowi4_oif = nft_in(pkt)->ifindex;
break;
case NFPROTO_IPV6:
- fl.u.ip6.daddr = ct->tuplehash[!dir].tuple.dst.u3.in6;
+ fl.u.ip6.daddr = ct->tuplehash[dir].tuple.src.u3.in6;
+ fl.u.ip6.flowi6_oif = nft_in(pkt)->ifindex;
break;
}
@@ -41,9 +44,7 @@ static int nft_flow_route(const struct nft_pktinfo *pkt,
return -ENOENT;
route->tuple[dir].dst = this_dst;
- route->tuple[dir].ifindex = nft_in(pkt)->ifindex;
route->tuple[!dir].dst = other_dst;
- route->tuple[!dir].ifindex = nft_out(pkt)->ifindex;
return 0;
}
@@ -66,6 +67,7 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
{
struct nft_flow_offload *priv = nft_expr_priv(expr);
struct nf_flowtable *flowtable = &priv->flowtable->data;
+ const struct nf_conn_help *help;
enum ip_conntrack_info ctinfo;
struct nf_flow_route route;
struct flow_offload *flow;
@@ -88,7 +90,8 @@ static void nft_flow_offload_eval(const struct nft_expr *expr,
goto out;
}
- if (test_bit(IPS_HELPER_BIT, &ct->status))
+ help = nfct_help(ct);
+ if (help)
goto out;
if (ctinfo == IP_CT_NEW ||
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b0b4b0e..e451eb4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2887,7 +2887,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
goto out_free;
} else if (reserve) {
skb_reserve(skb, -reserve);
- if (len < reserve)
+ if (len < reserve + sizeof(struct ipv6hdr) &&
+ dev->min_header_len != dev->hard_header_len)
skb_reset_network_header(skb);
}
@@ -4274,7 +4275,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
if (unlikely(rb->frames_per_block == 0))
goto out;
- if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+ if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index f18469c..6c32eb9 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -36,6 +36,8 @@
#define QRTR_MIN_EPH_SOCKET 0x4000
#define QRTR_MAX_EPH_SOCKET 0x7fff
+#define QRTR_PORT_CTRL_LEGACY 0xffff
+
/* qrtr socket states */
#define QRTR_STATE_MULTI -2
#define QRTR_STATE_INIT -1
@@ -617,6 +619,48 @@ static void qrtr_node_assign(struct qrtr_node *node, unsigned int nid)
}
/**
+ * qrtr_peek_pkt_size() - Peek into the packet header to get potential pkt size
+ *
+ * @data: Starting address of the packet, which points to the router header.
+ *
+ * @returns: potential packet size on success, < 0 on error.
+ *
+ * This function is used by the underlying transport abstraction layer to
+ * peek at the size of an incoming packet. This information is used to
+ * perform link-layer fragmentation and reassembly.
+ */
+int qrtr_peek_pkt_size(const void *data)
+{
+ const struct qrtr_hdr_v1 *v1;
+ const struct qrtr_hdr_v2 *v2;
+ unsigned int hdrlen;
+ unsigned int size;
+ unsigned int ver;
+
+ /* Version field in v1 is little endian, so this works for both cases */
+ ver = *(u8 *)data;
+
+ switch (ver) {
+ case QRTR_PROTO_VER_1:
+ v1 = data;
+ hdrlen = sizeof(*v1);
+ size = le32_to_cpu(v1->size);
+ break;
+ case QRTR_PROTO_VER_2:
+ v2 = data;
+ hdrlen = sizeof(*v2) + v2->optlen;
+ size = le32_to_cpu(v2->size);
+ break;
+ default:
+ pr_err("qrtr: Invalid version %d\n", ver);
+ return -EINVAL;
+ }
+
+ return ALIGN(size, 4) + hdrlen;
+}
+EXPORT_SYMBOL(qrtr_peek_pkt_size);
+
+/**
* qrtr_endpoint_post() - post incoming data
* @ep: endpoint handle
* @data: data pointer
@@ -684,6 +728,9 @@ int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len)
goto err;
}
+ if (cb->dst_port == QRTR_PORT_CTRL_LEGACY)
+ cb->dst_port = QRTR_PORT_CTRL;
+
if (len != ALIGN(size, 4) + hdrlen)
goto err;
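
qrtr_peek_pkt_size() lets a transport learn the full packet length from the header alone, so it can reassemble link-layer fragments before handing a complete packet to qrtr_endpoint_post(). A rough userspace sketch of that peek-then-accumulate loop; the demo_hdr layout and helper names are hypothetical, not the actual qrtr wire format (which also rounds the size up to a 4-byte boundary):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_hdr {
	uint8_t  version;
	uint8_t  pad[3];
	uint32_t payload_len;	/* host endian for this demo */
};

/* Return the total packet size implied by the header, or -1 if unknown. */
static int demo_peek_pkt_size(const void *data)
{
	struct demo_hdr hdr;

	memcpy(&hdr, data, sizeof(hdr));	/* avoid unaligned access */
	if (hdr.version != 1)
		return -1;
	return (int)(sizeof(hdr) + hdr.payload_len);
}

int main(void)
{
	uint8_t wire[64] = { 0 };
	struct demo_hdr hdr = { .version = 1, .payload_len = 12 };
	uint8_t rxbuf[64];
	size_t have = 0, off = 0;
	size_t frag[2] = { 5, 0 };

	memcpy(wire, &hdr, sizeof(hdr));
	frag[1] = sizeof(hdr) + 12 - frag[0];	/* deliver in two fragments */

	for (int i = 0; i < 2; i++) {
		memcpy(rxbuf + have, wire + off, frag[i]);
		have += frag[i];
		off += frag[i];

		if (have < sizeof(struct demo_hdr))
			continue;		/* not enough to peek yet */

		int want = demo_peek_pkt_size(rxbuf);
		if (want > 0 && have >= (size_t)want) {
			printf("complete packet: %d bytes\n", want);
			break;
		}
	}
	return 0;
}
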
diff --git a/net/qrtr/qrtr.h b/net/qrtr/qrtr.h
index b80a149..f9aede4 100644
--- a/net/qrtr/qrtr.h
+++ b/net/qrtr/qrtr.h
@@ -32,4 +32,5 @@ void qrtr_endpoint_unregister(struct qrtr_endpoint *ep);
int qrtr_endpoint_post(struct qrtr_endpoint *ep, const void *data, size_t len);
+int qrtr_peek_pkt_size(const void *data);
#endif
diff --git a/net/qrtr/usb.c b/net/qrtr/usb.c
index af21585..fd71df9 100644
--- a/net/qrtr/usb.c
+++ b/net/qrtr/usb.c
@@ -290,6 +290,7 @@ static void qcom_usb_qrtr_disconnect(struct usb_interface *interface)
static const struct usb_device_id qcom_usb_qrtr_ids[] = {
{ USB_DEVICE_INTERFACE_NUMBER(QRTR_VENDOR_ID, 0x90ef, 3) },
{ USB_DEVICE_INTERFACE_NUMBER(QRTR_VENDOR_ID, 0x90f0, 3) },
+ { USB_DEVICE_INTERFACE_NUMBER(QRTR_VENDOR_ID, 0x90f3, 2) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, qcom_usb_qrtr_ids);
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 762d2c6..17c9d9f 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -78,10 +78,10 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
__rds_create_bind_key(key, addr, port, scope_id);
rcu_read_lock();
rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
- if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
- rds_sock_addref(rs);
- else
+ if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) ||
+ !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt)))
rs = NULL;
+
rcu_read_unlock();
rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 816b19a..0374b06 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -596,6 +596,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
}
error_no_call:
release_sock(&rx->sk);
+error_trace:
trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret);
return ret;
@@ -604,7 +605,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
wait_error:
finish_wait(sk_sleep(&rx->sk), &wait);
call = NULL;
- goto error_no_call;
+ goto error_trace;
}
/**
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 9ccc93f..38bb882 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -48,7 +48,7 @@ struct tcindex_data {
u32 hash; /* hash table size; 0 if undefined */
u32 alloc_hash; /* allocated size */
u32 fall_through; /* 0: only classify if explicit match */
- struct rcu_head rcu;
+ struct rcu_work rwork;
};
static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
@@ -221,17 +221,11 @@ static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last,
return 0;
}
-static int tcindex_destroy_element(struct tcf_proto *tp,
- void *arg, struct tcf_walker *walker)
+static void tcindex_destroy_work(struct work_struct *work)
{
- bool last;
-
- return tcindex_delete(tp, arg, &last, NULL);
-}
-
-static void __tcindex_destroy(struct rcu_head *head)
-{
- struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+ struct tcindex_data *p = container_of(to_rcu_work(work),
+ struct tcindex_data,
+ rwork);
kfree(p->perfect);
kfree(p->h);
@@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r)
return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
}
-static void __tcindex_partial_destroy(struct rcu_head *head)
+static void tcindex_partial_destroy_work(struct work_struct *work)
{
- struct tcindex_data *p = container_of(head, struct tcindex_data, rcu);
+ struct tcindex_data *p = container_of(to_rcu_work(work),
+ struct tcindex_data,
+ rwork);
kfree(p->perfect);
kfree(p);
@@ -275,7 +271,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp)
kfree(cp->perfect);
}
-static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
+static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
{
int i, err = 0;
@@ -289,6 +285,9 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp)
TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
if (err < 0)
goto errout;
+#ifdef CONFIG_NET_CLS_ACT
+ cp->perfect[i].exts.net = net;
+#endif
}
return 0;
@@ -305,9 +304,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
struct nlattr *est, bool ovr, struct netlink_ext_ack *extack)
{
struct tcindex_filter_result new_filter_result, *old_r = r;
- struct tcindex_filter_result cr;
struct tcindex_data *cp = NULL, *oldp;
struct tcindex_filter *f = NULL; /* make gcc behave */
+ struct tcf_result cr = {};
int err, balloc = 0;
struct tcf_exts e;
@@ -337,7 +336,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
if (p->perfect) {
int i;
- if (tcindex_alloc_perfect_hash(cp) < 0)
+ if (tcindex_alloc_perfect_hash(net, cp) < 0)
goto errout;
for (i = 0; i < cp->hash; i++)
cp->perfect[i].res = p->perfect[i].res;
@@ -348,11 +347,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
err = tcindex_filter_result_init(&new_filter_result);
if (err < 0)
goto errout1;
- err = tcindex_filter_result_init(&cr);
- if (err < 0)
- goto errout1;
if (old_r)
- cr.res = r->res;
+ cr = r->res;
if (tb[TCA_TCINDEX_HASH])
cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
@@ -406,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
err = -ENOMEM;
if (!cp->perfect && !cp->h) {
if (valid_perfect_hash(cp)) {
- if (tcindex_alloc_perfect_hash(cp) < 0)
+ if (tcindex_alloc_perfect_hash(net, cp) < 0)
goto errout_alloc;
balloc = 1;
} else {
@@ -443,8 +439,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
}
if (tb[TCA_TCINDEX_CLASSID]) {
- cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
- tcf_bind_filter(tp, &cr.res, base);
+ cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]);
+ tcf_bind_filter(tp, &cr, base);
}
if (old_r && old_r != r) {
@@ -456,7 +452,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
}
oldp = p;
- r->res = cr.res;
+ r->res = cr;
tcf_exts_change(&r->exts, &e);
rcu_assign_pointer(tp->root, cp);
@@ -475,10 +471,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
; /* nothing */
rcu_assign_pointer(*fp, f);
+ } else {
+ tcf_exts_destroy(&new_filter_result.exts);
}
if (oldp)
- call_rcu(&oldp->rcu, __tcindex_partial_destroy);
+ tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work);
return 0;
errout_alloc:
@@ -487,7 +485,6 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
else if (balloc == 2)
kfree(cp->h);
errout1:
- tcf_exts_destroy(&cr.exts);
tcf_exts_destroy(&new_filter_result.exts);
errout:
kfree(cp);
@@ -562,15 +559,34 @@ static void tcindex_destroy(struct tcf_proto *tp,
struct netlink_ext_ack *extack)
{
struct tcindex_data *p = rtnl_dereference(tp->root);
- struct tcf_walker walker;
+ int i;
pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
- walker.count = 0;
- walker.skip = 0;
- walker.fn = tcindex_destroy_element;
- tcindex_walk(tp, &walker);
- call_rcu(&p->rcu, __tcindex_destroy);
+ if (p->perfect) {
+ for (i = 0; i < p->hash; i++) {
+ struct tcindex_filter_result *r = p->perfect + i;
+
+ tcf_unbind_filter(tp, &r->res);
+ if (tcf_exts_get_net(&r->exts))
+ tcf_queue_work(&r->rwork,
+ tcindex_destroy_rexts_work);
+ else
+ __tcindex_destroy_rexts(r);
+ }
+ }
+
+ for (i = 0; p->h && i < p->hash; i++) {
+ struct tcindex_filter *f, *next;
+ bool last;
+
+ for (f = rtnl_dereference(p->h[i]); f; f = next) {
+ next = rtnl_dereference(f->next);
+ tcindex_delete(tp, &f->result, &last, NULL);
+ }
+ }
+
+ tcf_queue_work(&p->rwork, tcindex_destroy_work);
}
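
The tcindex hunks above convert call_rcu() callbacks into deferred work queued via tcf_queue_work(), which sits on top of the generic rcu_work API. A minimal sketch of that underlying pattern, with invented example_* names (the tcf_queue_work() wrapper itself is not shown in this hunk):

#include <linux/workqueue.h>
#include <linux/slab.h>

struct example_obj {
	int payload;
	struct rcu_work rwork;
};

static void example_free_work(struct work_struct *work)
{
	/* Runs from a workqueue, i.e. process context, after an RCU
	 * grace period has elapsed since queue_rcu_work() was called.
	 */
	struct example_obj *obj = container_of(to_rcu_work(work),
					       struct example_obj, rwork);

	kfree(obj);
}

static void example_release(struct example_obj *obj)
{
	INIT_RCU_WORK(&obj->rwork, example_free_work);
	queue_rcu_work(system_wq, &obj->rwork);
}

Unlike a call_rcu() callback, which runs in softirq context, the work function may sleep, which is what lets the filter teardown release its tcf_exts state safely.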
diff --git a/net/sctp/diag.c b/net/sctp/diag.c
index 078f01a..435847d 100644
--- a/net/sctp/diag.c
+++ b/net/sctp/diag.c
@@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
+ nla_total_size(1) /* INET_DIAG_TOS */
+ nla_total_size(1) /* INET_DIAG_TCLASS */
+ nla_total_size(4) /* INET_DIAG_MARK */
+ + nla_total_size(4) /* INET_DIAG_CLASS_ID */
+ nla_total_size(addrlen * asoc->peer.transport_count)
+ nla_total_size(addrlen * addrcnt)
+ nla_total_size(sizeof(struct inet_diag_meminfo))
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 123e9f2..edfcf16 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb)
{
skb->ip_summed = CHECKSUM_NONE;
skb->csum_not_inet = 0;
+ gso_reset_checksum(skb, ~0);
return sctp_compute_cksum(skb, skb_transport_offset(skb));
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 876393c..e5e70cf 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2045,7 +2045,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
struct sctp_endpoint *ep = sctp_sk(sk)->ep;
struct sctp_transport *transport = NULL;
struct sctp_sndrcvinfo _sinfo, *sinfo;
- struct sctp_association *asoc;
+ struct sctp_association *asoc, *tmp;
struct sctp_cmsgs cmsgs;
union sctp_addr *daddr;
bool new = false;
@@ -2071,7 +2071,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
/* SCTP_SENDALL process */
if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) {
- list_for_each_entry(asoc, &ep->asocs, asocs) {
+ list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) {
err = sctp_sendmsg_check_sflags(asoc, sflags, msg,
msg_len);
if (err == 0)
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 80e0ae5..2936ed1 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -84,6 +84,19 @@ static void fa_zero(struct flex_array *fa, size_t index, size_t count)
}
}
+static size_t fa_index(struct flex_array *fa, void *elem, size_t count)
+{
+ size_t index = 0;
+
+ while (count--) {
+ if (elem == flex_array_get(fa, index))
+ break;
+ index++;
+ }
+
+ return index;
+}
+
/* Migrates chunks from stream queues to new stream queues if needed,
* but not across associations. Also, removes those chunks to streams
* higher than the new max.
@@ -131,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
}
}
- for (i = outcnt; i < stream->outcnt; i++)
+ for (i = outcnt; i < stream->outcnt; i++) {
kfree(SCTP_SO(stream, i)->ext);
+ SCTP_SO(stream, i)->ext = NULL;
+ }
}
static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
@@ -147,6 +162,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
if (stream->out) {
fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt));
+ if (stream->out_curr) {
+ size_t index = fa_index(stream->out, stream->out_curr,
+ stream->outcnt);
+
+ BUG_ON(index == stream->outcnt);
+ stream->out_curr = flex_array_get(out, index);
+ }
fa_free(stream->out);
}
diff --git a/net/socket.c b/net/socket.c
index e286674..7d2703f 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -951,8 +951,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
EXPORT_SYMBOL(dlci_ioctl_set);
static long sock_do_ioctl(struct net *net, struct socket *sock,
- unsigned int cmd, unsigned long arg,
- unsigned int ifreq_size)
+ unsigned int cmd, unsigned long arg)
{
int err;
void __user *argp = (void __user *)arg;
@@ -978,11 +977,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock,
} else {
struct ifreq ifr;
bool need_copyout;
- if (copy_from_user(&ifr, argp, ifreq_size))
+ if (copy_from_user(&ifr, argp, sizeof(struct ifreq)))
return -EFAULT;
err = dev_ioctl(net, cmd, &ifr, &need_copyout);
if (!err && need_copyout)
- if (copy_to_user(argp, &ifr, ifreq_size))
+ if (copy_to_user(argp, &ifr, sizeof(struct ifreq)))
return -EFAULT;
}
return err;
@@ -1081,8 +1080,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
err = open_related_ns(&net->ns, get_net_ns);
break;
default:
- err = sock_do_ioctl(net, sock, cmd, arg,
- sizeof(struct ifreq));
+ err = sock_do_ioctl(net, sock, cmd, arg);
break;
}
return err;
@@ -2773,8 +2771,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock,
int err;
set_fs(KERNEL_DS);
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv,
- sizeof(struct compat_ifreq));
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv);
set_fs(old_fs);
if (!err)
err = compat_put_timeval(&ktv, up);
@@ -2790,8 +2787,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock,
int err;
set_fs(KERNEL_DS);
- err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts,
- sizeof(struct compat_ifreq));
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts);
set_fs(old_fs);
if (!err)
err = compat_put_timespec(&kts, up);
@@ -2987,6 +2983,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd,
return dev_ioctl(net, cmd, &ifreq, NULL);
}
+static int compat_ifreq_ioctl(struct net *net, struct socket *sock,
+ unsigned int cmd,
+ struct compat_ifreq __user *uifr32)
+{
+ struct ifreq __user *uifr;
+ int err;
+
+ /* Handle the fact that while struct ifreq has the same *layout* on
+ * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data,
+ * which are handled elsewhere, it still has different *size* due to
+ * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit,
+ * resulting in struct ifreq being 32 and 40 bytes respectively).
+ * As a result, if the struct happens to be at the end of a page and
+ * the next page isn't readable/writable, we get a fault. To prevent
+ * that, copy back and forth to the full size.
+ */
+
+ uifr = compat_alloc_user_space(sizeof(*uifr));
+ if (copy_in_user(uifr, uifr32, sizeof(*uifr32)))
+ return -EFAULT;
+
+ err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr);
+
+ if (!err) {
+ switch (cmd) {
+ case SIOCGIFFLAGS:
+ case SIOCGIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCGIFMEM:
+ case SIOCGIFHWADDR:
+ case SIOCGIFINDEX:
+ case SIOCGIFADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCGIFDSTADDR:
+ case SIOCGIFNETMASK:
+ case SIOCGIFPFLAGS:
+ case SIOCGIFTXQLEN:
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCGIFNAME:
+ if (copy_in_user(uifr32, uifr, sizeof(*uifr32)))
+ err = -EFAULT;
+ break;
+ }
+ }
+ return err;
+}
+
static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
struct compat_ifreq __user *uifr32)
{
@@ -3102,8 +3146,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
}
set_fs(KERNEL_DS);
- ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r,
- sizeof(struct compat_ifreq));
+ ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r);
set_fs(old_fs);
out:
@@ -3203,21 +3246,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock,
case SIOCSIFTXQLEN:
case SIOCBRADDIF:
case SIOCBRDELIF:
+ case SIOCGIFNAME:
case SIOCSIFNAME:
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- case SIOCSARP:
- case SIOCGARP:
- case SIOCDARP:
- case SIOCATMARK:
case SIOCBONDENSLAVE:
case SIOCBONDRELEASE:
case SIOCBONDSETHWADDR:
case SIOCBONDCHANGEACTIVE:
- case SIOCGIFNAME:
- return sock_do_ioctl(net, sock, cmd, arg,
- sizeof(struct compat_ifreq));
+ return compat_ifreq_ioctl(net, sock, cmd, argp);
+
+ case SIOCSARP:
+ case SIOCGARP:
+ case SIOCDARP:
+ case SIOCATMARK:
+ return sock_do_ioctl(net, sock, cmd, arg);
}
return -ENOIOCTLCMD;
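
The long comment in compat_ifreq_ioctl() above hinges on the size difference between the native and compat struct ifreq. A stand-alone sketch of that size arithmetic using simplified stand-in structures (the real definitions live in include/uapi/linux/if.h and include/linux/compat.h; the names and reduced unions here are illustrative only):

#include <stdint.h>

struct example_ifmap64 {		/* unsigned long is 8 bytes on 64-bit */
	uint64_t mem_start, mem_end;
	uint16_t base_addr;
	uint8_t  irq, dma, port;	/* padded to 24 bytes */
};

struct example_ifmap32 {		/* compat_ulong_t is 4 bytes */
	uint32_t mem_start, mem_end;
	uint16_t base_addr;
	uint8_t  irq, dma, port;	/* padded to 16 bytes */
};

struct example_ifreq64 { char name[16]; union { struct example_ifmap64 map; } u; };
struct example_ifreq32 { char name[16]; union { struct example_ifmap32 map; } u; };

_Static_assert(sizeof(struct example_ifreq64) == 40, "native ifreq is 40 bytes");
_Static_assert(sizeof(struct example_ifreq32) == 32, "compat ifreq is 32 bytes");

Copying sizeof(*uifr32) == 32 bytes in and out while handing the kernel a full-size 40-byte buffer is what keeps reads of the native struct from running past the end of the user mapping.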
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index fb66562..70922d9 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -44,7 +44,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
unsigned char *cksum, unsigned char *buf)
{
struct crypto_sync_skcipher *cipher;
- unsigned char plain[8];
+ unsigned char *plain;
s32 code;
dprintk("RPC: %s:\n", __func__);
@@ -52,6 +52,10 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
if (IS_ERR(cipher))
return PTR_ERR(cipher);
+ plain = kmalloc(8, GFP_NOFS);
+ if (!plain)
+ return -ENOMEM;
+
plain[0] = (unsigned char) ((seqnum >> 24) & 0xff);
plain[1] = (unsigned char) ((seqnum >> 16) & 0xff);
plain[2] = (unsigned char) ((seqnum >> 8) & 0xff);
@@ -68,6 +72,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum,
code = krb5_encrypt(cipher, cksum, plain, buf, 8);
out:
crypto_free_sync_skcipher(cipher);
+ kfree(plain);
return code;
}
s32
@@ -77,12 +82,17 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
u32 seqnum,
unsigned char *cksum, unsigned char *buf)
{
- unsigned char plain[8];
+ unsigned char *plain;
+ s32 code;
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
return krb5_make_rc4_seq_num(kctx, direction, seqnum,
cksum, buf);
+ plain = kmalloc(8, GFP_NOFS);
+ if (!plain)
+ return -ENOMEM;
+
plain[0] = (unsigned char) (seqnum & 0xff);
plain[1] = (unsigned char) ((seqnum >> 8) & 0xff);
plain[2] = (unsigned char) ((seqnum >> 16) & 0xff);
@@ -93,7 +103,9 @@ krb5_make_seq_num(struct krb5_ctx *kctx,
plain[6] = direction;
plain[7] = direction;
- return krb5_encrypt(key, cksum, plain, buf, 8);
+ code = krb5_encrypt(key, cksum, plain, buf, 8);
+ kfree(plain);
+ return code;
}
static s32
@@ -101,7 +113,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
unsigned char *buf, int *direction, s32 *seqnum)
{
struct crypto_sync_skcipher *cipher;
- unsigned char plain[8];
+ unsigned char *plain;
s32 code;
dprintk("RPC: %s:\n", __func__);
@@ -113,20 +125,28 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
if (code)
goto out;
+ plain = kmalloc(8, GFP_NOFS);
+ if (!plain) {
+ code = -ENOMEM;
+ goto out;
+ }
+
code = krb5_decrypt(cipher, cksum, buf, plain, 8);
if (code)
- goto out;
+ goto out_plain;
if ((plain[4] != plain[5]) || (plain[4] != plain[6])
|| (plain[4] != plain[7])) {
code = (s32)KG_BAD_SEQ;
- goto out;
+ goto out_plain;
}
*direction = plain[4];
*seqnum = ((plain[0] << 24) | (plain[1] << 16) |
(plain[2] << 8) | (plain[3]));
+out_plain:
+ kfree(plain);
out:
crypto_free_sync_skcipher(cipher);
return code;
@@ -139,26 +159,33 @@ krb5_get_seq_num(struct krb5_ctx *kctx,
int *direction, u32 *seqnum)
{
s32 code;
- unsigned char plain[8];
struct crypto_sync_skcipher *key = kctx->seq;
+ unsigned char *plain;
dprintk("RPC: krb5_get_seq_num:\n");
if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC)
return krb5_get_rc4_seq_num(kctx, cksum, buf,
direction, seqnum);
+ plain = kmalloc(8, GFP_NOFS);
+ if (!plain)
+ return -ENOMEM;
if ((code = krb5_decrypt(key, cksum, buf, plain, 8)))
- return code;
+ goto out;
if ((plain[4] != plain[5]) || (plain[4] != plain[6]) ||
- (plain[4] != plain[7]))
- return (s32)KG_BAD_SEQ;
+ (plain[4] != plain[7])) {
+ code = (s32)KG_BAD_SEQ;
+ goto out;
+ }
*direction = plain[4];
*seqnum = ((plain[0]) |
(plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24));
- return 0;
+out:
+ kfree(plain);
+ return code;
}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 8ea2f5f..1fc812b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1992,13 +1992,15 @@ call_transmit(struct rpc_task *task)
static void
call_transmit_status(struct rpc_task *task)
{
+ struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
task->tk_action = call_status;
/*
* Common case: success. Force the compiler to put this
- * test first.
+ * test first. Or, on any error while the transport is waiting
+ * to close (xprt_close_wait()), release the xprt lock so the
+ * socket can close.
*/
- if (task->tk_status == 0) {
+ if (task->tk_status == 0 || xprt_close_wait(xprt)) {
xprt_end_transmit(task);
rpc_task_force_reencode(task);
return;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index 8602a5f..e8ad7dd 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -563,6 +563,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma,
DMA_TO_DEVICE);
}
+/* If the xdr_buf has more elements than the device can
+ * transmit in a single RDMA Send, then the reply will
+ * have to be copied into a bounce buffer.
+ */
+static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma,
+ struct xdr_buf *xdr,
+ __be32 *wr_lst)
+{
+ int elements;
+
+ /* xdr->head */
+ elements = 1;
+
+ /* xdr->pages */
+ if (!wr_lst) {
+ unsigned int remaining;
+ unsigned long pageoff;
+
+ pageoff = xdr->page_base & ~PAGE_MASK;
+ remaining = xdr->page_len;
+ while (remaining) {
+ ++elements;
+ remaining -= min_t(u32, PAGE_SIZE - pageoff,
+ remaining);
+ pageoff = 0;
+ }
+ }
+
+ /* xdr->tail */
+ if (xdr->tail[0].iov_len)
+ ++elements;
+
+ /* assume 1 SGE is needed for the transport header */
+ return elements >= rdma->sc_max_send_sges;
+}
+
+/* The device is not capable of sending the reply directly.
+ * Assemble the elements of @xdr into the transport header
+ * buffer.
+ */
+static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma,
+ struct svc_rdma_send_ctxt *ctxt,
+ struct xdr_buf *xdr, __be32 *wr_lst)
+{
+ unsigned char *dst, *tailbase;
+ unsigned int taillen;
+
+ dst = ctxt->sc_xprt_buf;
+ dst += ctxt->sc_sges[0].length;
+
+ memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len);
+ dst += xdr->head[0].iov_len;
+
+ tailbase = xdr->tail[0].iov_base;
+ taillen = xdr->tail[0].iov_len;
+ if (wr_lst) {
+ u32 xdrpad;
+
+ xdrpad = xdr_padsize(xdr->page_len);
+ if (taillen && xdrpad) {
+ tailbase += xdrpad;
+ taillen -= xdrpad;
+ }
+ } else {
+ unsigned int len, remaining;
+ unsigned long pageoff;
+ struct page **ppages;
+
+ ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
+ pageoff = xdr->page_base & ~PAGE_MASK;
+ remaining = xdr->page_len;
+ while (remaining) {
+ len = min_t(u32, PAGE_SIZE - pageoff, remaining);
+
+ memcpy(dst, page_address(*ppages), len);
+ remaining -= len;
+ dst += len;
+ pageoff = 0;
+ }
+ }
+
+ if (taillen)
+ memcpy(dst, tailbase, taillen);
+
+ ctxt->sc_sges[0].length += xdr->len;
+ ib_dma_sync_single_for_device(rdma->sc_pd->device,
+ ctxt->sc_sges[0].addr,
+ ctxt->sc_sges[0].length,
+ DMA_TO_DEVICE);
+
+ return 0;
+}
+
/* svc_rdma_map_reply_msg - Map the buffer holding RPC message
* @rdma: controlling transport
* @ctxt: send_ctxt for the Send WR
@@ -585,8 +678,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
u32 xdr_pad;
int ret;
- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
- return -EIO;
+ if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst))
+ return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst);
+
+ ++ctxt->sc_cur_sge_no;
ret = svc_rdma_dma_map_buf(rdma, ctxt,
xdr->head[0].iov_base,
xdr->head[0].iov_len);
@@ -617,8 +712,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
while (remaining) {
len = min_t(u32, PAGE_SIZE - page_off, remaining);
- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
- return -EIO;
+ ++ctxt->sc_cur_sge_no;
ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++,
page_off, len);
if (ret < 0)
@@ -632,8 +726,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma,
len = xdr->tail[0].iov_len;
tail:
if (len) {
- if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges)
- return -EIO;
+ ++ctxt->sc_cur_sge_no;
ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len);
if (ret < 0)
return ret;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 2848caf..ce5c610 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -475,13 +475,12 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
/* Qualify the transport resource defaults with the
* capabilities of this particular device */
- newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
- /* transport hdr, head iovec, one page list entry, tail iovec */
- if (newxprt->sc_max_send_sges < 4) {
- pr_err("svcrdma: too few Send SGEs available (%d)\n",
- newxprt->sc_max_send_sges);
- goto errout;
- }
+ /* Transport header, head iovec, tail iovec */
+ newxprt->sc_max_send_sges = 3;
+ /* Add one SGE per page list entry */
+ newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1;
+ if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge)
+ newxprt->sc_max_send_sges = dev->attrs.max_send_sge;
newxprt->sc_max_req_size = svcrdma_max_req_size;
newxprt->sc_max_requests = svcrdma_max_requests;
newxprt->sc_max_bc_requests = svcrdma_max_bc_requests;
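
Taken together, the svc_rdma_sendto.c and svc_rdma_transport.c hunks stop failing replies that need more SGEs than the device offers: the transport now provisions sc_max_send_sges from the largest possible reply, and svc_rdma_pull_up_needed() falls back to copying the reply into the transport header buffer when a particular xdr_buf still would not fit. A rough sketch of the provisioning arithmetic (the concrete page size and request size used below are assumptions, not the in-tree defaults):

static unsigned int example_wanted_send_sges(unsigned int max_req_size,
					     unsigned int page_size,
					     unsigned int device_max_sge)
{
	/* transport header + head iovec + tail iovec */
	unsigned int want = 3;

	/* one SGE per page-list entry the largest reply could need */
	want += (max_req_size / page_size) + 1;

	/* never ask for more than the device advertises */
	return want > device_max_sge ? device_max_sge : want;
}

With a 4096-byte page and a 4096-byte maximum request this asks for 3 + 2 = 5 SGEs, clamped to whatever dev->attrs.max_send_sge reports.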
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 956a5ea..3d6bf79 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -872,7 +872,7 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
for (i = 0; i <= buf->rb_sc_last; i++) {
sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
if (!sc)
- goto out_destroy;
+ return -ENOMEM;
sc->sc_xprt = r_xprt;
buf->rb_sc_ctxs[i] = sc;
@@ -880,10 +880,6 @@ static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
buf->rb_flags = 0;
return 0;
-
-out_destroy:
- rpcrdma_sendctxs_destroy(buf);
- return -ENOMEM;
}
/* The sendctx queue is not guaranteed to have a size that is a
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 4880197..32556f4 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -624,6 +624,12 @@ static void tipc_node_timeout(struct timer_list *t)
__skb_queue_head_init(&xmitq);
+ /* Initialize the node keepalive interval to a larger value
+ * (10 seconds); it will then be recalculated from the lowest
+ * link tolerance.
+ */
+ tipc_node_read_lock(n);
+ n->keepalive_intv = 10000;
+ tipc_node_read_unlock(n);
for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
tipc_node_read_lock(n);
le = &n->links[bearer_id];
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index c361ce7..c3d5ab0 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -1651,6 +1651,10 @@ static void vmci_transport_cleanup(struct work_struct *work)
static void vmci_transport_destruct(struct vsock_sock *vsk)
{
+ /* transport can be NULL if we hit a failure at init() time */
+ if (!vmci_trans(vsk))
+ return;
+
/* Ensure that the detach callback doesn't use the sk/vsk
* we are about to destruct.
*/
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 7f52ef5..4dda927 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -3,6 +3,7 @@
* Wireless configuration interface internals.
*
* Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2018-2019 Intel Corporation
*/
#ifndef __NET_WIRELESS_CORE_H
#define __NET_WIRELESS_CORE_H
@@ -170,12 +171,23 @@ static inline struct cfg80211_internal_bss *bss_from_pub(struct cfg80211_bss *pu
static inline void cfg80211_hold_bss(struct cfg80211_internal_bss *bss)
{
atomic_inc(&bss->hold);
+ if (bss->pub.transmitted_bss) {
+ bss = container_of(bss->pub.transmitted_bss,
+ struct cfg80211_internal_bss, pub);
+ atomic_inc(&bss->hold);
+ }
}
static inline void cfg80211_unhold_bss(struct cfg80211_internal_bss *bss)
{
int r = atomic_dec_return(&bss->hold);
WARN_ON(r < 0);
+ if (bss->pub.transmitted_bss) {
+ bss = container_of(bss->pub.transmitted_bss,
+ struct cfg80211_internal_bss, pub);
+ r = atomic_dec_return(&bss->hold);
+ WARN_ON(r < 0);
+ }
}
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 3391c14..4529d74 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -5,6 +5,7 @@
* Copyright 2008 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright 2016 Intel Deutschland GmbH
+ * Copyright (C) 2018-2019 Intel Corporation
*/
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -109,6 +110,12 @@ static inline void bss_ref_get(struct cfg80211_registered_device *rdev,
pub);
bss->refcount++;
}
+ if (bss->pub.transmitted_bss) {
+ bss = container_of(bss->pub.transmitted_bss,
+ struct cfg80211_internal_bss,
+ pub);
+ bss->refcount++;
+ }
}
static inline void bss_ref_put(struct cfg80211_registered_device *rdev,
@@ -125,6 +132,18 @@ static inline void bss_ref_put(struct cfg80211_registered_device *rdev,
if (hbss->refcount == 0)
bss_free(hbss);
}
+
+ if (bss->pub.transmitted_bss) {
+ struct cfg80211_internal_bss *tbss;
+
+ tbss = container_of(bss->pub.transmitted_bss,
+ struct cfg80211_internal_bss,
+ pub);
+ tbss->refcount--;
+ if (tbss->refcount == 0)
+ bss_free(tbss);
+ }
+
bss->refcount--;
if (bss->refcount == 0)
bss_free(bss);
@@ -150,6 +169,7 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
}
list_del_init(&bss->list);
+ list_del_init(&bss->pub.nontrans_list);
rb_erase(&bss->rbn, &rdev->bss_tree);
rdev->bss_entries--;
WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list),
@@ -159,6 +179,156 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev,
return true;
}
+static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
+ const u8 *subelement, size_t subie_len,
+ u8 *new_ie, gfp_t gfp)
+{
+ u8 *pos, *tmp;
+ const u8 *tmp_old, *tmp_new;
+ u8 *sub_copy;
+
+ /* copy subelement as we need to change its content to
+ * mark an ie after it is processed.
+ */
+ sub_copy = kmalloc(subie_len, gfp);
+ if (!sub_copy)
+ return 0;
+ memcpy(sub_copy, subelement, subie_len);
+
+ pos = &new_ie[0];
+
+ /* set new ssid */
+ tmp_new = cfg80211_find_ie(WLAN_EID_SSID, sub_copy, subie_len);
+ if (tmp_new) {
+ memcpy(pos, tmp_new, tmp_new[1] + 2);
+ pos += (tmp_new[1] + 2);
+ }
+
+ /* go through IEs in ie (skip SSID) and subelement,
+ * merge them into new_ie
+ */
+ tmp_old = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
+ tmp_old = (tmp_old) ? tmp_old + tmp_old[1] + 2 : ie;
+
+ while (tmp_old + tmp_old[1] + 2 - ie <= ielen) {
+ if (tmp_old[0] == 0) {
+ tmp_old++;
+ continue;
+ }
+
+ tmp = (u8 *)cfg80211_find_ie(tmp_old[0], sub_copy, subie_len);
+ if (!tmp) {
+ /* ie in old ie but not in subelement */
+ if (tmp_old[0] != WLAN_EID_MULTIPLE_BSSID) {
+ memcpy(pos, tmp_old, tmp_old[1] + 2);
+ pos += tmp_old[1] + 2;
+ }
+ } else {
+ /* ie in transmitting ie also in subelement,
+ * copy from subelement and flag the ie in subelement
+ * as copied (by setting eid field to 0xff). For
+ * vendor ie, compare OUI + type + subType to
+ * determine if they are the same ie.
+ */
+ if (tmp_old[0] == WLAN_EID_VENDOR_SPECIFIC) {
+ if (!memcmp(tmp_old + 2, tmp + 2, 5)) {
+ /* same vendor ie, copy from
+ * subelement
+ */
+ memcpy(pos, tmp, tmp[1] + 2);
+ pos += tmp[1] + 2;
+ tmp[0] = 0xff;
+ } else {
+ memcpy(pos, tmp_old, tmp_old[1] + 2);
+ pos += tmp_old[1] + 2;
+ }
+ } else {
+ /* copy ie from subelement into new ie */
+ memcpy(pos, tmp, tmp[1] + 2);
+ pos += tmp[1] + 2;
+ tmp[0] = 0xff;
+ }
+ }
+
+ if (tmp_old + tmp_old[1] + 2 - ie == ielen)
+ break;
+
+ tmp_old += tmp_old[1] + 2;
+ }
+
+ /* go through subelement again to check if there is any ie not
+ * copied to new ie, skip ssid, capability, bssid-index ie
+ */
+ tmp_new = sub_copy;
+ while (tmp_new + tmp_new[1] + 2 - sub_copy <= subie_len) {
+ if (!(tmp_new[0] == WLAN_EID_NON_TX_BSSID_CAP ||
+ tmp_new[0] == WLAN_EID_SSID ||
+ tmp_new[0] == WLAN_EID_MULTI_BSSID_IDX ||
+ tmp_new[0] == 0xff)) {
+ memcpy(pos, tmp_new, tmp_new[1] + 2);
+ pos += tmp_new[1] + 2;
+ }
+ if (tmp_new + tmp_new[1] + 2 - sub_copy == subie_len)
+ break;
+ tmp_new += tmp_new[1] + 2;
+ }
+
+ kfree(sub_copy);
+ return pos - new_ie;
+}
+
+static bool is_bss(struct cfg80211_bss *a, const u8 *bssid,
+ const u8 *ssid, size_t ssid_len)
+{
+ const struct cfg80211_bss_ies *ies;
+ const u8 *ssidie;
+
+ if (bssid && !ether_addr_equal(a->bssid, bssid))
+ return false;
+
+ if (!ssid)
+ return true;
+
+ ies = rcu_access_pointer(a->ies);
+ if (!ies)
+ return false;
+ ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
+ if (!ssidie)
+ return false;
+ if (ssidie[1] != ssid_len)
+ return false;
+ return memcmp(ssidie + 2, ssid, ssid_len) == 0;
+}
+
+static int
+cfg80211_add_nontrans_list(struct cfg80211_bss *trans_bss,
+ struct cfg80211_bss *nontrans_bss)
+{
+ const u8 *ssid;
+ size_t ssid_len;
+ struct cfg80211_bss *bss = NULL;
+
+ rcu_read_lock();
+ ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID);
+ if (!ssid) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+ ssid_len = ssid[1];
+ ssid = ssid + 2;
+ rcu_read_unlock();
+
+ /* check if nontrans_bss is in the list */
+ list_for_each_entry(bss, &trans_bss->nontrans_list, nontrans_list) {
+ if (is_bss(bss, nontrans_bss->bssid, ssid, ssid_len))
+ return 0;
+ }
+
+ /* add to the list */
+ list_add_tail(&nontrans_bss->nontrans_list, &trans_bss->nontrans_list);
+ return 0;
+}
+
static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev,
unsigned long expire_time)
{
@@ -480,73 +650,43 @@ void cfg80211_bss_expire(struct cfg80211_registered_device *rdev)
__cfg80211_bss_expire(rdev, jiffies - IEEE80211_SCAN_RESULT_EXPIRE);
}
-const u8 *cfg80211_find_ie_match(u8 eid, const u8 *ies, int len,
- const u8 *match, int match_len,
- int match_offset)
+const struct element *
+cfg80211_find_elem_match(u8 eid, const u8 *ies, unsigned int len,
+ const u8 *match, unsigned int match_len,
+ unsigned int match_offset)
{
- /* match_offset can't be smaller than 2, unless match_len is
- * zero, in which case match_offset must be zero as well.
- */
- if (WARN_ON((match_len && match_offset < 2) ||
- (!match_len && match_offset)))
- return NULL;
+ const struct element *elem;
- while (len >= 2 && len >= ies[1] + 2) {
- if ((ies[0] == eid) &&
- (ies[1] + 2 >= match_offset + match_len) &&
- !memcmp(ies + match_offset, match, match_len))
- return ies;
-
- len -= ies[1] + 2;
- ies += ies[1] + 2;
+ for_each_element_id(elem, eid, ies, len) {
+ if (elem->datalen >= match_offset + match_len &&
+ !memcmp(elem->data + match_offset, match, match_len))
+ return elem;
}
return NULL;
}
-EXPORT_SYMBOL(cfg80211_find_ie_match);
+EXPORT_SYMBOL(cfg80211_find_elem_match);
-const u8 *cfg80211_find_vendor_ie(unsigned int oui, int oui_type,
- const u8 *ies, int len)
+const struct element *cfg80211_find_vendor_elem(unsigned int oui, int oui_type,
+ const u8 *ies,
+ unsigned int len)
{
- const u8 *ie;
+ const struct element *elem;
u8 match[] = { oui >> 16, oui >> 8, oui, oui_type };
int match_len = (oui_type < 0) ? 3 : sizeof(match);
if (WARN_ON(oui_type > 0xff))
return NULL;
- ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC, ies, len,
- match, match_len, 2);
+ elem = cfg80211_find_elem_match(WLAN_EID_VENDOR_SPECIFIC, ies, len,
+ match, match_len, 0);
- if (ie && (ie[1] < 4))
+ if (!elem || elem->datalen < 4)
return NULL;
- return ie;
+ return elem;
}
-EXPORT_SYMBOL(cfg80211_find_vendor_ie);
-
-static bool is_bss(struct cfg80211_bss *a, const u8 *bssid,
- const u8 *ssid, size_t ssid_len)
-{
- const struct cfg80211_bss_ies *ies;
- const u8 *ssidie;
-
- if (bssid && !ether_addr_equal(a->bssid, bssid))
- return false;
-
- if (!ssid)
- return true;
-
- ies = rcu_access_pointer(a->ies);
- if (!ies)
- return false;
- ssidie = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len);
- if (!ssidie)
- return false;
- if (ssidie[1] != ssid_len)
- return false;
- return memcmp(ssidie + 2, ssid, ssid_len) == 0;
-}
+EXPORT_SYMBOL(cfg80211_find_vendor_elem);
/**
* enum bss_compare_mode - BSS compare mode
@@ -882,6 +1022,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev,
return true;
}
+struct cfg80211_non_tx_bss {
+ struct cfg80211_bss *tx_bss;
+ u8 max_bssid_indicator;
+ u8 bssid_index;
+};
+
/* Returned bss is reference counted and must be cleaned up appropriately. */
static struct cfg80211_internal_bss *
cfg80211_bss_update(struct cfg80211_registered_device *rdev,
@@ -985,6 +1131,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
memcpy(found->pub.chain_signal, tmp->pub.chain_signal,
IEEE80211_MAX_CHAINS);
ether_addr_copy(found->parent_bssid, tmp->parent_bssid);
+ found->pub.max_bssid_indicator = tmp->pub.max_bssid_indicator;
+ found->pub.bssid_index = tmp->pub.bssid_index;
} else {
struct cfg80211_internal_bss *new;
struct cfg80211_internal_bss *hidden;
@@ -1009,6 +1157,7 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
memcpy(new, tmp, sizeof(*new));
new->refcount = 1;
INIT_LIST_HEAD(&new->hidden_list);
+ INIT_LIST_HEAD(&new->pub.nontrans_list);
if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
hidden = rb_find_bss(rdev, tmp, BSS_CMP_HIDE_ZLEN);
@@ -1042,6 +1191,17 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
goto drop;
}
+ /* This must be before the call to bss_ref_get */
+ if (tmp->pub.transmitted_bss) {
+ struct cfg80211_internal_bss *pbss =
+ container_of(tmp->pub.transmitted_bss,
+ struct cfg80211_internal_bss,
+ pub);
+
+ new->pub.transmitted_bss = tmp->pub.transmitted_bss;
+ bss_ref_get(rdev, pbss);
+ }
+
list_add_tail(&new->list, &rdev->bss_list);
rdev->bss_entries++;
rb_insert_bss(rdev, new);
@@ -1130,14 +1290,16 @@ cfg80211_get_bss_channel(struct wiphy *wiphy, const u8 *ie, size_t ielen,
}
/* Returned bss is reference counted and must be cleaned up appropriately. */
-struct cfg80211_bss *
-cfg80211_inform_bss_data(struct wiphy *wiphy,
- struct cfg80211_inform_bss *data,
- enum cfg80211_bss_frame_type ftype,
- const u8 *bssid, u64 tsf, u16 capability,
- u16 beacon_interval, const u8 *ie, size_t ielen,
- gfp_t gfp)
+static struct cfg80211_bss *
+cfg80211_inform_single_bss_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ enum cfg80211_bss_frame_type ftype,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ struct cfg80211_non_tx_bss *non_tx_data,
+ gfp_t gfp)
{
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
struct cfg80211_bss_ies *ies;
struct ieee80211_channel *channel;
struct cfg80211_internal_bss tmp = {}, *res;
@@ -1163,6 +1325,11 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
tmp.pub.beacon_interval = beacon_interval;
tmp.pub.capability = capability;
tmp.ts_boottime = data->boottime_ns;
+ if (non_tx_data) {
+ tmp.pub.transmitted_bss = non_tx_data->tx_bss;
+ tmp.pub.bssid_index = non_tx_data->bssid_index;
+ tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator;
+ }
/*
* If we do not know here whether the IEs are from a Beacon or Probe
@@ -1209,19 +1376,246 @@ cfg80211_inform_bss_data(struct wiphy *wiphy,
regulatory_hint_found_beacon(wiphy, channel, gfp);
}
+ if (non_tx_data && non_tx_data->tx_bss) {
+ /* this is a nontransmitting bss, we need to add it to
+ * transmitting bss' list if it is not there
+ */
+ if (cfg80211_add_nontrans_list(non_tx_data->tx_bss,
+ &res->pub)) {
+ if (__cfg80211_unlink_bss(rdev, res))
+ rdev->bss_generation++;
+ }
+ }
+
trace_cfg80211_return_bss(&res->pub);
/* cfg80211_bss_update gives us a referenced result */
return &res->pub;
}
+
+static void cfg80211_parse_mbssid_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ enum cfg80211_bss_frame_type ftype,
+ const u8 *bssid, u64 tsf,
+ u16 beacon_interval, const u8 *ie,
+ size_t ielen,
+ struct cfg80211_non_tx_bss *non_tx_data,
+ gfp_t gfp)
+{
+ const u8 *mbssid_index_ie;
+ const struct element *elem, *sub;
+ size_t new_ie_len;
+ u8 new_bssid[ETH_ALEN];
+ u8 *new_ie;
+ u16 capability;
+ struct cfg80211_bss *bss;
+
+ if (!non_tx_data)
+ return;
+ if (!cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+ return;
+ if (!wiphy->support_mbssid)
+ return;
+ if (wiphy->support_only_he_mbssid &&
+ !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+ return;
+
+ new_ie = kmalloc(IEEE80211_MAX_DATA_LEN, gfp);
+ if (!new_ie)
+ return;
+
+ for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, ie, ielen) {
+ if (elem->datalen < 4)
+ continue;
+ for_each_element(sub, elem->data + 1, elem->datalen - 1) {
+ if (sub->id != 0 || sub->datalen < 4) {
+ /* not a valid BSS profile */
+ continue;
+ }
+
+ if (sub->data[0] != WLAN_EID_NON_TX_BSSID_CAP ||
+ sub->data[1] != 2) {
+ /* The first element within the Nontransmitted
+ * BSSID Profile is not the Nontransmitted
+ * BSSID Capability element.
+ */
+ continue;
+ }
+
+ /* found a Nontransmitted BSSID Profile */
+ mbssid_index_ie = cfg80211_find_ie
+ (WLAN_EID_MULTI_BSSID_IDX,
+ sub->data, sub->datalen);
+ if (!mbssid_index_ie || mbssid_index_ie[1] < 1 ||
+ mbssid_index_ie[2] == 0) {
+ /* No valid Multiple BSSID-Index element */
+ continue;
+ }
+
+ non_tx_data->bssid_index = mbssid_index_ie[2];
+ non_tx_data->max_bssid_indicator = elem->data[0];
+
+ cfg80211_gen_new_bssid(bssid,
+ non_tx_data->max_bssid_indicator,
+ non_tx_data->bssid_index,
+ new_bssid);
+ memset(new_ie, 0, IEEE80211_MAX_DATA_LEN);
+ new_ie_len = cfg80211_gen_new_ie(ie, ielen, sub->data,
+ sub->datalen, new_ie,
+ gfp);
+ if (!new_ie_len)
+ continue;
+
+ capability = get_unaligned_le16(sub->data + 2);
+ bss = cfg80211_inform_single_bss_data(wiphy, data,
+ ftype,
+ new_bssid, tsf,
+ capability,
+ beacon_interval,
+ new_ie,
+ new_ie_len,
+ non_tx_data,
+ gfp);
+ if (!bss)
+ break;
+ cfg80211_put_bss(wiphy, bss);
+ }
+ }
+
+ kfree(new_ie);
+}
+
+struct cfg80211_bss *
+cfg80211_inform_bss_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ enum cfg80211_bss_frame_type ftype,
+ const u8 *bssid, u64 tsf, u16 capability,
+ u16 beacon_interval, const u8 *ie, size_t ielen,
+ gfp_t gfp)
+{
+ struct cfg80211_bss *res;
+ struct cfg80211_non_tx_bss non_tx_data;
+
+ res = cfg80211_inform_single_bss_data(wiphy, data, ftype, bssid, tsf,
+ capability, beacon_interval, ie,
+ ielen, NULL, gfp);
+ non_tx_data.tx_bss = res;
+ cfg80211_parse_mbssid_data(wiphy, data, ftype, bssid, tsf,
+ beacon_interval, ie, ielen, &non_tx_data,
+ gfp);
+ return res;
+}
EXPORT_SYMBOL(cfg80211_inform_bss_data);
-/* cfg80211_inform_bss_width_frame helper */
-struct cfg80211_bss *
-cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
- struct cfg80211_inform_bss *data,
- struct ieee80211_mgmt *mgmt, size_t len,
- gfp_t gfp)
+static void
+cfg80211_parse_mbssid_frame_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct cfg80211_non_tx_bss *non_tx_data,
+ gfp_t gfp)
+{
+ enum cfg80211_bss_frame_type ftype;
+ const u8 *ie = mgmt->u.probe_resp.variable;
+ size_t ielen = len - offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+ ftype = ieee80211_is_beacon(mgmt->frame_control) ?
+ CFG80211_BSS_FTYPE_BEACON : CFG80211_BSS_FTYPE_PRESP;
+
+ cfg80211_parse_mbssid_data(wiphy, data, ftype, mgmt->bssid,
+ le64_to_cpu(mgmt->u.probe_resp.timestamp),
+ le16_to_cpu(mgmt->u.probe_resp.beacon_int),
+ ie, ielen, non_tx_data, gfp);
+}
+
+static void
+cfg80211_update_notlisted_nontrans(struct wiphy *wiphy,
+ struct cfg80211_bss *nontrans_bss,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ gfp_t gfp)
+{
+ u8 *ie, *new_ie, *pos;
+ const u8 *nontrans_ssid, *trans_ssid, *mbssid;
+ size_t ielen = len - offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+ size_t new_ie_len;
+ struct cfg80211_bss_ies *new_ies;
+ const struct cfg80211_bss_ies *old;
+ u8 cpy_len;
+
+ ie = mgmt->u.probe_resp.variable;
+
+ new_ie_len = ielen;
+ trans_ssid = cfg80211_find_ie(WLAN_EID_SSID, ie, ielen);
+ if (!trans_ssid)
+ return;
+ new_ie_len -= trans_ssid[1];
+ mbssid = cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen);
+ if (!mbssid)
+ return;
+ new_ie_len -= mbssid[1];
+ rcu_read_lock();
+ nontrans_ssid = ieee80211_bss_get_ie(nontrans_bss, WLAN_EID_SSID);
+ if (!nontrans_ssid) {
+ rcu_read_unlock();
+ return;
+ }
+ new_ie_len += nontrans_ssid[1];
+ rcu_read_unlock();
+
+ /* generate new ie for nontrans BSS
+ * 1. replace SSID with nontrans BSS' SSID
+ * 2. skip MBSSID IE
+ */
+ new_ie = kzalloc(new_ie_len, gfp);
+ if (!new_ie)
+ return;
+ new_ies = kzalloc(sizeof(*new_ies) + new_ie_len, gfp);
+ if (!new_ies) {
+ kfree(new_ie);
+ return;
+ }
+
+ pos = new_ie;
+
+ /* copy the nontransmitted SSID */
+ cpy_len = nontrans_ssid[1] + 2;
+ memcpy(pos, nontrans_ssid, cpy_len);
+ pos += cpy_len;
+ /* copy the IEs between SSID and MBSSID */
+ cpy_len = trans_ssid[1] + 2;
+ memcpy(pos, (trans_ssid + cpy_len), (mbssid - (trans_ssid + cpy_len)));
+ pos += (mbssid - (trans_ssid + cpy_len));
+ /* copy the IEs after MBSSID */
+ cpy_len = mbssid[1] + 2;
+ memcpy(pos, mbssid + cpy_len, ((ie + ielen) - (mbssid + cpy_len)));
+
+ /* update ie */
+ new_ies->len = new_ie_len;
+ new_ies->tsf = le64_to_cpu(mgmt->u.probe_resp.timestamp);
+ new_ies->from_beacon = ieee80211_is_beacon(mgmt->frame_control);
+ memcpy(new_ies->data, new_ie, new_ie_len);
+ if (ieee80211_is_probe_resp(mgmt->frame_control)) {
+ old = rcu_access_pointer(nontrans_bss->proberesp_ies);
+ rcu_assign_pointer(nontrans_bss->proberesp_ies, new_ies);
+ rcu_assign_pointer(nontrans_bss->ies, new_ies);
+ if (old)
+ kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
+ } else {
+ old = rcu_access_pointer(nontrans_bss->beacon_ies);
+ rcu_assign_pointer(nontrans_bss->beacon_ies, new_ies);
+ rcu_assign_pointer(nontrans_bss->ies, new_ies);
+ if (old)
+ kfree_rcu((struct cfg80211_bss_ies *)old, rcu_head);
+ }
+}
+
+/* cfg80211_inform_bss_width_frame helper */
+static struct cfg80211_bss *
+cfg80211_inform_single_bss_frame_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ struct cfg80211_non_tx_bss *non_tx_data,
+ gfp_t gfp)
{
struct cfg80211_internal_bss tmp = {}, *res;
struct cfg80211_bss_ies *ies;
@@ -1279,6 +1673,11 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
tmp.pub.chains = data->chains;
memcpy(tmp.pub.chain_signal, data->chain_signal, IEEE80211_MAX_CHAINS);
ether_addr_copy(tmp.parent_bssid, data->parent_bssid);
+ if (non_tx_data) {
+ tmp.pub.transmitted_bss = non_tx_data->tx_bss;
+ tmp.pub.bssid_index = non_tx_data->bssid_index;
+ tmp.pub.max_bssid_indicator = non_tx_data->max_bssid_indicator;
+ }
signal_valid = abs(data->chan->center_freq - channel->center_freq) <=
wiphy->max_adj_channel_rssi_comp;
@@ -1300,6 +1699,53 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
/* cfg80211_bss_update gives us a referenced result */
return &res->pub;
}
+
+struct cfg80211_bss *
+cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
+ struct cfg80211_inform_bss *data,
+ struct ieee80211_mgmt *mgmt, size_t len,
+ gfp_t gfp)
+{
+ struct cfg80211_bss *res, *tmp_bss;
+ const u8 *ie = mgmt->u.probe_resp.variable;
+ const struct cfg80211_bss_ies *ies1, *ies2;
+ size_t ielen = len - offsetof(struct ieee80211_mgmt,
+ u.probe_resp.variable);
+ struct cfg80211_non_tx_bss non_tx_data;
+
+ res = cfg80211_inform_single_bss_frame_data(wiphy, data, mgmt,
+ len, NULL, gfp);
+ if (!res || !wiphy->support_mbssid ||
+ !cfg80211_find_ie(WLAN_EID_MULTIPLE_BSSID, ie, ielen))
+ return res;
+ if (wiphy->support_only_he_mbssid &&
+ !cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY, ie, ielen))
+ return res;
+
+ non_tx_data.tx_bss = res;
+ /* process each non-transmitting bss */
+ cfg80211_parse_mbssid_frame_data(wiphy, data, mgmt, len,
+ &non_tx_data, gfp);
+
+ /* check if the res has other nontransmitting bss which is not
+ * in MBSSID IE
+ */
+ ies1 = rcu_access_pointer(res->ies);
+
+ /* go through nontrans_list, if the timestamp of the BSS is
+ * earlier than the timestamp of the transmitting BSS then
+ * update it
+ */
+ list_for_each_entry(tmp_bss, &res->nontrans_list,
+ nontrans_list) {
+ ies2 = rcu_access_pointer(tmp_bss->ies);
+ if (ies2->tsf < ies1->tsf)
+ cfg80211_update_notlisted_nontrans(wiphy, tmp_bss,
+ mgmt, len, gfp);
+ }
+
+ return res;
+}
EXPORT_SYMBOL(cfg80211_inform_bss_frame_data);
void cfg80211_ref_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
@@ -1337,7 +1783,8 @@ EXPORT_SYMBOL(cfg80211_put_bss);
void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- struct cfg80211_internal_bss *bss;
+ struct cfg80211_internal_bss *bss, *tmp1;
+ struct cfg80211_bss *nontrans_bss, *tmp;
if (WARN_ON(!pub))
return;
@@ -1345,10 +1792,21 @@ void cfg80211_unlink_bss(struct wiphy *wiphy, struct cfg80211_bss *pub)
bss = container_of(pub, struct cfg80211_internal_bss, pub);
spin_lock_bh(&rdev->bss_lock);
- if (!list_empty(&bss->list)) {
- if (__cfg80211_unlink_bss(rdev, bss))
+ if (list_empty(&bss->list))
+ goto out;
+
+ list_for_each_entry_safe(nontrans_bss, tmp,
+ &pub->nontrans_list,
+ nontrans_list) {
+ tmp1 = container_of(nontrans_bss,
+ struct cfg80211_internal_bss, pub);
+ if (__cfg80211_unlink_bss(rdev, tmp1))
rdev->bss_generation++;
}
+
+ if (__cfg80211_unlink_bss(rdev, bss))
+ rdev->bss_generation++;
+out:
spin_unlock_bh(&rdev->bss_lock);
}
EXPORT_SYMBOL(cfg80211_unlink_bss);
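
The MBSSID parsing added above creates one synthetic BSS entry per Nontransmitted BSSID Profile, using cfg80211_gen_new_bssid() (not shown in this hunk) to compute each profile's BSSID. A sketch of that derivation as defined by IEEE 802.11, where only the low MaxBSSID-Indicator bits of the transmitted BSSID change; the helper shape below is an assumption, not the in-tree implementation:

static void example_gen_new_bssid(const u8 *bssid, u8 max_bssid_indicator,
				  u8 bssid_index, u8 *new_bssid)
{
	u64 addr = 0;
	u64 mask = (1ULL << max_bssid_indicator) - 1;
	int i;

	/* pack the 6-byte transmitted BSSID into an integer, MSB first */
	for (i = 0; i < 6; i++)
		addr = (addr << 8) | bssid[i];

	/* only the low max_bssid_indicator bits change:
	 * new_low = (old_low + bssid_index) mod 2^max_bssid_indicator
	 */
	addr = (addr & ~mask) | ((addr + bssid_index) & mask);

	for (i = 5; i >= 0; i--) {
		new_bssid[i] = addr & 0xff;
		addr >>= 8;
	}
}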
diff --git a/net/wireless/util.c b/net/wireless/util.c
index aad1c8e..2821b3e 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -773,7 +773,7 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
}
EXPORT_SYMBOL(cfg80211_classify8021d);
-const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie)
+const struct element *ieee80211_bss_get_elem(struct cfg80211_bss *bss, u8 id)
{
const struct cfg80211_bss_ies *ies;
@@ -781,9 +781,9 @@ const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie)
if (!ies)
return NULL;
- return cfg80211_find_ie(ie, ies->data, ies->len);
+ return cfg80211_find_elem(id, ies->data, ies->len);
}
-EXPORT_SYMBOL(ieee80211_bss_get_ie);
+EXPORT_SYMBOL(ieee80211_bss_get_elem);
void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
{
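
A hypothetical caller of the element-based accessor introduced above: struct element exposes ->id, ->datalen and ->data, so callers no longer index the raw ies[0]/ies[1] bytes themselves (the example_ function name is invented):

static void example_dump_ssid(struct cfg80211_bss *bss)
{
	const struct element *elem;

	rcu_read_lock();
	elem = ieee80211_bss_get_elem(bss, WLAN_EID_SSID);
	if (elem && elem->datalen <= 32)
		pr_info("SSID: %.*s\n", elem->datalen, elem->data);
	rcu_read_unlock();
}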
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index d49aa79..fef473c 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb)
unsigned int lci = 1;
struct sock *sk;
- read_lock_bh(&x25_list_lock);
-
- while ((sk = __x25_find_socket(lci, nb)) != NULL) {
+ while ((sk = x25_find_socket(lci, nb)) != NULL) {
sock_put(sk);
if (++lci == 4096) {
lci = 0;
break;
}
+ cond_resched();
}
- read_unlock_bh(&x25_list_lock);
return lci;
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 5669198..ab55782 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1488,10 +1488,15 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
if (!ut[i].family)
ut[i].family = family;
- if ((ut[i].mode == XFRM_MODE_TRANSPORT) &&
- (ut[i].family != prev_family))
- return -EINVAL;
-
+ switch (ut[i].mode) {
+ case XFRM_MODE_TUNNEL:
+ case XFRM_MODE_BEET:
+ break;
+ default:
+ if (ut[i].family != prev_family)
+ return -EINVAL;
+ break;
+ }
if (ut[i].mode >= XFRM_MODE_MAX)
return -EINVAL;
diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c
index 49b1355..e8f1bd6 100644
--- a/samples/livepatch/livepatch-shadow-fix1.c
+++ b/samples/livepatch/livepatch-shadow-fix1.c
@@ -89,6 +89,11 @@ struct dummy *livepatch_fix1_dummy_alloc(void)
* pointer to handle resource release.
*/
leak = kzalloc(sizeof(int), GFP_KERNEL);
+ if (!leak) {
+ kfree(d);
+ return NULL;
+ }
+
klp_shadow_alloc(d, SV_LEAK, sizeof(leak), GFP_KERNEL,
shadow_leak_ctor, leak);
diff --git a/samples/livepatch/livepatch-shadow-mod.c b/samples/livepatch/livepatch-shadow-mod.c
index 4c54b25..4aa8a88 100644
--- a/samples/livepatch/livepatch-shadow-mod.c
+++ b/samples/livepatch/livepatch-shadow-mod.c
@@ -118,6 +118,10 @@ noinline struct dummy *dummy_alloc(void)
/* Oops, forgot to save leak! */
leak = kzalloc(sizeof(int), GFP_KERNEL);
+ if (!leak) {
+ kfree(d);
+ return NULL;
+ }
pr_info("%s: dummy @ %p, expires @ %lx\n",
__func__, d, d->jiffies_expire);
diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c
index 57d0d87..bb99889 100644
--- a/samples/mei/mei-amt-version.c
+++ b/samples/mei/mei-amt-version.c
@@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid,
me->verbose = verbose;
- me->fd = open("/dev/mei", O_RDWR);
+ me->fd = open("/dev/mei0", O_RDWR);
if (me->fd == -1) {
mei_err(me, "Cannot establish a handle to the Intel MEI driver\n");
goto err;
diff --git a/scripts/decode_stacktrace.sh b/scripts/decode_stacktrace.sh
index 64220e3..98a7d63 100755
--- a/scripts/decode_stacktrace.sh
+++ b/scripts/decode_stacktrace.sh
@@ -78,7 +78,7 @@
fi
# Strip out the base of the path
- code=${code//$basepath/""}
+ code=${code//^$basepath/""}
# In the case of inlines, move everything to same line
code=${code//$'\n'/' '}
diff --git a/scripts/gdb/linux/proc.py b/scripts/gdb/linux/proc.py
index 086d272..0aebd75 100644
--- a/scripts/gdb/linux/proc.py
+++ b/scripts/gdb/linux/proc.py
@@ -41,7 +41,7 @@
def invoke(self, arg, from_tty):
# linux_banner should contain a newline
- gdb.write(gdb.parse_and_eval("linux_banner").string())
+ gdb.write(gdb.parse_and_eval("(char *)linux_banner").string())
LxVersion()
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 5a5b378..5a77efd 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1204,6 +1204,30 @@ static int secref_whitelist(const struct sectioncheck *mismatch,
return 1;
}
+static inline int is_arm_mapping_symbol(const char *str)
+{
+ return str[0] == '$' && strchr("axtd", str[1])
+ && (str[2] == '\0' || str[2] == '.');
+}
+
+/*
+ * If there's no name there, ignore it; likewise, ignore it if it's
+ * one of the magic symbols emitted by current ARM tools.
+ *
+ * Otherwise if find_symbols_between() returns those symbols, they'll
+ * fail the whitelist tests and cause lots of false alarms ... fixable
+ * only by merging __exit and __init sections into __text, bloating
+ * the kernel (which is especially evil on embedded platforms).
+ */
+static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
+{
+ const char *name = elf->strtab + sym->st_name;
+
+ if (!name || !strlen(name))
+ return 0;
+ return !is_arm_mapping_symbol(name);
+}
+
/**
* Find symbol based on relocation record info.
* In some cases the symbol supplied is a valid symbol so
@@ -1229,6 +1253,8 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
continue;
if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
continue;
+ if (!is_valid_name(elf, sym))
+ continue;
if (sym->st_value == addr)
return sym;
/* Find a symbol nearby - addr are maybe negative */
@@ -1247,30 +1273,6 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
return NULL;
}
-static inline int is_arm_mapping_symbol(const char *str)
-{
- return str[0] == '$' && strchr("axtd", str[1])
- && (str[2] == '\0' || str[2] == '.');
-}
-
-/*
- * If there's no name there, ignore it; likewise, ignore it if it's
- * one of the magic symbols emitted used by current ARM tools.
- *
- * Otherwise if find_symbols_between() returns those symbols, they'll
- * fail the whitelist tests and cause lots of false alarms ... fixable
- * only by merging __exit and __init sections into __text, bloating
- * the kernel (which is especially evil on embedded platforms).
- */
-static inline int is_valid_name(struct elf_info *elf, Elf_Sym *sym)
-{
- const char *name = elf->strtab + sym->st_name;
-
- if (!name || !strlen(name))
- return 0;
- return !is_arm_mapping_symbol(name);
-}
-
/*
* Find symbols before or equal addr and after addr - in the section sec.
* If we find two symbols with equal offset prefer one with a valid name.
diff --git a/security/Kconfig b/security/Kconfig
index 6c379cd..f045dc7 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -6,6 +6,10 @@
source security/keys/Kconfig
+if ARCH_QCOM
+source security/pfe/Kconfig
+endif
+
config SECURITY_DMESG_RESTRICT
bool "Restrict unprivileged access to the kernel syslog"
default n
diff --git a/security/Makefile b/security/Makefile
index 4d2d378..47bffaa 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -10,6 +10,7 @@
subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor
subdir-$(CONFIG_SECURITY_YAMA) += yama
subdir-$(CONFIG_SECURITY_LOADPIN) += loadpin
+subdir-$(CONFIG_ARCH_QCOM) += pfe
# always enable default capabilities
obj-y += commoncap.o
@@ -26,6 +27,7 @@
obj-$(CONFIG_SECURITY_YAMA) += yama/
obj-$(CONFIG_SECURITY_LOADPIN) += loadpin/
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
+obj-$(CONFIG_ARCH_QCOM) += pfe/
# Object integrity file lists
subdir-$(CONFIG_INTEGRITY) += integrity
diff --git a/security/keys/key.c b/security/keys/key.c
index d97c939..249a6da4 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
spin_lock(&user->lock);
if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
- if (user->qnkeys + 1 >= maxkeys ||
- user->qnbytes + quotalen >= maxbytes ||
+ if (user->qnkeys + 1 > maxkeys ||
+ user->qnbytes + quotalen > maxbytes ||
user->qnbytes + quotalen < user->qnbytes)
goto no_quota;
}
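
The key_alloc() quota change above is an off-by-one fix: the limits become inclusive, so a user may hold exactly maxkeys keys and maxbytes bytes before being refused. A tiny illustration of the boundary with hypothetical numbers (qnkeys == 199, maxkeys == 200):

static bool example_old_reject(unsigned int qnkeys, unsigned int maxkeys)
{
	return qnkeys + 1 >= maxkeys;	/* 200 >= 200: the 200th key is refused */
}

static bool example_new_reject(unsigned int qnkeys, unsigned int maxkeys)
{
	return qnkeys + 1 > maxkeys;	/* 200 > 200 is false: it is allowed */
}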
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 41bcf57..99a5514 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -661,9 +661,6 @@ static bool search_nested_keyrings(struct key *keyring,
BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
(ctx->flags & STATE_CHECKS) == STATE_CHECKS);
- if (ctx->index_key.description)
- ctx->index_key.desc_len = strlen(ctx->index_key.description);
-
/* Check to see if this top-level keyring is what we are looking for
* and whether it is valid or not.
*/
@@ -914,6 +911,7 @@ key_ref_t keyring_search(key_ref_t keyring,
struct keyring_search_context ctx = {
.index_key.type = type,
.index_key.description = description,
+ .index_key.desc_len = strlen(description),
.cred = current_cred(),
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 5af2934..d38be9d 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -166,8 +166,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
int rc;
struct keyring_search_context ctx = {
- .index_key.type = key->type,
- .index_key.description = key->description,
+ .index_key = key->index_key,
.cred = m->file->f_cred,
.match_data.cmp = lookup_user_key_possessed,
.match_data.raw_data = key,
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 114f740..7385536 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -545,6 +545,7 @@ struct key *request_key_and_link(struct key_type *type,
struct keyring_search_context ctx = {
.index_key.type = type,
.index_key.description = description,
+ .index_key.desc_len = strlen(description),
.cred = current_cred(),
.match_data.cmp = key_default_cmp,
.match_data.raw_data = description,
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 424e1d9..6797843 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -246,7 +246,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
struct key *authkey;
key_ref_t authkey_ref;
- sprintf(description, "%x", target_id);
+ ctx.index_key.desc_len = sprintf(description, "%x", target_id);
authkey_ref = search_process_keyrings(&ctx);
diff --git a/security/pfe/Kconfig b/security/pfe/Kconfig
new file mode 100644
index 0000000..47c8a03
--- /dev/null
+++ b/security/pfe/Kconfig
@@ -0,0 +1,42 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "Qualcomm Technologies, Inc. Per File Encryption security device drivers"
+ depends on ARCH_QCOM
+
+config PFT
+ bool "Per-File-Tagger driver"
+ depends on SECURITY
+ default n
+ help
+ This driver is used for tagging enterprise files.
+ It is part of the Per-File-Encryption (PFE) feature.
+ The driver tags files when they are created by a
+ registered application.
+ Tagged files are encrypted using the dm-req-crypt driver.
+
+config PFK
+ bool "Per-File-Key driver"
+ depends on SECURITY
+ depends on SECURITY_SELINUX
+ default n
+ help
+ This driver is used for storing eCryptfs information
+ in the file node.
+ It is part of the eCryptfs hardware-enhanced solution
+ provided by Qualcomm Technologies, Inc.
+ The information is used when the file is later encrypted
+ using the ICE or dm crypto engine.
+
+config PFK_WRAPPED_KEY_SUPPORTED
+ bool "Per-File-Key driver with wrapped key support"
+ depends on SECURITY
+ depends on SECURITY_SELINUX
+ depends on QSEECOM
+ depends on PFK
+ default n
+ help
+ Adds wrapped key support to the PFK driver. Instead of setting
+ the key directly in ICE, it unwraps the key and sets the key
+ in ICE.
+ It ensures the key is protected within a secure environment
+ and only the wrapped key is present in the kernel.
+endmenu
diff --git a/security/pfe/Makefile b/security/pfe/Makefile
new file mode 100644
index 0000000..5758772
--- /dev/null
+++ b/security/pfe/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-y += -Isecurity/selinux -Isecurity/selinux/include
+ccflags-y += -Ifs/crypto
+ccflags-y += -Idrivers/misc
+
+obj-$(CONFIG_PFT) += pft.o
+obj-$(CONFIG_PFK) += pfk.o pfk_kc.o pfk_ice.o pfk_ext4.o pfk_f2fs.o
diff --git a/security/pfe/pfk.c b/security/pfe/pfk.c
new file mode 100644
index 0000000..218b283
--- /dev/null
+++ b/security/pfe/pfk.c
@@ -0,0 +1,547 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Per-File-Key (PFK).
+ *
+ * This driver is responsible for overall management of various
+ * Per File Encryption variants that work on top of or as part of different
+ * file systems.
+ *
+ * The driver has the following purposes:
+ * 1) Define priorities between PFE variants if more than one is enabled
+ * 2) Extract key information from the inode
+ * 3) Load and manage the various keys in the ICE HW engine
+ * 4) Be invoked from the various layers in FS/BLOCK/STORAGE DRIVER
+ * that need to make decisions about HW encryption management of the data
+ * Some examples:
+ * BLOCK LAYER: when it decides whether two chunks can be combined into
+ * one encryption / decryption request sent to the HW
+ *
+ * UFS DRIVER: when it needs to configure the ICE HW with a particular key
+ * slot to be used for encryption / decryption
+ *
+ * PFE variants can differ in the particular way the cryptographic info is
+ * stored inside the inode, the actions taken upon file operations, etc.,
+ * but the common properties are described above
+ *
+ */
+
+#define pr_fmt(fmt) "pfk [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <linux/bio.h>
+#include <linux/security.h>
+#include <crypto/algapi.h>
+#include <crypto/ice.h>
+
+#include <linux/pfk.h>
+
+#include "pfk_kc.h"
+#include "objsec.h"
+#include "pfk_ice.h"
+#include "pfk_ext4.h"
+#include "pfk_f2fs.h"
+#include "pfk_internal.h"
+
+static bool pfk_ready;
+
+
+/* might be replaced by a table when more than one cipher is supported */
+#define PFK_SUPPORTED_KEY_SIZE 32
+#define PFK_SUPPORTED_SALT_SIZE 32
+
+/* Various PFE types and function tables to support each one of them */
+enum pfe_type {EXT4_CRYPT_PFE, F2FS_CRYPT_PFE, INVALID_PFE};
+
+typedef int (*pfk_parse_inode_type)(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe);
+
+typedef bool (*pfk_allow_merge_bio_type)(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2);
+
+static const pfk_parse_inode_type pfk_parse_inode_ftable[] = {
+ &pfk_ext4_parse_inode, /* EXT4_CRYPT_PFE */
+ &pfk_f2fs_parse_inode, /* F2FS_CRYPT_PFE */
+};
+
+static const pfk_allow_merge_bio_type pfk_allow_merge_bio_ftable[] = {
+ &pfk_ext4_allow_merge_bio, /* EXT4_CRYPT_PFE */
+ &pfk_f2fs_allow_merge_bio, /* F2FS_CRYPT_PFE */
+};
+
+static void __exit pfk_exit(void)
+{
+ pfk_ready = false;
+ pfk_ext4_deinit();
+ pfk_f2fs_deinit();
+ pfk_kc_deinit();
+}
+
+static int __init pfk_init(void)
+{
+ int ret = 0;
+
+ ret = pfk_ext4_init();
+ if (ret != 0)
+ goto fail;
+
+ ret = pfk_f2fs_init();
+ if (ret != 0)
+ goto fail;
+
+ ret = pfk_kc_init();
+ if (ret != 0) {
+		pr_err("could not init pfk key cache, error %d\n", ret);
+ pfk_ext4_deinit();
+ pfk_f2fs_deinit();
+ goto fail;
+ }
+
+ pfk_ready = true;
+ pr_debug("Driver initialized successfully\n");
+
+ return 0;
+
+fail:
+ pr_err("Failed to init driver\n");
+ return -ENODEV;
+}
+
+/*
+ * If more than one type is supported simultaneously, this function will also
+ * set the priority between them
+ */
+static enum pfe_type pfk_get_pfe_type(const struct inode *inode)
+{
+ if (!inode)
+ return INVALID_PFE;
+
+ if (pfk_is_ext4_type(inode))
+ return EXT4_CRYPT_PFE;
+
+ if (pfk_is_f2fs_type(inode))
+ return F2FS_CRYPT_PFE;
+
+ return INVALID_PFE;
+}
+
+/**
+ * inode_to_filename() - get the filename from inode pointer.
+ * @inode: inode pointer
+ *
+ * It is used for debug prints.
+ *
+ * Return: filename string or "unknown".
+ */
+char *inode_to_filename(const struct inode *inode)
+{
+ struct dentry *dentry = NULL;
+ char *filename = NULL;
+
+ if (!inode)
+ return "NULL";
+
+ if (hlist_empty(&inode->i_dentry))
+ return "unknown";
+
+ dentry = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
+ filename = dentry->d_iname;
+
+ return filename;
+}
+
+/**
+ * pfk_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_is_ready(void)
+{
+ return pfk_ready;
+}
+
+/**
+ * pfk_bio_get_inode() - get the inode from a bio.
+ * @bio: Pointer to BIO structure.
+ *
+ * Walk the bio struct links to get the inode.
+ * Note that in general a bio may consist of several pages from
+ * several files, but in our case we always assume that all pages come
+ * from the same file, since our logic ensures it. That is why we only
+ * walk through the first page to look up the inode.
+ *
+ * Return: pointer to the inode struct if successful, or NULL otherwise.
+ *
+ */
+static struct inode *pfk_bio_get_inode(const struct bio *bio)
+{
+ if (!bio)
+ return NULL;
+ if (!bio_has_data((struct bio *)bio))
+ return NULL;
+ if (!bio->bi_io_vec)
+ return NULL;
+ if (!bio->bi_io_vec->bv_page)
+ return NULL;
+
+ if (PageAnon(bio->bi_io_vec->bv_page)) {
+ struct inode *inode;
+
+ /* Using direct-io (O_DIRECT) without page cache */
+ inode = dio_bio_get_inode((struct bio *)bio);
+ pr_debug("inode on direct-io, inode = 0x%pK.\n", inode);
+
+ return inode;
+ }
+
+ if (!page_mapping(bio->bi_io_vec->bv_page))
+ return NULL;
+
+ return page_mapping(bio->bi_io_vec->bv_page)->host;
+}
+
+/**
+ * pfk_key_size_to_key_type() - translate key size to key size enum
+ * @key_size: key size in bytes
+ * @key_size_type: pointer to store the output enum (can be null)
+ *
+ * Return 0 in case of success, error otherwise (i.e., unsupported key size)
+ */
+int pfk_key_size_to_key_type(size_t key_size,
+ enum ice_crpto_key_size *key_size_type)
+{
+ /*
+	 * Currently only a 32-byte (256-bit) key size is supported.
+	 * In the future, a table of supported key sizes might
+	 * be introduced.
+ */
+
+ if (key_size != PFK_SUPPORTED_KEY_SIZE) {
+		pr_err("unsupported key size %zu\n", key_size);
+ return -EINVAL;
+ }
+
+ if (key_size_type)
+ *key_size_type = ICE_CRYPTO_KEY_SIZE_256;
+
+ return 0;
+}
+
+/*
+ * Check whether the inode's superblock filesystem type matches @fs_type
+ */
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+ const char *fs_type)
+{
+ if (!inode || !fs_type)
+ return false;
+
+ if (!inode->i_sb)
+ return false;
+
+ if (!inode->i_sb->s_type)
+ return false;
+
+ return (strcmp(inode->i_sb->s_type->name, fs_type) == 0);
+}
+
+/**
+ * pfk_get_key_for_bio() - get the encryption key to be used for a bio
+ *
+ * @bio: pointer to the BIO
+ * @key_info: pointer to the key information which will be filled in
+ * @algo_mode: optional pointer to the algorithm identifier which will be set
+ * @is_pfe: will be set to false if the BIO should be left unencrypted
+ * @data_unit: optional pointer that receives the data unit (DUN) size
+ *	selector for ICE
+ *
+ * Return: 0 if a key is being used, otherwise a -errno value
+ */
+static int pfk_get_key_for_bio(const struct bio *bio,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo_mode,
+ bool *is_pfe, unsigned int *data_unit)
+{
+ const struct inode *inode;
+ enum pfe_type which_pfe;
+ const struct blk_encryption_key *key;
+ char *s_type = NULL;
+
+ inode = pfk_bio_get_inode(bio);
+ which_pfe = pfk_get_pfe_type(inode);
+ s_type = (char *)pfk_kc_get_storage_type();
+
+ /*
+	 * Select the data unit (DUN) size based on the storage type:
+	 * 512-byte DUN for ext4 on eMMC;
+	 * 4K DUN for ext4 on UFS, and for f2fs on UFS and eMMC.
+ */
+
+ if (data_unit) {
+ if (!bio_dun(bio) && !memcmp(s_type, "sdcc", strlen("sdcc")))
+ *data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B;
+ else
+ *data_unit = 1 << ICE_CRYPTO_DATA_UNIT_4_KB;
+ }
+
+ if (which_pfe != INVALID_PFE) {
+ /* Encrypted file; override ->bi_crypt_key */
+ pr_debug("parsing inode %lu with PFE type %d\n",
+ inode->i_ino, which_pfe);
+ return (*(pfk_parse_inode_ftable[which_pfe]))
+ (bio, inode, key_info, algo_mode, is_pfe);
+ }
+
+ /*
+ * bio is not for an encrypted file. Use ->bi_crypt_key if it was set.
+ * Otherwise, don't encrypt/decrypt the bio.
+ */
+ key = bio->bi_crypt_key;
+ if (!key) {
+ *is_pfe = false;
+ return -EINVAL;
+ }
+
+ /* Note: the "salt" is really just the second half of the XTS key. */
+ BUILD_BUG_ON(sizeof(key->raw) !=
+ PFK_SUPPORTED_KEY_SIZE + PFK_SUPPORTED_SALT_SIZE);
+ key_info->key = &key->raw[0];
+ key_info->key_size = PFK_SUPPORTED_KEY_SIZE;
+ key_info->salt = &key->raw[PFK_SUPPORTED_KEY_SIZE];
+ key_info->salt_size = PFK_SUPPORTED_SALT_SIZE;
+ if (algo_mode)
+ *algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+ return 0;
+}
+
+/**
+ * pfk_load_key_start() - load the PFE encryption key into ICE.
+ *			  Can also be invoked from a non-PFE context; in that
+ *			  case it is not relevant and the is_pfe flag is set
+ *			  to false.
+ *
+ * @bio: Pointer to the BIO structure
+ * @ice_setting: Pointer to the ice setting structure that will be filled with
+ *	the ice configuration values, including the index to which the key
+ *	was loaded
+ * @is_pfe: will be false if the inode is not relevant to PFE, in which case
+ *	it should be treated as non-PFE by the block layer
+ * @async: if true, QSEE calls must not be made from this context; -EAGAIN is
+ *	returned when the key is not already cached
+ *
+ * Returns 0 on success and fills @ice_setting with the key index in the
+ * encryption hw and additional information that will be used later for
+ * configuration of the encryption hw.
+ *
+ * Must be followed by pfk_load_key_end() when the key is no longer used by
+ * ICE.
+ *
+ */
+int pfk_load_key_start(const struct bio *bio,
+ struct ice_crypto_setting *ice_setting, bool *is_pfe,
+ bool async)
+{
+ int ret = 0;
+ struct pfk_key_info key_info = {NULL, NULL, 0, 0};
+ enum ice_cryto_algo_mode algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+ enum ice_crpto_key_size key_size_type = 0;
+ unsigned int data_unit = 1 << ICE_CRYPTO_DATA_UNIT_512_B;
+ u32 key_index = 0;
+
+ if (!is_pfe) {
+ pr_err("is_pfe is NULL\n");
+ return -EINVAL;
+ }
+
+ /*
+ * only a few errors below can indicate that
+ * this function was not invoked within PFE context,
+ * otherwise we will consider it PFE
+ */
+ *is_pfe = true;
+
+ if (!pfk_is_ready())
+ return -ENODEV;
+
+ if (!ice_setting) {
+ pr_err("ice setting is NULL\n");
+ return -EINVAL;
+ }
+
+ ret = pfk_get_key_for_bio(bio, &key_info, &algo_mode, is_pfe,
+ &data_unit);
+
+ if (ret != 0)
+ return ret;
+
+ ret = pfk_key_size_to_key_type(key_info.key_size, &key_size_type);
+ if (ret != 0)
+ return ret;
+
+ ret = pfk_kc_load_key_start(key_info.key, key_info.key_size,
+ key_info.salt, key_info.salt_size, &key_index, async,
+ data_unit);
+ if (ret) {
+ if (ret != -EBUSY && ret != -EAGAIN)
+ pr_err("start: could not load key into pfk key cache, error %d\n",
+ ret);
+
+ return ret;
+ }
+
+ ice_setting->key_size = key_size_type;
+ ice_setting->algo_mode = algo_mode;
+ /* hardcoded for now */
+ ice_setting->key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
+ ice_setting->key_index = key_index;
+
+ pr_debug("loaded key for file %s key_index %d\n",
+ inode_to_filename(pfk_bio_get_inode(bio)), key_index);
+
+ return 0;
+}
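+
+/*
+ * Illustrative usage sketch (not part of this driver): how a storage driver
+ * might bracket an encrypted request with the two calls above. The request
+ * variable req and the helpers submit_plain(), program_ice_slot() and
+ * submit_to_hw() are hypothetical; only pfk_load_key_start() and
+ * pfk_load_key_end() are real.
+ *
+ *	struct ice_crypto_setting setting;
+ *	bool is_pfe = true;
+ *	int ret;
+ *
+ *	ret = pfk_load_key_start(req->bio, &setting, &is_pfe, false);
+ *	if (ret)
+ *		return is_pfe ? ret : submit_plain(req);
+ *	program_ice_slot(req, &setting);
+ *	submit_to_hw(req);
+ *	...
+ *	pfk_load_key_end(req->bio, &is_pfe);
+ *
+ * pfk_load_key_end() is called only once the HW has finished using the key
+ * for this request; setting.key_index selects the ICE key slot.
+ */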
+
+/**
+ * pfk_load_key_end() - mark the PFE key as no longer used by ICE.
+ *			Can also be invoked from a non-PFE context; in that
+ *			case it is not relevant and the is_pfe flag is set
+ *			to false.
+ *
+ * @bio: Pointer to the BIO structure
+ * @is_pfe: Pointer to the is_pfe flag, which will be true if the function
+ *	    was invoked from a PFE context
+ */
+int pfk_load_key_end(const struct bio *bio, bool *is_pfe)
+{
+ int ret = 0;
+ struct pfk_key_info key_info = {NULL, NULL, 0, 0};
+
+ if (!is_pfe) {
+ pr_err("is_pfe is NULL\n");
+ return -EINVAL;
+ }
+
+ /* only a few errors below can indicate that
+ * this function was not invoked within PFE context,
+ * otherwise we will consider it PFE
+ */
+ *is_pfe = true;
+
+ if (!pfk_is_ready())
+ return -ENODEV;
+
+ ret = pfk_get_key_for_bio(bio, &key_info, NULL, is_pfe, NULL);
+ if (ret != 0)
+ return ret;
+
+ pfk_kc_load_key_end(key_info.key, key_info.key_size,
+ key_info.salt, key_info.salt_size);
+
+ pr_debug("finished using key for file %s\n",
+ inode_to_filename(pfk_bio_get_inode(bio)));
+
+ return 0;
+}
+
+/**
+ * pfk_allow_merge_bio() - Check if 2 BIOs can be merged.
+ * @bio1: Pointer to first BIO structure.
+ * @bio2: Pointer to second BIO structure.
+ *
+ * Prevent merging of BIOs from encrypted and non-encrypted
+ * files, or from files encrypted with different keys.
+ * Also prevent non-encrypted and encrypted data from the same file
+ * from being merged (the ecryptfs header, if stored inside the file,
+ * should be non-encrypted).
+ * This API is called by the block layer.
+ *
+ * Return: true if the BIOs are allowed to be merged, false
+ * otherwise.
+ */
+bool pfk_allow_merge_bio(const struct bio *bio1, const struct bio *bio2)
+{
+ const struct blk_encryption_key *key1;
+ const struct blk_encryption_key *key2;
+ const struct inode *inode1;
+ const struct inode *inode2;
+ enum pfe_type which_pfe1;
+ enum pfe_type which_pfe2;
+
+ if (!pfk_is_ready())
+ return false;
+
+ if (!bio1 || !bio2)
+ return false;
+
+ if (bio1 == bio2)
+ return true;
+
+ key1 = bio1->bi_crypt_key;
+ key2 = bio2->bi_crypt_key;
+
+ inode1 = pfk_bio_get_inode(bio1);
+ inode2 = pfk_bio_get_inode(bio2);
+
+ which_pfe1 = pfk_get_pfe_type(inode1);
+ which_pfe2 = pfk_get_pfe_type(inode2);
+
+ /*
+ * If one bio is for an encrypted file and the other is for a different
+ * type of encrypted file or for blocks that are not part of an
+ * encrypted file, do not merge.
+ */
+ if (which_pfe1 != which_pfe2)
+ return false;
+
+ if (which_pfe1 != INVALID_PFE) {
+ /* Both bios are for the same type of encrypted file. */
+ return (*(pfk_allow_merge_bio_ftable[which_pfe1]))(bio1, bio2,
+ inode1, inode2);
+ }
+
+ /*
+ * Neither bio is for an encrypted file. Merge only if the default keys
+ * are the same (or both are NULL).
+ */
+ return key1 == key2 ||
+ (key1 && key2 &&
+ !crypto_memneq(key1->raw, key2->raw, sizeof(key1->raw)));
+}
+
+int pfk_fbe_clear_key(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size)
+{
+ int ret = -EINVAL;
+
+ if (!key || !salt)
+ return ret;
+
+ ret = pfk_kc_remove_key_with_salt(key, key_size, salt, salt_size);
+ if (ret)
+ pr_err("Clear key error: ret value %d\n", ret);
+ return ret;
+}
+
+/**
+ * Flush the key table on storage core reset. During a core reset the key
+ * configuration is lost in ICE. We need to flush the cache so that the keys
+ * will be reconfigured for every subsequent transaction.
+ */
+void pfk_clear_on_reset(void)
+{
+ if (!pfk_is_ready())
+ return;
+
+ pfk_kc_clear_on_reset();
+}
+
+module_init(pfk_init);
+module_exit(pfk_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key driver");
diff --git a/security/pfe/pfk_ext4.c b/security/pfe/pfk_ext4.c
new file mode 100644
index 0000000..0ccd46b
--- /dev/null
+++ b/security/pfe/pfk_ext4.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Per-File-Key (PFK) - EXT4
+ *
+ * This driver is used for working with the EXT4 crypt extension.
+ *
+ * The key information is stored in the inode by EXT4 when the file is first
+ * opened and is later accessed by the block device driver to actually load
+ * the key into the encryption hw.
+ *
+ * PFK exposes APIs for loading and removing keys from the encryption hw,
+ * and also an API to determine whether 2 adjacent blocks can be aggregated
+ * by the block layer into one request to the encryption hw.
+ *
+ */
+
+#define pr_fmt(fmt) "pfk_ext4 [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+
+#include "fscrypt_ice.h"
+#include "pfk_ext4.h"
+
+static bool pfk_ext4_ready;
+
+/*
+ * pfk_ext4_deinit() - Deinit function, should be invoked by upper PFK layer
+ */
+void pfk_ext4_deinit(void)
+{
+ pfk_ext4_ready = false;
+}
+
+/*
+ * pfk_ext4_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_ext4_init(void)
+{
+ pfk_ext4_ready = true;
+	pr_info("PFK EXT4 initialized successfully\n");
+
+ return 0;
+}
+
+/**
+ * pfk_ext4_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_ext4_is_ready(void)
+{
+ return pfk_ext4_ready;
+}
+
+/**
+ * pfk_is_ext4_type() - return true if inode belongs to ICE EXT4 PFE
+ * @inode: inode pointer
+ */
+bool pfk_is_ext4_type(const struct inode *inode)
+{
+ if (!pfe_is_inode_filesystem_type(inode, "ext4"))
+ return false;
+
+ return fscrypt_should_be_processed_by_ice(inode);
+}
+
+/**
+ * pfk_ext4_parse_cipher() - parse cipher from inode to enum
+ * @inode: inode
+ * @algo: pointer to store the output enum (can be null)
+ *
+ * Return 0 in case of success, error otherwise (i.e., unsupported cipher)
+ */
+static int pfk_ext4_parse_cipher(const struct inode *inode,
+ enum ice_cryto_algo_mode *algo)
+{
+ /*
+	 * Currently only the AES-XTS algorithm is supported.
+	 * In the future, a table of supported ciphers might
+	 * be introduced.
+ */
+
+ if (!inode)
+ return -EINVAL;
+
+ if (!fscrypt_is_aes_xts_cipher(inode)) {
+		pr_err("ext4 algorithm is not supported by pfk\n");
+ return -EINVAL;
+ }
+
+ if (algo)
+ *algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+
+ return 0;
+}
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe)
+{
+ int ret = 0;
+
+ if (!is_pfe)
+ return -EINVAL;
+
+ /*
+ * only a few errors below can indicate that
+ * this function was not invoked within PFE context,
+ * otherwise we will consider it PFE
+ */
+ *is_pfe = true;
+
+ if (!pfk_ext4_is_ready())
+ return -ENODEV;
+
+ if (!inode)
+ return -EINVAL;
+
+ if (!key_info)
+ return -EINVAL;
+
+ key_info->key = fscrypt_get_ice_encryption_key(inode);
+ if (!key_info->key) {
+ pr_err("could not parse key from ext4\n");
+ return -EINVAL;
+ }
+
+ key_info->key_size = fscrypt_get_ice_encryption_key_size(inode);
+ if (!key_info->key_size) {
+ pr_err("could not parse key size from ext4\n");
+ return -EINVAL;
+ }
+
+ key_info->salt = fscrypt_get_ice_encryption_salt(inode);
+ if (!key_info->salt) {
+ pr_err("could not parse salt from ext4\n");
+ return -EINVAL;
+ }
+
+ key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode);
+ if (!key_info->salt_size) {
+ pr_err("could not parse salt size from ext4\n");
+ return -EINVAL;
+ }
+
+ ret = pfk_ext4_parse_cipher(inode, algo);
+ if (ret != 0) {
+ pr_err("not supported cipher\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2)
+{
+ /* if there is no ext4 pfk, don't disallow merging blocks */
+ if (!pfk_ext4_is_ready())
+ return true;
+
+ if (!inode1 || !inode2)
+ return false;
+
+ return fscrypt_is_ice_encryption_info_equal(inode1, inode2);
+}
diff --git a/security/pfe/pfk_ext4.h b/security/pfe/pfk_ext4.h
new file mode 100644
index 0000000..bca23f3
--- /dev/null
+++ b/security/pfe/pfk_ext4.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _PFK_EXT4_H_
+#define _PFK_EXT4_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <crypto/ice.h>
+#include "pfk_internal.h"
+
+bool pfk_is_ext4_type(const struct inode *inode);
+
+int pfk_ext4_parse_inode(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe);
+
+bool pfk_ext4_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2);
+
+int __init pfk_ext4_init(void);
+
+void pfk_ext4_deinit(void);
+
+#endif /* _PFK_EXT4_H_ */
diff --git a/security/pfe/pfk_f2fs.c b/security/pfe/pfk_f2fs.c
new file mode 100644
index 0000000..5ea79ace
--- /dev/null
+++ b/security/pfe/pfk_f2fs.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * Per-File-Key (PFK) - f2fs
+ *
+ * This driver is used for working with the EXT4/F2FS crypt extension.
+ *
+ * The key information is stored in the inode by EXT4/F2FS when the file is
+ * first opened and is later accessed by the block device driver to actually
+ * load the key into the encryption hw.
+ *
+ * PFK exposes APIs for loading and removing keys from the encryption hw,
+ * and also an API to determine whether 2 adjacent blocks can be aggregated
+ * by the block layer into one request to the encryption hw.
+ *
+ */
+
+#define pr_fmt(fmt) "pfk_f2fs [%s]: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/errno.h>
+#include <linux/printk.h>
+
+#include "fscrypt_ice.h"
+#include "pfk_f2fs.h"
+
+static bool pfk_f2fs_ready;
+
+/*
+ * pfk_f2fs_deinit() - Deinit function, should be invoked by upper PFK layer
+ */
+void pfk_f2fs_deinit(void)
+{
+ pfk_f2fs_ready = false;
+}
+
+/*
+ * pfk_f2fs_init() - Init function, should be invoked by upper PFK layer
+ */
+int __init pfk_f2fs_init(void)
+{
+ pfk_f2fs_ready = true;
+	pr_info("PFK F2FS initialized successfully\n");
+
+ return 0;
+}
+
+/**
+ * pfk_f2fs_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the driver is ready.
+ */
+static inline bool pfk_f2fs_is_ready(void)
+{
+ return pfk_f2fs_ready;
+}
+
+/**
+ * pfk_is_f2fs_type() - return true if inode belongs to ICE F2FS PFE
+ * @inode: inode pointer
+ */
+bool pfk_is_f2fs_type(const struct inode *inode)
+{
+ if (!pfe_is_inode_filesystem_type(inode, "f2fs"))
+ return false;
+
+ return fscrypt_should_be_processed_by_ice(inode);
+}
+
+/**
+ * pfk_f2fs_parse_cipher() - parse cipher from inode to enum
+ * @inode: inode
+ * @algo: pointer to store the output enum (can be null)
+ *
+ * Return 0 in case of success, error otherwise (i.e., unsupported cipher)
+ */
+static int pfk_f2fs_parse_cipher(const struct inode *inode,
+ enum ice_cryto_algo_mode *algo)
+{
+ /*
+	 * Currently only the AES-XTS algorithm is supported.
+	 * In the future, a table of supported ciphers might
+	 * be introduced.
+ */
+ if (!inode)
+ return -EINVAL;
+
+ if (!fscrypt_is_aes_xts_cipher(inode)) {
+		pr_err("f2fs algorithm is not supported by pfk\n");
+ return -EINVAL;
+ }
+
+ if (algo)
+ *algo = ICE_CRYPTO_ALGO_MODE_AES_XTS;
+
+ return 0;
+}
+
+int pfk_f2fs_parse_inode(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe)
+{
+ int ret = 0;
+
+ if (!is_pfe)
+ return -EINVAL;
+
+ /*
+ * only a few errors below can indicate that
+ * this function was not invoked within PFE context,
+ * otherwise we will consider it PFE
+ */
+ *is_pfe = true;
+
+ if (!pfk_f2fs_is_ready())
+ return -ENODEV;
+
+ if (!inode)
+ return -EINVAL;
+
+ if (!key_info)
+ return -EINVAL;
+
+ key_info->key = fscrypt_get_ice_encryption_key(inode);
+ if (!key_info->key) {
+ pr_err("could not parse key from f2fs\n");
+ return -EINVAL;
+ }
+
+ key_info->key_size = fscrypt_get_ice_encryption_key_size(inode);
+ if (!key_info->key_size) {
+ pr_err("could not parse key size from f2fs\n");
+ return -EINVAL;
+ }
+
+ key_info->salt = fscrypt_get_ice_encryption_salt(inode);
+ if (!key_info->salt) {
+ pr_err("could not parse salt from f2fs\n");
+ return -EINVAL;
+ }
+
+ key_info->salt_size = fscrypt_get_ice_encryption_salt_size(inode);
+ if (!key_info->salt_size) {
+ pr_err("could not parse salt size from f2fs\n");
+ return -EINVAL;
+ }
+
+ ret = pfk_f2fs_parse_cipher(inode, algo);
+ if (ret != 0) {
+ pr_err("not supported cipher\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+bool pfk_f2fs_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2)
+{
+ bool mergeable;
+
+ /* if there is no f2fs pfk, don't disallow merging blocks */
+ if (!pfk_f2fs_is_ready())
+ return true;
+
+ if (!inode1 || !inode2)
+ return false;
+
+ mergeable = fscrypt_is_ice_encryption_info_equal(inode1, inode2);
+ if (!mergeable)
+ return false;
+
+	/* ICE allows only a consecutive DUN (IV) stream. */
+ if (!bio_dun(bio1) && !bio_dun(bio2))
+ return true;
+ else if (!bio_dun(bio1) || !bio_dun(bio2))
+ return false;
+
+ return bio_end_dun(bio1) == bio_dun(bio2);
+}
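+
+/*
+ * Worked example for the DUN check above (the values and the exact
+ * semantics of bio_end_dun() are assumptions for illustration): if bio1
+ * covers DUNs 100..103 and bio_end_dun(bio1) yields the DUN just past the
+ * last data unit (104), then a bio2 starting at DUN 104 keeps the IV stream
+ * contiguous and the merge is allowed, while a bio2 starting at DUN 200
+ * breaks the stream and the merge is rejected.
+ */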
diff --git a/security/pfe/pfk_f2fs.h b/security/pfe/pfk_f2fs.h
new file mode 100644
index 0000000..3c6f7ec
--- /dev/null
+++ b/security/pfe/pfk_f2fs.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _PFK_F2FS_H_
+#define _PFK_F2FS_H_
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <crypto/ice.h>
+#include "pfk_internal.h"
+
+bool pfk_is_f2fs_type(const struct inode *inode);
+
+int pfk_f2fs_parse_inode(const struct bio *bio,
+ const struct inode *inode,
+ struct pfk_key_info *key_info,
+ enum ice_cryto_algo_mode *algo,
+ bool *is_pfe);
+
+bool pfk_f2fs_allow_merge_bio(const struct bio *bio1,
+ const struct bio *bio2, const struct inode *inode1,
+ const struct inode *inode2);
+
+int __init pfk_f2fs_init(void);
+
+void pfk_f2fs_deinit(void);
+
+#endif /* _PFK_F2FS_H_ */
diff --git a/security/pfe/pfk_ice.c b/security/pfe/pfk_ice.c
new file mode 100644
index 0000000..501ff67
--- /dev/null
+++ b/security/pfe/pfk_ice.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/async.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/device-mapper.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/qseecomi.h>
+#include <crypto/ice.h>
+#include "pfk_ice.h"
+
+/**********************************/
+/** global definitions **/
+/**********************************/
+
+#define TZ_ES_INVALIDATE_ICE_KEY 0x3
+#define TZ_ES_CONFIG_SET_ICE_KEY 0x4
+
+/* indexes 0 and 1 are reserved for FDE */
+#define MIN_ICE_KEY_INDEX 2
+
+#define MAX_ICE_KEY_INDEX 31
+
+#define TZ_ES_CONFIG_SET_ICE_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, \
+ TZ_ES_CONFIG_SET_ICE_KEY)
+
+#define TZ_ES_INVALIDATE_ICE_KEY_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, \
+ TZ_SVC_ES, TZ_ES_INVALIDATE_ICE_KEY)
+
+#define TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_1( \
+ TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_5( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define CONTEXT_SIZE 0x1000
+
+#define ICE_BUFFER_SIZE 64
+
+static uint8_t ice_buffer[ICE_BUFFER_SIZE];
+
+enum {
+ ICE_CIPHER_MODE_XTS_128 = 0,
+ ICE_CIPHER_MODE_CBC_128 = 1,
+ ICE_CIPHER_MODE_XTS_256 = 3,
+ ICE_CIPHER_MODE_CBC_256 = 4
+};
+
+static int set_key(uint32_t index, const uint8_t *key, const uint8_t *salt,
+ unsigned int data_unit)
+{
+ struct scm_desc desc = {0};
+ int ret = 0;
+ uint32_t smc_id = 0;
+ char *tzbuf = (char *)ice_buffer;
+ uint32_t size = ICE_BUFFER_SIZE / 2;
+
+ memset(tzbuf, 0, ICE_BUFFER_SIZE);
+
+ memcpy(ice_buffer, key, size);
+ memcpy(ice_buffer+size, salt, size);
+
+ dmac_flush_range(tzbuf, tzbuf + ICE_BUFFER_SIZE);
+
+ smc_id = TZ_ES_CONFIG_SET_ICE_KEY_ID;
+
+ desc.arginfo = TZ_ES_CONFIG_SET_ICE_KEY_PARAM_ID;
+ desc.args[0] = index;
+ desc.args[1] = virt_to_phys(tzbuf);
+ desc.args[2] = ICE_BUFFER_SIZE;
+ desc.args[3] = ICE_CIPHER_MODE_XTS_256;
+ desc.args[4] = data_unit;
+
+ ret = scm_call2_noretry(smc_id, &desc);
+ if (ret)
+ pr_err("%s:SCM call Error: 0x%x\n", __func__, ret);
+
+ return ret;
+}
+
+static int clear_key(uint32_t index)
+{
+ struct scm_desc desc = {0};
+ int ret = 0;
+ uint32_t smc_id = 0;
+
+ smc_id = TZ_ES_INVALIDATE_ICE_KEY_ID;
+
+ desc.arginfo = TZ_ES_INVALIDATE_ICE_KEY_PARAM_ID;
+ desc.args[0] = index;
+
+ ret = scm_call2_noretry(smc_id, &desc);
+ if (ret)
+ pr_err("%s:SCM call Error: 0x%x\n", __func__, ret);
+ return ret;
+}
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+ char *storage_type, unsigned int data_unit)
+{
+ int ret = 0, ret1 = 0;
+ char *s_type = storage_type;
+
+ if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+ pr_err("%s Invalid index %d\n", __func__, index);
+ return -EINVAL;
+ }
+ if (!key || !salt) {
+ pr_err("%s Invalid key/salt\n", __func__);
+ return -EINVAL;
+ }
+
+ if (s_type == NULL) {
+ pr_err("%s Invalid Storage type\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = qcom_ice_setup_ice_hw((const char *)s_type, true);
+ if (ret) {
+ pr_err("%s: could not enable clocks: %d\n", __func__, ret);
+ goto out;
+ }
+
+ ret = set_key(index, key, salt, data_unit);
+ if (ret) {
+ pr_err("%s: Set Key Error: %d\n", __func__, ret);
+ if (ret == -EBUSY) {
+ if (qcom_ice_setup_ice_hw((const char *)s_type, false))
+ pr_err("%s: clock disable failed\n", __func__);
+ goto out;
+ }
+ /* Try to invalidate the key to keep ICE in proper state */
+ ret1 = clear_key(index);
+ if (ret1)
+			pr_err("%s: Invalidate key error: %d\n", __func__, ret1);
+ }
+
+ ret1 = qcom_ice_setup_ice_hw((const char *)s_type, false);
+	if (ret1)
+		pr_err("%s: Error %d disabling clocks\n", __func__, ret1);
+
+out:
+ return ret;
+}
+
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type)
+{
+ int ret = 0;
+
+ if (index < MIN_ICE_KEY_INDEX || index > MAX_ICE_KEY_INDEX) {
+ pr_err("%s Invalid index %d\n", __func__, index);
+ return -EINVAL;
+ }
+
+ if (storage_type == NULL) {
+ pr_err("%s Invalid Storage type\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = qcom_ice_setup_ice_hw((const char *)storage_type, true);
+ if (ret) {
+ pr_err("%s: could not enable clocks: 0x%x\n", __func__, ret);
+ return ret;
+ }
+
+ ret = clear_key(index);
+ if (ret)
+ pr_err("%s: Invalidate key error: %d\n", __func__, ret);
+
+ if (qcom_ice_setup_ice_hw((const char *)storage_type, false))
+ pr_err("%s: could not disable clocks\n", __func__);
+
+ return ret;
+}
diff --git a/security/pfe/pfk_ice.h b/security/pfe/pfk_ice.h
new file mode 100644
index 0000000..0331439
--- /dev/null
+++ b/security/pfe/pfk_ice.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef PFK_ICE_H_
+#define PFK_ICE_H_
+
+/*
+ * PFK ICE
+ *
+ * ICE key configuration through SCM calls.
+ *
+ */
+
+#include <linux/types.h>
+
+int pfk_ice_init(void);
+int pfk_ice_deinit(void);
+
+int qti_pfk_ice_set_key(uint32_t index, uint8_t *key, uint8_t *salt,
+ char *storage_type, unsigned int data_unit);
+int qti_pfk_ice_invalidate_key(uint32_t index, char *storage_type);
+
+#endif /* PFK_ICE_H_ */
diff --git a/security/pfe/pfk_internal.h b/security/pfe/pfk_internal.h
new file mode 100644
index 0000000..7a800d3
--- /dev/null
+++ b/security/pfe/pfk_internal.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _PFK_INTERNAL_H_
+#define _PFK_INTERNAL_H_
+
+#include <linux/types.h>
+#include <crypto/ice.h>
+
+struct pfk_key_info {
+ const unsigned char *key;
+ const unsigned char *salt;
+ size_t key_size;
+ size_t salt_size;
+};
+
+int pfk_key_size_to_key_type(size_t key_size,
+ enum ice_crpto_key_size *key_size_type);
+
+bool pfe_is_inode_filesystem_type(const struct inode *inode,
+ const char *fs_type);
+
+char *inode_to_filename(const struct inode *inode);
+
+#endif /* _PFK_INTERNAL_H_ */
diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c
new file mode 100644
index 0000000..64c168d
--- /dev/null
+++ b/security/pfe/pfk_kc.c
@@ -0,0 +1,912 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * PFK Key Cache
+ *
+ * Key Cache used internally in PFK.
+ * The purpose of the cache is to save access time to QSEE when loading keys.
+ * Currently the cache is the same size as the total number of keys that can
+ * be loaded into ICE. Since this number is relatively small, the cache
+ * eviction algorithm is a simple linear scan based on the last-usage
+ * timestamp, i.e., the entry that will be evicted is the one with the
+ * oldest timestamp. Empty entries always have the oldest timestamp.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <crypto/ice.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/printk.h>
+#include <linux/sched/signal.h>
+
+#include "pfk_kc.h"
+#include "pfk_ice.h"
+
+
+/** the first available index in ice engine */
+#define PFK_KC_STARTING_INDEX 2
+
+/** currently the only supported key and salt sizes */
+#define PFK_KC_KEY_SIZE 32
+#define PFK_KC_SALT_SIZE 32
+
+/** Table size */
+#define PFK_KC_TABLE_SIZE ((32) - (PFK_KC_STARTING_INDEX))
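+/* i.e. 30 cache entries, mapping to ICE key slots 2..31 (0 and 1 are FDE) */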
+
+/** The maximum key and salt size */
+#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE
+#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
+#define PFK_UFS "ufs"
+
+static DEFINE_SPINLOCK(kc_lock);
+static unsigned long flags;
+static bool kc_ready;
+static char *s_type = "sdcc";
+
+/**
+ * enum pfk_kc_entry_state - state of the entry inside kc table
+ *
+ * @FREE: entry is free
+ * @ACTIVE_ICE_PRELOAD: entry is actively used by ICE engine
+ and cannot be used by others. SCM call
+ to load key to ICE is pending to be performed
+ * @ACTIVE_ICE_LOADED: entry is actively used by ICE engine and
+ cannot be used by others. SCM call to load the
+ key to ICE was successfully executed and key is
+ now loaded
+ * @INACTIVE_INVALIDATING: entry is being invalidated during file close
+ and cannot be used by others until invalidation
+ is complete
+ * @INACTIVE: entry's key is already loaded, but is not
+ currently being used. It can be re-used for
+ optimization and to avoid SCM call cost or
+ it can be taken by another key if there are
+ no FREE entries
+ * @SCM_ERROR: error occurred while scm call was performed to
+ load the key to ICE
+ */
+enum pfk_kc_entry_state {
+ FREE,
+ ACTIVE_ICE_PRELOAD,
+ ACTIVE_ICE_LOADED,
+ INACTIVE_INVALIDATING,
+ INACTIVE,
+ SCM_ERROR
+};
+
+struct kc_entry {
+ unsigned char key[PFK_MAX_KEY_SIZE];
+ size_t key_size;
+
+ unsigned char salt[PFK_MAX_SALT_SIZE];
+ size_t salt_size;
+
+ u64 time_stamp;
+ u32 key_index;
+
+ struct task_struct *thread_pending;
+
+ enum pfk_kc_entry_state state;
+
+ /* ref count for the number of requests in the HW queue for this key */
+ int loaded_ref_cnt;
+ int scm_error;
+};
+
+static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];
+
+/**
+ * kc_is_ready() - driver is initialized and ready.
+ *
+ * Return: true if the key cache is ready.
+ */
+static inline bool kc_is_ready(void)
+{
+ return kc_ready;
+}
+
+static inline void kc_spin_lock(void)
+{
+ spin_lock_irqsave(&kc_lock, flags);
+}
+
+static inline void kc_spin_unlock(void)
+{
+ spin_unlock_irqrestore(&kc_lock, flags);
+}
+
+/**
+ * pfk_kc_get_storage_type() - return the hardware storage type.
+ *
+ * Return: storage type queried during bootup.
+ */
+const char *pfk_kc_get_storage_type(void)
+{
+ return s_type;
+}
+
+/**
+ * kc_entry_is_available() - checks whether the entry is available
+ *
+ * Return true if it is, false otherwise (including when @entry is NULL)
+ * Should be invoked under spinlock
+ */
+static bool kc_entry_is_available(const struct kc_entry *entry)
+{
+ if (!entry)
+ return false;
+
+ return (entry->state == FREE || entry->state == INACTIVE);
+}
+
+/**
+ * kc_entry_wait_till_available() - waits till entry is available
+ *
+ * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted
+ * by signal
+ *
+ * Should be invoked under spinlock
+ */
+static int kc_entry_wait_till_available(struct kc_entry *entry)
+{
+ int res = 0;
+
+ while (!kc_entry_is_available(entry)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current)) {
+ res = -ERESTARTSYS;
+ break;
+ }
+ /* assuming only one thread can try to invalidate
+ * the same entry
+ */
+ entry->thread_pending = current;
+ kc_spin_unlock();
+ schedule();
+ kc_spin_lock();
+ }
+ set_current_state(TASK_RUNNING);
+
+ return res;
+}
+
+/**
+ * kc_entry_start_invalidating() - moves entry to state
+ * INACTIVE_INVALIDATING
+ * If entry is in use, waits till
+ * it gets available
+ * @entry: pointer to entry
+ *
+ * Return 0 in case of success, otherwise error
+ * Should be invoked under spinlock
+ */
+static int kc_entry_start_invalidating(struct kc_entry *entry)
+{
+ int res;
+
+ res = kc_entry_wait_till_available(entry);
+ if (res)
+ return res;
+
+ entry->state = INACTIVE_INVALIDATING;
+
+ return 0;
+}
+
+/**
+ * kc_entry_finish_invalidating() - move the entry to state FREE once
+ *				    invalidation is complete
+ *
+ * @entry: pointer to entry
+ *
+ * Should be invoked under spinlock
+ */
+static void kc_entry_finish_invalidating(struct kc_entry *entry)
+{
+ if (!entry)
+ return;
+
+ if (entry->state != INACTIVE_INVALIDATING)
+ return;
+
+ entry->state = FREE;
+}
+
+/**
+ * kc_min_entry() - compare two entries to find one with minimal time
+ * @a: ptr to the first entry. If NULL the other entry will be returned
+ * @b: pointer to the second entry
+ *
+ * Return the entry whose timestamp is minimal, or b if a is NULL
+ */
+static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
+ struct kc_entry *b)
+{
+ if (!a)
+ return b;
+
+ if (time_before64(b->time_stamp, a->time_stamp))
+ return b;
+
+ return a;
+}
+
+/**
+ * kc_entry_at_index() - return entry at specific index
+ * @index: index of entry to be accessed
+ *
+ * Return entry
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_entry_at_index(int index)
+{
+ return &(kc_table[index]);
+}
+
+/**
+ * kc_find_key_at_index() - find kc entry starting at specific index
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ * @starting_index: index to start the search from; if an entry is found, it
+ * is updated with the index of that entry
+ *
+ * Return the entry, or NULL if not found
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
+ size_t key_size, const unsigned char *salt, size_t salt_size,
+ int *starting_index)
+{
+ struct kc_entry *entry = NULL;
+ int i = 0;
+
+ for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+
+ if (salt != NULL) {
+ if (entry->salt_size != salt_size)
+ continue;
+
+ if (memcmp(entry->salt, salt, salt_size) != 0)
+ continue;
+ }
+
+ if (entry->key_size != key_size)
+ continue;
+
+ if (memcmp(entry->key, key, key_size) == 0) {
+ *starting_index = i;
+ return entry;
+ }
+ }
+
+ return NULL;
+}
+
+/**
+ * kc_find_key() - find kc entry
+ * @key: key to look for
+ * @key_size: the key size
+ * @salt: salt to look for
+ * @salt_size: the salt size
+ *
+ * Return the entry, or NULL if not found
+ * Should be invoked under spinlock
+ */
+static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size)
+{
+ int index = 0;
+
+ return kc_find_key_at_index(key, key_size, salt, salt_size, &index);
+}
+
+/**
+ * kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp
+ * that is not locked
+ *
+ * Returns entry with minimal timestamp. Empty entries have timestamp
+ * of 0, therefore they are returned first.
+ * If all the entries are locked, will return NULL
+ * Should be invoked under spin lock
+ */
+static struct kc_entry *kc_find_oldest_entry_non_locked(void)
+{
+ struct kc_entry *curr_min_entry = NULL;
+ struct kc_entry *entry = NULL;
+ int i = 0;
+
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+
+ if (entry->state == FREE)
+ return entry;
+
+ if (entry->state == INACTIVE)
+ curr_min_entry = kc_min_entry(curr_min_entry, entry);
+ }
+
+ return curr_min_entry;
+}
+
+/**
+ * kc_update_timestamp() - updates timestamp of entry to current
+ *
+ * @entry: entry to update
+ *
+ */
+static void kc_update_timestamp(struct kc_entry *entry)
+{
+ if (!entry)
+ return;
+
+ entry->time_stamp = get_jiffies_64();
+}
+
+/**
+ * kc_clear_entry() - clear the key from entry and mark entry not in use
+ *
+ * @entry: pointer to entry
+ *
+ * Should be invoked under spinlock
+ */
+static void kc_clear_entry(struct kc_entry *entry)
+{
+ if (!entry)
+ return;
+
+ memset(entry->key, 0, entry->key_size);
+ memset(entry->salt, 0, entry->salt_size);
+
+ entry->key_size = 0;
+ entry->salt_size = 0;
+
+ entry->time_stamp = 0;
+ entry->scm_error = 0;
+
+ entry->state = FREE;
+
+ entry->loaded_ref_cnt = 0;
+ entry->thread_pending = NULL;
+}
+
+/**
+ * kc_update_entry() - replaces the key in given entry and
+ * loads the new key to ICE
+ *
+ * @entry: entry to replace key in
+ * @key: key
+ * @key_size: key_size
+ * @salt: salt
+ * @salt_size: salt_size
+ * @data_unit: dun size
+ *
+ * The previous key is securely released and wiped, the new one is loaded
+ * to ICE.
+ * Should be invoked under spinlock
+ * Caller to validate that key/salt_size matches the size in struct kc_entry
+ */
+static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
+ size_t key_size, const unsigned char *salt, size_t salt_size,
+ unsigned int data_unit)
+{
+ int ret;
+
+ kc_clear_entry(entry);
+
+ memcpy(entry->key, key, key_size);
+ entry->key_size = key_size;
+
+ memcpy(entry->salt, salt, salt_size);
+ entry->salt_size = salt_size;
+
+ /* Mark entry as no longer free before releasing the lock */
+ entry->state = ACTIVE_ICE_PRELOAD;
+ kc_spin_unlock();
+
+ ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
+ entry->salt, s_type, data_unit);
+
+ kc_spin_lock();
+ return ret;
+}
+
+/**
+ * pfk_kc_init() - init function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_init(void)
+{
+ int i = 0;
+ struct kc_entry *entry = NULL;
+
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+ entry->key_index = PFK_KC_STARTING_INDEX + i;
+ }
+ kc_ready = true;
+ kc_spin_unlock();
+
+ return 0;
+}
+
+/**
+ * pfk_kc_deinit() - deinit function
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_deinit(void)
+{
+ int res = pfk_kc_clear();
+
+ kc_ready = false;
+
+ return res;
+}
+
+/**
+ * pfk_kc_load_key_start() - retrieve the key from cache or add it if
+ * it's not there and return the ICE hw key index in @key_index.
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ * @key_index: the pointer to key_index where the output will be stored
+ * @async: true if QSEE/SCM calls must not be made from the caller context;
+ *	in that case -EAGAIN is returned when the key is not already cached
+ * @data_unit: data unit (DUN) size selector to be programmed into ICE
+ *
+ * If the key is present in the cache, the key_index will be retrieved from
+ * the cache. If it is not present, the oldest entry in the kc table will be
+ * evicted, the key will be loaded into ICE via QSEE at the evicted entry's
+ * index, and the entry will be stored in the cache.
+ * The entry that is going to be used is marked as in use; it will be marked
+ * as not in use when ICE finishes using it and pfk_kc_load_key_end() is
+ * invoked.
+ * As QSEE calls can only be made from a non-atomic context, when @async is
+ * false it is ok to make those calls in the current context. When @async is
+ * set, -EAGAIN is returned instead and the caller should retry the call from
+ * a different context.
+ *
+ * Return 0 in case of success, error otherwise
+ */
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size, u32 *key_index,
+ bool async, unsigned int data_unit)
+{
+ int ret = 0;
+ struct kc_entry *entry = NULL;
+ bool entry_exists = false;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ if (!key || !salt || !key_index) {
+ pr_err("%s key/salt/key_index NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ if (key_size != PFK_KC_KEY_SIZE) {
+ pr_err("unsupported key size %zu\n", key_size);
+ return -EINVAL;
+ }
+
+ if (salt_size != PFK_KC_SALT_SIZE) {
+ pr_err("unsupported salt size %zu\n", salt_size);
+ return -EINVAL;
+ }
+
+ kc_spin_lock();
+
+ entry = kc_find_key(key, key_size, salt, salt_size);
+ if (!entry) {
+ if (async) {
+ pr_debug("%s task will populate entry\n", __func__);
+ kc_spin_unlock();
+ return -EAGAIN;
+ }
+
+ entry = kc_find_oldest_entry_non_locked();
+ if (!entry) {
+ /* could not find a single non locked entry,
+ * return EBUSY to upper layers so that the
+ * request will be rescheduled
+ */
+ kc_spin_unlock();
+ return -EBUSY;
+ }
+ } else {
+ entry_exists = true;
+ }
+
+ pr_debug("entry with index %d is in state %d\n",
+ entry->key_index, entry->state);
+
+ switch (entry->state) {
+ case (INACTIVE):
+ if (entry_exists) {
+ kc_update_timestamp(entry);
+ entry->state = ACTIVE_ICE_LOADED;
+
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
+ entry->loaded_ref_cnt++;
+ }
+ break;
+ }
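+		/* fall through */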
+ case (FREE):
+ ret = kc_update_entry(entry, key, key_size, salt, salt_size,
+ data_unit);
+ if (ret) {
+ entry->state = SCM_ERROR;
+ entry->scm_error = ret;
+ pr_err("%s: key load error (%d)\n", __func__, ret);
+ } else {
+ kc_update_timestamp(entry);
+ entry->state = ACTIVE_ICE_LOADED;
+
+ /*
+ * In case of UFS only increase ref cnt for async calls,
+ * sync calls from within work thread do not pass
+ * requests further to HW
+ */
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
+ entry->loaded_ref_cnt++;
+ }
+ }
+ break;
+ case (ACTIVE_ICE_PRELOAD):
+ case (INACTIVE_INVALIDATING):
+ ret = -EAGAIN;
+ break;
+ case (ACTIVE_ICE_LOADED):
+ kc_update_timestamp(entry);
+
+ if (!strcmp(s_type, (char *)PFK_UFS)) {
+ if (async)
+ entry->loaded_ref_cnt++;
+ } else {
+ entry->loaded_ref_cnt++;
+ }
+ break;
+ case(SCM_ERROR):
+ ret = entry->scm_error;
+ kc_clear_entry(entry);
+ entry->state = FREE;
+ break;
+ default:
+ pr_err("invalid state %d for entry with key index %d\n",
+ entry->state, entry->key_index);
+ ret = -EINVAL;
+ }
+
+ *key_index = entry->key_index;
+ kc_spin_unlock();
+
+ return ret;
+}
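+
+/*
+ * Illustrative call pattern (hypothetical caller, not part of this file):
+ * an atomic caller first tries with async set to true and, on -EAGAIN,
+ * defers the load to a context where SCM/QSEE calls are allowed and retries
+ * there with async set to false. The wq/load_work names below are
+ * assumptions for illustration.
+ *
+ *	ret = pfk_kc_load_key_start(key, key_size, salt, salt_size,
+ *				    &key_index, true, data_unit);
+ *	if (ret == -EAGAIN)
+ *		queue_work(wq, &load_work);
+ */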
+
+/**
+ * pfk_kc_load_key_end() - finish the process of key loading that was started
+ * by pfk_kc_load_key_start
+ * by marking the entry as not
+ * being in use
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ */
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size)
+{
+ struct kc_entry *entry = NULL;
+ struct task_struct *tmp_pending = NULL;
+ int ref_cnt = 0;
+
+ if (!kc_is_ready())
+ return;
+
+ if (!key || !salt)
+ return;
+
+ if (key_size != PFK_KC_KEY_SIZE)
+ return;
+
+ if (salt_size != PFK_KC_SALT_SIZE)
+ return;
+
+ kc_spin_lock();
+
+ entry = kc_find_key(key, key_size, salt, salt_size);
+ if (!entry) {
+ kc_spin_unlock();
+		pr_err("internal error, there should be an entry to unlock\n");
+
+ return;
+ }
+ ref_cnt = --entry->loaded_ref_cnt;
+
+ if (ref_cnt < 0)
+ pr_err("internal error, ref count should never be negative\n");
+
+ if (!ref_cnt) {
+ entry->state = INACTIVE;
+ /*
+ * wake-up invalidation if it's waiting
+ * for the entry to be released
+ */
+ if (entry->thread_pending) {
+ tmp_pending = entry->thread_pending;
+ entry->thread_pending = NULL;
+
+ kc_spin_unlock();
+ wake_up_process(tmp_pending);
+ return;
+ }
+ }
+
+ kc_spin_unlock();
+}
+
+/**
+ * pfk_kc_remove_key_with_salt() - remove the key from the cache and from the
+ * ICE engine
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ * @salt: pointer to the salt
+ * @salt_size: the size of the salt
+ *
+ * Return 0 in case of success, error otherwise (also if the key does not
+ * exist)
+ */
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size)
+{
+ struct kc_entry *entry = NULL;
+ int res = 0;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ if (!key)
+ return -EINVAL;
+
+ if (!salt)
+ return -EINVAL;
+
+ if (key_size != PFK_KC_KEY_SIZE)
+ return -EINVAL;
+
+ if (salt_size != PFK_KC_SALT_SIZE)
+ return -EINVAL;
+
+ kc_spin_lock();
+
+ entry = kc_find_key(key, key_size, salt, salt_size);
+ if (!entry) {
+ pr_debug("%s: key does not exist\n", __func__);
+ kc_spin_unlock();
+ return -EINVAL;
+ }
+
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ return res;
+ }
+ kc_clear_entry(entry);
+
+ kc_spin_unlock();
+
+ qti_pfk_ice_invalidate_key(entry->key_index, s_type);
+
+ kc_spin_lock();
+ kc_entry_finish_invalidating(entry);
+ kc_spin_unlock();
+
+ return 0;
+}
+
+/**
+ * pfk_kc_remove_key() - remove the key from the cache and from the ICE engine
+ * when no salt is available. Only the key part is matched; if there are
+ * several matching entries, all of them will be removed.
+ *
+ * @key: pointer to the key
+ * @key_size: the size of the key
+ *
+ * Return 0 in case of success, error otherwise (also for non-existing key)
+ */
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
+{
+ struct kc_entry *entry = NULL;
+ int index = 0;
+ int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
+ int temp_indexes_size = 0;
+ int i = 0;
+ int res = 0;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ if (!key)
+ return -EINVAL;
+
+ if (key_size != PFK_KC_KEY_SIZE)
+ return -EINVAL;
+
+ memset(temp_indexes, -1, sizeof(temp_indexes));
+
+ kc_spin_lock();
+
+ entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+ if (!entry) {
+ pr_err("%s: key does not exist\n", __func__);
+ kc_spin_unlock();
+ return -EINVAL;
+ }
+
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ return res;
+ }
+
+ temp_indexes[temp_indexes_size++] = index;
+ kc_clear_entry(entry);
+
+ /* let's clean additional entries with the same key if there are any */
+ do {
+ index++;
+ entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
+ if (!entry)
+ break;
+
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ goto out;
+ }
+
+ temp_indexes[temp_indexes_size++] = index;
+
+ kc_clear_entry(entry);
+
+
+ } while (true);
+
+ kc_spin_unlock();
+
+ temp_indexes_size--;
+ for (i = temp_indexes_size; i >= 0 ; i--)
+ qti_pfk_ice_invalidate_key(
+ kc_entry_at_index(temp_indexes[i])->key_index,
+ s_type);
+
+ /* fall through */
+ res = 0;
+
+out:
+ kc_spin_lock();
+ for (i = temp_indexes_size; i >= 0 ; i--)
+ kc_entry_finish_invalidating(
+ kc_entry_at_index(temp_indexes[i]));
+ kc_spin_unlock();
+
+ return res;
+}
+
+/**
+ * pfk_kc_clear() - clear the table and remove all keys from ICE
+ *
+ * Return 0 on success, error otherwise
+ *
+ */
+int pfk_kc_clear(void)
+{
+ struct kc_entry *entry = NULL;
+ int i = 0;
+ int res = 0;
+
+ if (!kc_is_ready())
+ return -ENODEV;
+
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+ res = kc_entry_start_invalidating(entry);
+ if (res != 0) {
+ kc_spin_unlock();
+ goto out;
+ }
+ kc_clear_entry(entry);
+ }
+ kc_spin_unlock();
+
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+ qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index,
+ s_type);
+
+ /* fall through */
+ res = 0;
+out:
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
+ kc_entry_finish_invalidating(kc_entry_at_index(i));
+ kc_spin_unlock();
+
+ return res;
+}
+
+/**
+ * pfk_kc_clear_on_reset() - clear the key cache table on a storage core reset.
+ * The assumption is that at this point there are no pending transactions.
+ * There is also no need to clear the keys from ICE, since the reset has
+ * already invalidated the key configuration there.
+ */
+void pfk_kc_clear_on_reset(void)
+{
+ struct kc_entry *entry = NULL;
+ int i = 0;
+
+ if (!kc_is_ready())
+ return;
+
+ kc_spin_lock();
+ for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
+ entry = kc_entry_at_index(i);
+ kc_clear_entry(entry);
+ }
+ kc_spin_unlock();
+}
+
+static int pfk_kc_find_storage_type(char **device)
+{
+ char boot[20] = {'\0'};
+ char *match = (char *)strnstr(saved_command_line,
+ "androidboot.bootdevice=",
+ strlen(saved_command_line));
+ if (match) {
+ memcpy(boot, (match + strlen("androidboot.bootdevice=")),
+ sizeof(boot) - 1);
+ if (strnstr(boot, PFK_UFS, strlen(boot)))
+ *device = PFK_UFS;
+
+ return 0;
+ }
+ return -EINVAL;
+}
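+
+/*
+ * Example (hypothetical command line): with
+ * "androidboot.bootdevice=1d84000.ufshc" the copied boot string contains
+ * "ufs", so s_type is switched from the default "sdcc" to "ufs"; for an
+ * eMMC/SDCC boot device no match is found and the default is kept.
+ */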
+
+static int __init pfk_kc_pre_init(void)
+{
+ return pfk_kc_find_storage_type(&s_type);
+}
+
+static void __exit pfk_kc_exit(void)
+{
+ s_type = NULL;
+}
+
+module_init(pfk_kc_pre_init);
+module_exit(pfk_kc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Per-File-Key-KC driver");
diff --git a/security/pfe/pfk_kc.h b/security/pfe/pfk_kc.h
new file mode 100644
index 0000000..30765bf
--- /dev/null
+++ b/security/pfe/pfk_kc.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef PFK_KC_H_
+#define PFK_KC_H_
+
+#include <linux/types.h>
+
+int pfk_kc_init(void);
+int pfk_kc_deinit(void);
+int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size, u32 *key_index,
+ bool async, unsigned int data_unit);
+void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
+ const unsigned char *salt, size_t salt_size);
+int pfk_kc_remove_key(const unsigned char *key, size_t key_size);
+int pfk_kc_clear(void);
+void pfk_kc_clear_on_reset(void);
+const char *pfk_kc_get_storage_type(void);
+extern char *saved_command_line;
+
+
+#endif /* PFK_KC_H_ */
diff --git a/security/security.c b/security/security.c
index f71c586a..81cebf2 100644
--- a/security/security.c
+++ b/security/security.c
@@ -623,6 +623,14 @@ int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode
}
EXPORT_SYMBOL_GPL(security_inode_create);
+int security_inode_post_create(struct inode *dir, struct dentry *dentry,
+ umode_t mode)
+{
+ if (unlikely(IS_PRIVATE(dir)))
+ return 0;
+ return call_int_hook(inode_post_create, 0, dir, dentry, mode);
+}
+
int security_inode_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *new_dentry)
{
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index cc5e26b..4b0da5f 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -26,8 +26,7 @@
#include <linux/in.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
-#include "flask.h"
-#include "avc.h"
+#include "security.h"
struct task_security_struct {
u32 osid; /* SID prior to last execve */
@@ -64,6 +63,8 @@ struct inode_security_struct {
u32 sid; /* SID of this object */
u16 sclass; /* security class of this object */
unsigned char initialized; /* initialization flag */
+ u32 tag; /* Per-File-Encryption tag */
+ void *pfk_data; /* Per-File-Key data from ecryptfs */
spinlock_t lock;
};
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 23e762d..ba68c43 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -15,7 +15,6 @@
#include <linux/types.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
-#include "flask.h"
#define SECSID_NULL 0x00000000 /* unspecified SID */
#define SECSID_WILD 0xffffffff /* wildcard SID */
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 70d3066..017c47e 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -4333,6 +4333,12 @@ static int smack_key_permission(key_ref_t key_ref,
int request = 0;
int rc;
+ /*
+ * Validate requested permissions
+ */
+ if (perm & ~KEY_NEED_ALL)
+ return -EINVAL;
+
keyp = key_ref_to_ptr(key_ref);
if (keyp == NULL)
return -EINVAL;
@@ -4356,10 +4362,10 @@ static int smack_key_permission(key_ref_t key_ref,
ad.a.u.key_struct.key = keyp->serial;
ad.a.u.key_struct.key_desc = keyp->description;
#endif
- if (perm & KEY_NEED_READ)
- request = MAY_READ;
+ if (perm & (KEY_NEED_READ | KEY_NEED_SEARCH | KEY_NEED_VIEW))
+ request |= MAY_READ;
if (perm & (KEY_NEED_WRITE | KEY_NEED_LINK | KEY_NEED_SETATTR))
- request = MAY_WRITE;
+ request |= MAY_WRITE;
rc = smk_access(tkp, keyp->security, request, &ad);
rc = smk_bu_note("key access", tkp, keyp->security, request, rc);
return rc;
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
index d361bb7..8db1890 100644
--- a/sound/pci/hda/hda_bind.c
+++ b/sound/pci/hda/hda_bind.c
@@ -109,7 +109,8 @@ static int hda_codec_driver_probe(struct device *dev)
err = snd_hda_codec_build_controls(codec);
if (err < 0)
goto error_module;
- if (codec->card->registered) {
+ /* only register after the bus probe finished; otherwise it's racy */
+ if (!codec->bus->bus_probing && codec->card->registered) {
err = snd_card_register(codec->card);
if (err < 0)
goto error_module;
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 0d98bb9..acacc19 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -68,6 +68,7 @@ struct hda_bus {
unsigned int response_reset:1; /* controller was reset */
unsigned int in_reset:1; /* during reset operation */
unsigned int no_response_fallback:1; /* don't fallback at RIRB error */
+ unsigned int bus_probing :1; /* during probing process */
int primary_dig_out_type; /* primary digital out PCM type */
unsigned int mixer_assigned; /* codec addr for mixer name */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 1ddeebc..1bb7613 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2315,6 +2315,7 @@ static int azx_probe_continue(struct azx *chip)
int dev = chip->dev_index;
int err;
+ to_hda_bus(bus)->bus_probing = 1;
hda->probe_continued = 1;
/* bind with i915 if needed */
@@ -2410,6 +2411,7 @@ static int azx_probe_continue(struct azx *chip)
if (err < 0)
hda->init_failed = 1;
complete_all(&hda->probe_wait);
+ to_hda_bus(bus)->bus_probing = 0;
return err;
}
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 31a84a5..fead0ac 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -924,6 +924,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK),
+ SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK),
SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index dbb38fe..bf1ffca 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -515,6 +515,15 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
}
}
+/* get a primary headphone pin if available */
+static hda_nid_t alc_get_hp_pin(struct alc_spec *spec)
+{
+ if (spec->gen.autocfg.hp_pins[0])
+ return spec->gen.autocfg.hp_pins[0];
+ if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
+ return spec->gen.autocfg.line_out_pins[0];
+ return 0;
+}
/*
* Realtek SSID verification
@@ -725,9 +734,7 @@ static int alc_subsystem_id(struct hda_codec *codec, const hda_nid_t *ports)
* 15 : 1 --> enable the function "Mute internal speaker
* when the external headphone out jack is plugged"
*/
- if (!spec->gen.autocfg.hp_pins[0] &&
- !(spec->gen.autocfg.line_out_pins[0] &&
- spec->gen.autocfg.line_out_type == AUTO_PIN_HP_OUT)) {
+ if (!alc_get_hp_pin(spec)) {
hda_nid_t nid;
tmp = (ass >> 11) & 0x3; /* HP to chassis */
nid = ports[tmp];
@@ -1848,6 +1855,8 @@ enum {
ALC887_FIXUP_BASS_CHMAP,
ALC1220_FIXUP_GB_DUAL_CODECS,
ALC1220_FIXUP_CLEVO_P950,
+ ALC1220_FIXUP_SYSTEM76_ORYP5,
+ ALC1220_FIXUP_SYSTEM76_ORYP5_PINS,
};
static void alc889_fixup_coef(struct hda_codec *codec,
@@ -2049,6 +2058,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec,
snd_hda_override_conn_list(codec, 0x1b, 1, conn1);
}
+static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action);
+
+static void alc1220_fixup_system76_oryp5(struct hda_codec *codec,
+ const struct hda_fixup *fix,
+ int action)
+{
+ alc1220_fixup_clevo_p950(codec, fix, action);
+ alc_fixup_headset_mode_no_hp_mic(codec, fix, action);
+}
+
static const struct hda_fixup alc882_fixups[] = {
[ALC882_FIXUP_ABIT_AW9D_MAX] = {
.type = HDA_FIXUP_PINS,
@@ -2293,6 +2313,19 @@ static const struct hda_fixup alc882_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc1220_fixup_clevo_p950,
},
+ [ALC1220_FIXUP_SYSTEM76_ORYP5] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc1220_fixup_system76_oryp5,
+ },
+ [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5,
+ },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2369,6 +2402,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
+ SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530),
@@ -2959,7 +2994,7 @@ static void alc282_restore_default_value(struct hda_codec *codec)
static void alc282_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
int coef78;
@@ -2996,7 +3031,7 @@ static void alc282_init(struct hda_codec *codec)
static void alc282_shutup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
int coef78;
@@ -3074,14 +3109,9 @@ static void alc283_restore_default_value(struct hda_codec *codec)
static void alc283_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
- if (!spec->gen.autocfg.hp_outs) {
- if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
- hp_pin = spec->gen.autocfg.line_out_pins[0];
- }
-
alc283_restore_default_value(codec);
if (!hp_pin)
@@ -3115,14 +3145,9 @@ static void alc283_init(struct hda_codec *codec)
static void alc283_shutup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
- if (!spec->gen.autocfg.hp_outs) {
- if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT)
- hp_pin = spec->gen.autocfg.line_out_pins[0];
- }
-
if (!hp_pin) {
alc269_shutup(codec);
return;
@@ -3156,7 +3181,7 @@ static void alc283_shutup(struct hda_codec *codec)
static void alc256_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
if (!hp_pin)
@@ -3192,7 +3217,7 @@ static void alc256_init(struct hda_codec *codec)
static void alc256_shutup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
if (!hp_pin) {
@@ -3228,7 +3253,7 @@ static void alc256_shutup(struct hda_codec *codec)
static void alc225_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp1_pin_sense, hp2_pin_sense;
if (!hp_pin)
@@ -3271,7 +3296,7 @@ static void alc225_init(struct hda_codec *codec)
static void alc225_shutup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp1_pin_sense, hp2_pin_sense;
if (!hp_pin) {
@@ -3315,7 +3340,7 @@ static void alc225_shutup(struct hda_codec *codec)
static void alc_default_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
if (!hp_pin)
@@ -3344,7 +3369,7 @@ static void alc_default_init(struct hda_codec *codec)
static void alc_default_shutup(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
bool hp_pin_sense;
if (!hp_pin) {
@@ -3376,7 +3401,7 @@ static void alc_default_shutup(struct hda_codec *codec)
static void alc294_hp_init(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
int i, val;
if (!hp_pin)
@@ -4780,7 +4805,7 @@ static void alc_update_headset_mode(struct hda_codec *codec)
struct alc_spec *spec = codec->spec;
hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]];
- hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0];
+ hda_nid_t hp_pin = alc_get_hp_pin(spec);
int new_headset_mode;
@@ -5059,7 +5084,7 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec,
static void alc_shutup_dell_xps13(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- int hp_pin = spec->gen.autocfg.hp_pins[0];
+ int hp_pin = alc_get_hp_pin(spec);
/* Prevent pop noises when headphones are plugged in */
snd_hda_codec_write(codec, hp_pin, 0,
@@ -5152,7 +5177,7 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec,
if (action == HDA_FIXUP_ACT_PROBE) {
int mic_pin = find_ext_mic_pin(codec);
- int hp_pin = spec->gen.autocfg.hp_pins[0];
+ int hp_pin = alc_get_hp_pin(spec);
if (snd_BUG_ON(!mic_pin || !hp_pin))
return;
@@ -5575,6 +5600,8 @@ enum {
ALC294_FIXUP_ASUS_MIC,
ALC294_FIXUP_ASUS_HEADSET_MIC,
ALC294_FIXUP_ASUS_SPK,
+ ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
+ ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -6499,6 +6526,26 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
},
+ [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
+ [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* Disable PCBEEP-IN passthrough */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6677,6 +6724,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC),
SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
@@ -7179,7 +7227,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x12, 0x90a60130},
{0x19, 0x03a11020},
{0x21, 0x0321101f}),
- SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE,
+ SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
{0x12, 0x90a60130},
{0x14, 0x90170110},
{0x19, 0x04a11040},
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index d00734d..e5b6769 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -795,6 +795,8 @@ static int hdmi_codec_probe(struct platform_device *pdev)
if (hcd->spdif)
hcp->daidrv[i] = hdmi_spdif_dai;
+ dev_set_drvdata(dev, hcp);
+
ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv,
dai_count);
if (ret) {
@@ -802,8 +804,6 @@ static int hdmi_codec_probe(struct platform_device *pdev)
__func__, ret);
return ret;
}
-
- dev_set_drvdata(dev, hcp);
return 0;
}
diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
index 6ec19fb..2e75b5b 100644
--- a/sound/soc/fsl/Kconfig
+++ b/sound/soc/fsl/Kconfig
@@ -221,7 +221,7 @@
config SND_SOC_EUKREA_TLV320
tristate "Eukrea TLV320"
- depends on ARCH_MXC && I2C
+ depends on ARCH_MXC && !ARM64 && I2C
select SND_SOC_TLV320AIC23_I2C
select SND_SOC_IMX_AUDMUX
select SND_SOC_IMX_SSI
diff --git a/sound/soc/intel/atom/sst/sst_loader.c b/sound/soc/intel/atom/sst/sst_loader.c
index 27413eb..b8c4567 100644
--- a/sound/soc/intel/atom/sst/sst_loader.c
+++ b/sound/soc/intel/atom/sst/sst_loader.c
@@ -354,14 +354,14 @@ static int sst_request_fw(struct intel_sst_drv *sst)
const struct firmware *fw;
retval = request_firmware(&fw, sst->firmware_name, sst->dev);
- if (fw == NULL) {
- dev_err(sst->dev, "fw is returning as null\n");
- return -EINVAL;
- }
if (retval) {
dev_err(sst->dev, "request fw failed %d\n", retval);
return retval;
}
+ if (fw == NULL) {
+ dev_err(sst->dev, "fw is returning as null\n");
+ return -EINVAL;
+ }
mutex_lock(&sst->sst_lock);
retval = sst_cache_and_parse_fw(sst, fw);
mutex_unlock(&sst->sst_lock);
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index afba778..a893f5a 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -377,6 +377,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum,
return 0;
}
+/* Set up an implicit feedback endpoint from a quirk. Returns 0 if no quirk
+ * applies; returns 1 if a quirk was found and applied.
+ */
static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
struct usb_device *dev,
struct usb_interface_descriptor *altsd,
@@ -447,7 +450,7 @@ static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs,
subs->data_endpoint->sync_master = subs->sync_endpoint;
- return 0;
+ return 1;
}
static int set_sync_endpoint(struct snd_usb_substream *subs,
@@ -486,6 +489,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
if (err < 0)
return err;
+ /* endpoint set by quirk */
+ if (err > 0)
+ return 0;
+
if (altsd->bNumEndpoints < 2)
return 0;
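The comment and return-value change above give set_sync_ep_implicit_fb_quirk() a tri-state convention: negative means error, 0 means no quirk matched, 1 means the quirk already configured the sync endpoint, so the caller can skip the generic setup. A small sketch of how a caller is expected to consume it (illustrative names only, not the real USB-audio code):

#include <stdio.h>

/* pretend quirk handler: <0 error, 0 no quirk, 1 endpoint configured */
static int apply_implicit_fb_quirk(int has_quirk)
{
	return has_quirk ? 1 : 0;
}

static int set_sync_endpoint_sketch(int has_quirk)
{
	int err = apply_implicit_fb_quirk(has_quirk);

	if (err < 0)
		return err;
	if (err > 0)		/* endpoint set by quirk */
		return 0;

	printf("no quirk matched, run the generic sync-endpoint setup\n");
	return 0;
}

int main(void)
{
	set_sync_endpoint_sketch(1);
	set_sync_endpoint_sketch(0);
	return 0;
}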
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 7e93686..d71e019 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1448,6 +1448,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
case 0x20b1: /* XMOS based devices */
case 0x152a: /* Thesycon devices */
case 0x25ce: /* Mytek devices */
+ case 0x2ab6: /* T+A devices */
if (fp->dsd_raw)
return SNDRV_PCM_FMTBIT_DSD_U32_BE;
break;
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index ddbf62b..a942f1b 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -1229,13 +1229,13 @@ static void uaudio_qmi_bye_cb(struct qmi_handle *handle, unsigned int node)
{
struct uaudio_qmi_svc *svc = uaudio_svc;
- uaudio_dbg("client node:%x\n", node);
if (svc->uaudio_svc_hdl != handle) {
uaudio_err("handle mismatch\n");
return;
}
if (svc->client_connected && svc->client_sq.sq_node == node) {
+ uaudio_dbg("node:\n", node);
queue_work(svc->uaudio_wq, &svc->qmi_disconnect_work);
svc->client_sq.sq_node = 0;
svc->client_sq.sq_port = 0;
@@ -1249,7 +1249,6 @@ static void uaudio_qmi_svc_disconnect_cb(struct qmi_handle *handle,
{
struct uaudio_qmi_svc *svc = uaudio_svc;
- uaudio_dbg("client node:%x port:%x\n", node, port);
if (svc->uaudio_svc_hdl != handle) {
uaudio_err("handle mismatch\n");
return;
@@ -1257,6 +1256,7 @@ static void uaudio_qmi_svc_disconnect_cb(struct qmi_handle *handle,
if (svc->client_connected && svc->client_sq.sq_node == node &&
svc->client_sq.sq_port == port) {
+ uaudio_dbg("client node:%x port:%x\n", node, port);
queue_work(svc->uaudio_wq, &svc->qmi_disconnect_work);
svc->client_sq.sq_node = 0;
svc->client_sq.sq_port = 0;
diff --git a/tools/arch/riscv/include/uapi/asm/bitsperlong.h b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
new file mode 100644
index 0000000..0b3cb52
--- /dev/null
+++ b/tools/arch/riscv/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
+#define _UAPI_ASM_RISCV_BITSPERLONG_H
+
+#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
index f216b2f..42a7878 100644
--- a/tools/build/Makefile.feature
+++ b/tools/build/Makefile.feature
@@ -79,8 +79,8 @@
cplus-demangle \
hello \
libbabeltrace \
- liberty \
- liberty-z \
+ libbfd-liberty \
+ libbfd-liberty-z \
libunwind-debug-frame \
libunwind-debug-frame-arm \
libunwind-debug-frame-aarch64 \
diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile
index 0516259..bf8a8eb 100644
--- a/tools/build/feature/Makefile
+++ b/tools/build/feature/Makefile
@@ -15,8 +15,8 @@
test-libbfd.bin \
test-disassembler-four-args.bin \
test-reallocarray.bin \
- test-liberty.bin \
- test-liberty-z.bin \
+ test-libbfd-liberty.bin \
+ test-libbfd-liberty-z.bin \
test-cplus-demangle.bin \
test-libelf.bin \
test-libelf-getphdrnum.bin \
@@ -200,7 +200,7 @@
$(BUILD)
$(OUTPUT)test-libbfd.bin:
- $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
+ $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
$(OUTPUT)test-disassembler-four-args.bin:
$(BUILD) -DPACKAGE='"perf"' -lbfd -lopcodes
@@ -208,10 +208,10 @@
$(OUTPUT)test-reallocarray.bin:
$(BUILD)
-$(OUTPUT)test-liberty.bin:
+$(OUTPUT)test-libbfd-liberty.bin:
$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty
-$(OUTPUT)test-liberty-z.bin:
+$(OUTPUT)test-libbfd-liberty-z.bin:
$(CC) $(CFLAGS) -Wall -Werror -o $@ test-libbfd.c -DPACKAGE='"perf"' $(LDFLAGS) -lbfd -ldl -liberty -lz
$(OUTPUT)test-cplus-demangle.bin:
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index bbb2a8e..d7e06fe 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -1178,6 +1178,7 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
FILE *file;
char cmd[PATH_MAX];
char *mac_addr;
+ int str_len;
/*
* Set the configuration for the specified interface with
@@ -1301,8 +1302,18 @@ static int kvp_set_ip_info(char *if_name, struct hv_kvp_ipaddr_value *new_val)
* invoke the external script to do its magic.
*/
- snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s",
- "hv_set_ifconfig", if_file);
+ str_len = snprintf(cmd, sizeof(cmd), KVP_SCRIPTS_PATH "%s %s",
+ "hv_set_ifconfig", if_file);
+ /*
+ * This is a little overcautious, but it's necessary to suppress some
+ * false warnings from gcc 8.0.1.
+ */
+ if (str_len <= 0 || (unsigned int)str_len >= sizeof(cmd)) {
+ syslog(LOG_ERR, "Cmd '%s' (len=%d) may be too long",
+ cmd, str_len);
+ return HV_E_FAIL;
+ }
+
if (system(cmd)) {
syslog(LOG_ERR, "Failed to execute cmd '%s'; error: %d %s",
cmd, errno, strerror(errno));
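The length check added above relies on snprintf() reporting the number of characters the output would have needed, so a return value that is negative or not smaller than the buffer size means the command line was truncated and must not be executed. A standalone sketch of the same check, with made-up strings and a deliberately small buffer so the failure path triggers:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char cmd[16];
	int len;

	len = snprintf(cmd, sizeof(cmd), "%s %s", "hv_set_ifconfig",
		       "/var/lib/hyperv/ifcfg-eth0");
	if (len < 0 || (size_t)len >= sizeof(cmd)) {
		fprintf(stderr, "cmd truncated (needed %d bytes)\n", len);
		return 1;
	}
	printf("ok: %s\n", cmd);
	return 0;
}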
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c
index 3040830..8454566 100644
--- a/tools/iio/iio_generic_buffer.c
+++ b/tools/iio/iio_generic_buffer.c
@@ -330,7 +330,7 @@ static const struct option longopts[] = {
int main(int argc, char **argv)
{
- unsigned long long num_loops = 2;
+ long long num_loops = 2;
unsigned long timedelay = 1000000;
unsigned long buf_len = 128;
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
index 8dd6aef..57aaeaf8 100644
--- a/tools/include/uapi/asm/bitsperlong.h
+++ b/tools/include/uapi/asm/bitsperlong.h
@@ -13,6 +13,10 @@
#include "../../arch/mips/include/uapi/asm/bitsperlong.h"
#elif defined(__ia64__)
#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
+#elif defined(__riscv)
+#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
+#elif defined(__alpha__)
+#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
#else
#include <asm-generic/bitsperlong.h>
#endif
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index 60aa4ca..7a00147 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -77,6 +77,7 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
{
__u32 name_len = create_attr->name ? strlen(create_attr->name) : 0;
union bpf_attr attr;
+ int ret;
memset(&attr, '\0', sizeof(attr));
@@ -94,7 +95,15 @@ int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr)
attr.map_ifindex = create_attr->map_ifindex;
attr.inner_map_fd = create_attr->inner_map_fd;
- return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+ ret = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+ if (ret < 0 && errno == EINVAL && create_attr->name) {
+ /* Retry the same syscall, but without the name.
+ * Pre v4.14 kernels don't support map names.
+ */
+ memset(attr.map_name, 0, sizeof(attr.map_name));
+ return sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
+ }
+ return ret;
}
int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
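The retry added above follows a common compatibility pattern: submit the full attribute set first, and if the kernel rejects it with EINVAL, clear the optional field (here the map name, which pre-v4.14 kernels do not support) and try exactly once more. A self-contained sketch of the pattern with a stubbed-out syscall, not the real libbpf code:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct fake_attr {
	char map_name[16];
};

static int fake_sys_create(const struct fake_attr *attr)
{
	/* pretend this kernel predates map names */
	if (attr->map_name[0]) {
		errno = EINVAL;
		return -1;
	}
	return 3;	/* fake fd */
}

static int create_map(const char *name)
{
	struct fake_attr attr = { 0 };
	int ret;

	if (name)
		strncpy(attr.map_name, name, sizeof(attr.map_name) - 1);

	ret = fake_sys_create(&attr);
	if (ret < 0 && errno == EINVAL && name) {
		memset(attr.map_name, 0, sizeof(attr.map_name));
		ret = fake_sys_create(&attr);	/* retry without the name */
	}
	return ret;
}

int main(void)
{
	printf("fd=%d\n", create_map("my_map"));
	return 0;
}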
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index f00ea77..849b3be 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -688,18 +688,20 @@
ifeq ($(feature-libbfd), 1)
EXTLIBS += -lbfd
+else
+ # we are on a system that requires -liberty and (maybe) -lz
+ # to link against -lbfd; test each case individually here
# call all detections now so we get correct
# status in VF output
- $(call feature_check,liberty)
- $(call feature_check,liberty-z)
- $(call feature_check,cplus-demangle)
+ $(call feature_check,libbfd-liberty)
+ $(call feature_check,libbfd-liberty-z)
- ifeq ($(feature-liberty), 1)
- EXTLIBS += -liberty
+ ifeq ($(feature-libbfd-liberty), 1)
+ EXTLIBS += -lbfd -liberty
else
- ifeq ($(feature-liberty-z), 1)
- EXTLIBS += -liberty -lz
+ ifeq ($(feature-libbfd-liberty-z), 1)
+ EXTLIBS += -lbfd -liberty -lz
endif
endif
endif
@@ -709,24 +711,24 @@
else
ifdef HAVE_CPLUS_DEMANGLE_SUPPORT
EXTLIBS += -liberty
- CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
else
- ifneq ($(feature-libbfd), 1)
- ifneq ($(feature-liberty), 1)
- ifneq ($(feature-liberty-z), 1)
- # we dont have neither HAVE_CPLUS_DEMANGLE_SUPPORT
- # or any of 'bfd iberty z' trinity
- ifeq ($(feature-cplus-demangle), 1)
- EXTLIBS += -liberty
- CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
- else
- msg := $(warning No bfd.h/libbfd found, please install binutils-dev[el]/zlib-static/libiberty-dev to gain symbol demangling)
- CFLAGS += -DNO_DEMANGLE
- endif
- endif
+ ifeq ($(filter -liberty,$(EXTLIBS)),)
+ $(call feature_check,cplus-demangle)
+
+ # we don't have HAVE_CPLUS_DEMANGLE_SUPPORT
+ # nor any of the 'bfd iberty z' trinity
+ ifeq ($(feature-cplus-demangle), 1)
+ EXTLIBS += -liberty
+ else
+ msg := $(warning No bfd.h/libbfd found, please install binutils-dev[el]/zlib-static/libiberty-dev to gain symbol demangling)
+ CFLAGS += -DNO_DEMANGLE
endif
endif
endif
+
+ ifneq ($(filter -liberty,$(EXTLIBS)),)
+ CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
+ endif
endif
ifneq ($(filter -lbfd,$(EXTLIBS)),)
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index b32409a..081353d 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -156,7 +156,7 @@ int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
if (strstr(cpuid, "Intel")) {
kvm->exit_reasons = vmx_exit_reasons;
kvm->exit_reasons_isa = "VMX";
- } else if (strstr(cpuid, "AMD")) {
+ } else if (strstr(cpuid, "AMD") || strstr(cpuid, "Hygon")) {
kvm->exit_reasons = svm_exit_reasons;
kvm->exit_reasons_isa = "SVM";
} else
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py
index ff9b60b..44090a9 100644
--- a/tools/perf/tests/attr.py
+++ b/tools/perf/tests/attr.py
@@ -116,7 +116,7 @@
if not self.has_key(t) or not other.has_key(t):
continue
if not data_equal(self[t], other[t]):
- log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
+ log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
index 699561f..67bcbf8 100644
--- a/tools/perf/tests/evsel-tp-sched.c
+++ b/tools/perf/tests/evsel-tp-sched.c
@@ -17,7 +17,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
return -1;
}
- is_signed = !!(field->flags | FIELD_IS_SIGNED);
+ is_signed = !!(field->flags & FIELD_IS_SIGNED);
if (should_be_signed && !is_signed) {
pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n",
evsel->name, name, is_signed, should_be_signed);
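The one-character fix above matters because bitwise OR with a non-zero flag can never yield zero, so the old check reported every field as signed; AND actually tests whether the bit is set. A tiny demonstration (the flag value is illustrative; the real constant comes from the traceevent headers):

#include <stdio.h>

#define FIELD_IS_SIGNED 4	/* illustrative value */

int main(void)
{
	unsigned int flags = 0;	/* a field without the signed bit */

	printf("old test (|): %d\n", !!(flags | FIELD_IS_SIGNED));	/* always 1 */
	printf("new test (&): %d\n", !!(flags & FIELD_IS_SIGNED));	/* 0 here */
	return 0;
}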
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index 1c16e56..7cb99b4 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -13,7 +13,8 @@
local verbose=$1
if [ $had_vfs_getname -eq 1 ] ; then
line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
- perf probe $verbose "vfs_getname=getname_flags:${line} pathname=result->name:string"
+ perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
+ perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
fi
}
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 32ef7bd..dc2212e 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -766,6 +766,7 @@ static enum match_result match_chain(struct callchain_cursor_node *node,
cnode->cycles_count += node->branch_flags.cycles;
cnode->iter_count += node->nr_loop_iter;
cnode->iter_cycles += node->iter_cycles;
+ cnode->from_count++;
}
}
@@ -1345,10 +1346,10 @@ static int branch_to_str(char *bf, int bfsize,
static int branch_from_str(char *bf, int bfsize,
u64 branch_count,
u64 cycles_count, u64 iter_count,
- u64 iter_cycles)
+ u64 iter_cycles, u64 from_count)
{
int printed = 0, i = 0;
- u64 cycles;
+ u64 cycles, v = 0;
cycles = cycles_count / branch_count;
if (cycles) {
@@ -1357,14 +1358,16 @@ static int branch_from_str(char *bf, int bfsize,
bf + printed, bfsize - printed);
}
- if (iter_count) {
- printed += count_pri64_printf(i++, "iter",
- iter_count,
- bf + printed, bfsize - printed);
+ if (iter_count && from_count) {
+ v = iter_count / from_count;
+ if (v) {
+ printed += count_pri64_printf(i++, "iter",
+ v, bf + printed, bfsize - printed);
- printed += count_pri64_printf(i++, "avg_cycles",
- iter_cycles / iter_count,
- bf + printed, bfsize - printed);
+ printed += count_pri64_printf(i++, "avg_cycles",
+ iter_cycles / iter_count,
+ bf + printed, bfsize - printed);
+ }
}
if (i)
@@ -1377,6 +1380,7 @@ static int counts_str_build(char *bf, int bfsize,
u64 branch_count, u64 predicted_count,
u64 abort_count, u64 cycles_count,
u64 iter_count, u64 iter_cycles,
+ u64 from_count,
struct branch_type_stat *brtype_stat)
{
int printed;
@@ -1389,7 +1393,8 @@ static int counts_str_build(char *bf, int bfsize,
predicted_count, abort_count, brtype_stat);
} else {
printed = branch_from_str(bf, bfsize, branch_count,
- cycles_count, iter_count, iter_cycles);
+ cycles_count, iter_count, iter_cycles,
+ from_count);
}
if (!printed)
@@ -1402,13 +1407,14 @@ static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
u64 branch_count, u64 predicted_count,
u64 abort_count, u64 cycles_count,
u64 iter_count, u64 iter_cycles,
+ u64 from_count,
struct branch_type_stat *brtype_stat)
{
char str[256];
counts_str_build(str, sizeof(str), branch_count,
predicted_count, abort_count, cycles_count,
- iter_count, iter_cycles, brtype_stat);
+ iter_count, iter_cycles, from_count, brtype_stat);
if (fp)
return fprintf(fp, "%s", str);
@@ -1422,6 +1428,7 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
u64 branch_count, predicted_count;
u64 abort_count, cycles_count;
u64 iter_count, iter_cycles;
+ u64 from_count;
branch_count = clist->branch_count;
predicted_count = clist->predicted_count;
@@ -1429,11 +1436,12 @@ int callchain_list_counts__printf_value(struct callchain_list *clist,
cycles_count = clist->cycles_count;
iter_count = clist->iter_count;
iter_cycles = clist->iter_cycles;
+ from_count = clist->from_count;
return callchain_counts_printf(fp, bf, bfsize, branch_count,
predicted_count, abort_count,
cycles_count, iter_count, iter_cycles,
- &clist->brtype_stat);
+ from_count, &clist->brtype_stat);
}
static void free_callchain_node(struct callchain_node *node)
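With the new from_count accumulator above, the branch output reports loop iterations per occurrence of the branch instead of the raw accumulated total, and only prints the "iter"/"avg_cycles" pair when that average is non-zero. A small arithmetic sketch with made-up counter values:

#include <stdio.h>

int main(void)
{
	unsigned long long iter_count = 300;	/* total loop iterations seen */
	unsigned long long from_count = 20;	/* samples hitting this branch */
	unsigned long long iter_cycles = 6000;
	unsigned long long v;

	if (iter_count && from_count) {
		v = iter_count / from_count;
		if (v) {
			printf("iter %llu\n", v);			/* 15 */
			printf("avg_cycles %llu\n",
			       iter_cycles / iter_count);		/* 20 */
		}
	}
	return 0;
}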
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 154560b..99d38ac 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -118,6 +118,7 @@ struct callchain_list {
bool has_children;
};
u64 branch_count;
+ u64 from_count;
u64 predicted_count;
u64 abort_count;
u64 cycles_count;
diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c
index bbed90e..cee717a 100644
--- a/tools/perf/util/dso.c
+++ b/tools/perf/util/dso.c
@@ -295,7 +295,7 @@ static int decompress_kmodule(struct dso *dso, const char *name,
unlink(tmpbuf);
if (pathname && (fd >= 0))
- strncpy(pathname, tmpbuf, len);
+ strlcpy(pathname, tmpbuf, len);
return fd;
}
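The strncpy() to strlcpy() conversions here (and in header.c and probe-file.c below) are about guaranteed NUL termination: strncpy() leaves the destination unterminated when the source fills the buffer, while strlcpy() always terminates and truncates. Since glibc has no strlcpy(), this sketch carries a tiny local version purely for the demonstration:

#include <stdio.h>
#include <string.h>

static size_t demo_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t copy = len >= size ? size - 1 : len;

		memcpy(dst, src, copy);
		dst[copy] = '\0';
	}
	return len;
}

int main(void)
{
	char a[8], b[8];

	strncpy(a, "0123456789", sizeof(a));	/* no terminator written */
	demo_strlcpy(b, "0123456789", sizeof(b));

	printf("strlcpy result: \"%s\"\n", b);	/* "0123456" */
	printf("strncpy terminated? %s\n",
	       memchr(a, '\0', sizeof(a)) ? "yes" : "no");	/* no */
	return 0;
}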
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 3cadc25..bd9226b 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2636,6 +2636,7 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
struct perf_header *header = &session->header;
int fd = perf_data__fd(session->data);
struct stat st;
+ time_t stctime;
int ret, bit;
hd.fp = fp;
@@ -2645,7 +2646,8 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full)
if (ret == -1)
return -1;
- fprintf(fp, "# captured on : %s", ctime(&st.st_ctime));
+ stctime = st.st_ctime;
+ fprintf(fp, "# captured on : %s", ctime(&stctime));
fprintf(fp, "# header version : %u\n", header->version);
fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset);
@@ -3521,7 +3523,7 @@ perf_event__synthesize_event_update_unit(struct perf_tool *tool,
if (ev == NULL)
return -ENOMEM;
- strncpy(ev->data, evsel->unit, size);
+ strlcpy(ev->data, evsel->unit, size + 1);
err = process(tool, (union perf_event *)ev, NULL, NULL);
free(ev);
return err;
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index d7403d1..b1508ce 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -1988,7 +1988,7 @@ static void save_iterations(struct iterations *iter,
{
int i;
- iter->nr_loop_iter = nr;
+ iter->nr_loop_iter++;
iter->cycles = 0;
for (i = 0; i < nr; i++)
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index b76088f..6a65488 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -424,7 +424,7 @@ static int probe_cache__open(struct probe_cache *pcache, const char *target,
if (target && build_id_cache__cached(target)) {
/* This is a cached buildid */
- strncpy(sbuildid, target, SBUILD_ID_SIZE);
+ strlcpy(sbuildid, target, SBUILD_ID_SIZE);
dir_name = build_id_cache__linkname(sbuildid, NULL, 0);
goto found;
}
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index ce501ba..69f5f61 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -939,7 +939,8 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist,
file = PyFile_FromFile(fp, "perf", "r", NULL);
#else
- file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1, NULL, NULL, NULL, 1);
+ file = PyFile_FromFd(evlist->pollfd.entries[i].fd, "perf", "r", -1,
+ NULL, NULL, NULL, 0);
#endif
if (file == NULL)
goto free_list;
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index d2c78ff..aa7f8c1 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -499,7 +499,7 @@ static int s390_cpumsf_samples(struct s390_cpumsf_queue *sfq, u64 *ts)
aux_ts = get_trailer_time(buf);
if (!aux_ts) {
pr_err("[%#08" PRIx64 "] Invalid AUX trailer entry TOD clock base\n",
- sfq->buffer->data_offset);
+ (s64)sfq->buffer->data_offset);
aux_ts = ~0ULL;
goto out;
}
diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
index 84e2b64..2fa3c57 100755
--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
@@ -585,9 +585,9 @@
read_trace_data(filename)
-clear_trace_file()
-# Free the memory
if interval:
+ clear_trace_file()
+ # Free the memory
free_trace_buffer()
if graph_data_present == False:
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 0ef6820..89f8b0d 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -51,10 +51,10 @@ static struct {
struct iphdr iph;
struct tcphdr tcp;
} __packed pkt_v4 = {
- .eth.h_proto = bpf_htons(ETH_P_IP),
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IP),
.iph.ihl = 5,
.iph.protocol = 6,
- .iph.tot_len = bpf_htons(MAGIC_BYTES),
+ .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
};
@@ -64,9 +64,9 @@ static struct {
struct ipv6hdr iph;
struct tcphdr tcp;
} __packed pkt_v6 = {
- .eth.h_proto = bpf_htons(ETH_P_IPV6),
+ .eth.h_proto = __bpf_constant_htons(ETH_P_IPV6),
.iph.nexthdr = 6,
- .iph.payload_len = bpf_htons(MAGIC_BYTES),
+ .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
.tcp.urg_ptr = 123,
};
@@ -1136,7 +1136,9 @@ static void test_stacktrace_build_id(void)
int i, j;
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
int build_id_matches = 0;
+ int retry = 1;
+retry:
err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
goto out;
@@ -1249,6 +1251,19 @@ static void test_stacktrace_build_id(void)
previous_key = key;
} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+ * try it one more time.
+ */
+ if (build_id_matches < 1 && retry--) {
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+ bpf_object__close(obj);
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+ __func__);
+ goto retry;
+ }
+
if (CHECK(build_id_matches < 1, "build id match",
"Didn't find expected build ID from the map\n"))
goto disable_pmu;
@@ -1289,7 +1304,9 @@ static void test_stacktrace_build_id_nmi(void)
int i, j;
struct bpf_stack_build_id id_offs[PERF_MAX_STACK_DEPTH];
int build_id_matches = 0;
+ int retry = 1;
+retry:
err = bpf_prog_load(file, BPF_PROG_TYPE_PERF_EVENT, &obj, &prog_fd);
if (CHECK(err, "prog_load", "err %d errno %d\n", err, errno))
return;
@@ -1384,6 +1401,19 @@ static void test_stacktrace_build_id_nmi(void)
previous_key = key;
} while (bpf_map_get_next_key(stackmap_fd, &previous_key, &key) == 0);
+ /* stack_map_get_build_id_offset() is racy and sometimes can return
+ * BPF_STACK_BUILD_ID_IP instead of BPF_STACK_BUILD_ID_VALID;
+ * try it one more time.
+ */
+ if (build_id_matches < 1 && retry--) {
+ ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
+ close(pmu_fd);
+ bpf_object__close(obj);
+ printf("%s:WARN:Didn't find expected build ID from the map, retrying\n",
+ __func__);
+ goto retry;
+ }
+
if (CHECK(build_id_matches < 1, "build id match",
"Didn't find expected build ID from the map\n"))
goto disable_pmu;
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index aeeb76a..e38f1cb 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -44,6 +44,7 @@
#define SERV6_V4MAPPED_IP "::ffff:192.168.0.4"
#define SRC6_IP "::1"
#define SRC6_REWRITE_IP "::6"
+#define WILDCARD6_IP "::"
#define SERV6_PORT 6060
#define SERV6_REWRITE_PORT 6666
@@ -85,12 +86,14 @@ static int bind4_prog_load(const struct sock_addr_test *test);
static int bind6_prog_load(const struct sock_addr_test *test);
static int connect4_prog_load(const struct sock_addr_test *test);
static int connect6_prog_load(const struct sock_addr_test *test);
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test);
static int sendmsg_deny_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test);
static int sendmsg4_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_asm_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test);
static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test);
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test);
static struct sock_addr_test tests[] = {
/* bind */
@@ -463,6 +466,34 @@ static struct sock_addr_test tests[] = {
SYSCALL_ENOTSUPP,
},
{
+ "sendmsg6: set dst IP = [::] (BSD'ism)",
+ sendmsg6_rw_wildcard_prog_load,
+ BPF_CGROUP_UDP6_SENDMSG,
+ BPF_CGROUP_UDP6_SENDMSG,
+ AF_INET6,
+ SOCK_DGRAM,
+ SERV6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_REWRITE_PORT,
+ SRC6_REWRITE_IP,
+ SUCCESS,
+ },
+ {
+ "sendmsg6: preserve dst IP = [::] (BSD'ism)",
+ sendmsg_allow_prog_load,
+ BPF_CGROUP_UDP6_SENDMSG,
+ BPF_CGROUP_UDP6_SENDMSG,
+ AF_INET6,
+ SOCK_DGRAM,
+ WILDCARD6_IP,
+ SERV6_PORT,
+ SERV6_REWRITE_IP,
+ SERV6_PORT,
+ SRC6_IP,
+ SUCCESS,
+ },
+ {
"sendmsg6: deny call",
sendmsg_deny_prog_load,
BPF_CGROUP_UDP6_SENDMSG,
@@ -714,16 +745,27 @@ static int connect6_prog_load(const struct sock_addr_test *test)
return load_path(test, CONNECT6_PROG_PATH);
}
-static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
+static int sendmsg_ret_only_prog_load(const struct sock_addr_test *test,
+ int32_t rc)
{
struct bpf_insn insns[] = {
- /* return 0 */
- BPF_MOV64_IMM(BPF_REG_0, 0),
+ /* return rc */
+ BPF_MOV64_IMM(BPF_REG_0, rc),
BPF_EXIT_INSN(),
};
return load_insns(test, insns, sizeof(insns) / sizeof(struct bpf_insn));
}
+static int sendmsg_allow_prog_load(const struct sock_addr_test *test)
+{
+ return sendmsg_ret_only_prog_load(test, /*rc*/ 1);
+}
+
+static int sendmsg_deny_prog_load(const struct sock_addr_test *test)
+{
+ return sendmsg_ret_only_prog_load(test, /*rc*/ 0);
+}
+
static int sendmsg4_rw_asm_prog_load(const struct sock_addr_test *test)
{
struct sockaddr_in dst4_rw_addr;
@@ -844,6 +886,11 @@ static int sendmsg6_rw_v4mapped_prog_load(const struct sock_addr_test *test)
return sendmsg6_rw_dst_asm_prog_load(test, SERV6_V4MAPPED_IP);
}
+static int sendmsg6_rw_wildcard_prog_load(const struct sock_addr_test *test)
+{
+ return sendmsg6_rw_dst_asm_prog_load(test, WILDCARD6_IP);
+}
+
static int sendmsg6_rw_c_prog_load(const struct sock_addr_test *test)
{
return load_path(test, SENDMSG6_PROG_PATH);
diff --git a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
index d8313d0..b90dff8 100755
--- a/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
+++ b/tools/testing/selftests/net/forwarding/bridge_vlan_aware.sh
@@ -1,7 +1,7 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
-ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding"
+ALL_TESTS="ping_ipv4 ping_ipv6 learning flooding vlan_deletion extern_learn"
NUM_NETIFS=4
CHECK_TC="yes"
source lib.sh
@@ -96,6 +96,51 @@
flood_test $swp2 $h1 $h2
}
+vlan_deletion()
+{
+ # Test that the deletion of a VLAN on a bridge port does not affect
+ # the PVID VLAN
+ log_info "Add and delete a VLAN on bridge port $swp1"
+
+ bridge vlan add vid 10 dev $swp1
+ bridge vlan del vid 10 dev $swp1
+
+ ping_ipv4
+ ping_ipv6
+}
+
+extern_learn()
+{
+ local mac=de:ad:be:ef:13:37
+ local ageing_time
+
+ # Test that externally learned FDB entries can roam, but not age out
+ RET=0
+
+ bridge fdb add de:ad:be:ef:13:37 dev $swp1 master extern_learn vlan 1
+
+ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
+ check_err $? "Did not find FDB entry when should"
+
+ # Wait for 10 seconds after the ageing time to make sure the FDB entry
+ # was not aged out
+ ageing_time=$(bridge_ageing_time_get br0)
+ sleep $((ageing_time + 10))
+
+ bridge fdb show brport $swp1 | grep -q de:ad:be:ef:13:37
+ check_err $? "FDB entry was aged out when should not"
+
+ $MZ $h2 -c 1 -p 64 -a $mac -t ip -q
+
+ bridge fdb show brport $swp2 | grep -q de:ad:be:ef:13:37
+ check_err $? "FDB entry did not roam when should"
+
+ log_test "Externally learned FDB entry - ageing & roaming"
+
+ bridge fdb del de:ad:be:ef:13:37 dev $swp2 master vlan 1 &> /dev/null
+ bridge fdb del de:ad:be:ef:13:37 dev $swp1 master vlan 1 &> /dev/null
+}
+
trap cleanup EXIT
setup_prepare
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
index 637ea02..0da3545 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/ife.json
@@ -17,7 +17,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -41,7 +41,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 10 pipe index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -65,7 +65,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark continue index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*allow mark.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*allow mark.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -89,7 +89,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 789 drop index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*use mark 789.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*use mark 789.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -113,7 +113,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 656768 reclassify index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 656768.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 656768.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -137,7 +137,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 65 jump 1 index 2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 2",
- "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0xED3E.*use mark 65.*index 2",
+ "matchPattern": "action order [0-9]*: ife encode action jump 1.*type 0[xX]ED3E.*use mark 65.*index 2",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -161,7 +161,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 4294967295 reclassify index 90",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 90",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use mark 4294967295.*index 90",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use mark 4294967295.*index 90",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -185,7 +185,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 4294967295999 pipe index 90",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 90",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use mark 4294967295999.*index 90",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use mark 4294967295999.*index 90",
"matchCount": "0",
"teardown": []
},
@@ -207,7 +207,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow prio pass index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow prio.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow prio.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -231,7 +231,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 7 pipe index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 7.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 7.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -255,7 +255,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 3 continue index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use prio 3.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use prio 3.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -279,7 +279,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow prio drop index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow prio.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow prio.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -303,7 +303,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 998877 reclassify index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 998877.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 998877.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -327,7 +327,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 998877 jump 10 index 9",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 9",
- "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0xED3E.*use prio 998877.*index 9",
+ "matchPattern": "action order [0-9]*: ife encode action jump 10.*type 0[xX]ED3E.*use prio 998877.*index 9",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -351,7 +351,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 4294967295 reclassify index 99",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 99",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 4294967295.*index 99",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 4294967295.*index 99",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -375,7 +375,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 4294967298 pipe index 99",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 99",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use prio 4294967298.*index 99",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use prio 4294967298.*index 99",
"matchCount": "0",
"teardown": []
},
@@ -397,7 +397,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -421,7 +421,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 111 pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 111.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 111.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -445,7 +445,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -469,7 +469,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 1 continue index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action continue.*type 0xED3E.*use tcindex 1.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action continue.*type 0[xX]ED3E.*use tcindex 1.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -493,7 +493,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex drop index 77",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 77",
- "matchPattern": "action order [0-9]*: ife encode action drop.*type 0xED3E.*allow tcindex.*index 77",
+ "matchPattern": "action order [0-9]*: ife encode action drop.*type 0[xX]ED3E.*allow tcindex.*index 77",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -517,7 +517,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex reclassify index 77",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 77",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*allow tcindex.*index 77",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*allow tcindex.*index 77",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -541,7 +541,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex jump 999 index 77",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 77",
- "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0xED3E.*allow tcindex.*index 77",
+ "matchPattern": "action order [0-9]*: ife encode action jump 999.*type 0[xX]ED3E.*allow tcindex.*index 77",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -565,7 +565,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 65535 pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*use tcindex 65535.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*use tcindex 65535.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -589,7 +589,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 65539 pipe index 1",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*use tcindex 65539.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*use tcindex 65539.*index 1",
"matchCount": "0",
"teardown": []
},
@@ -611,7 +611,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark src 00:11:22:33:44:55 pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow mark src 00:11:22:33:44:55.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow mark src 00:11:22:33:44:55.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -635,7 +635,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 9876 dst 00:11:22:33:44:55 reclassify index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ED3E.*use prio 9876 dst 00:11:22:33:44:55.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -659,7 +659,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow tcindex src 00:aa:bb:cc:dd:ee dst 00:11:22:33:44:55 pass index 11",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 11",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow tcindex dst 00:11:22:33:44:55 src 00:aa:bb:cc:dd:ee .*index 11",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -683,7 +683,7 @@
"cmdUnderTest": "$TC actions add action ife encode use mark 7 type 0xfefe pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xFEFE.*use mark 7.*index 1",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]FEFE.*use mark 7.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -707,7 +707,7 @@
"cmdUnderTest": "$TC actions add action ife encode use prio 444 type 0xabba pipe index 21",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 21",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xABBA.*use prio 444.*index 21",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ABBA.*use prio 444.*index 21",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -731,7 +731,7 @@
"cmdUnderTest": "$TC actions add action ife encode use tcindex 5000 type 0xabcd reclassify index 21",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 21",
- "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0xABCD.*use tcindex 5000.*index 21",
+ "matchPattern": "action order [0-9]*: ife encode action reclassify.*type 0[xX]ABCD.*use tcindex 5000.*index 21",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -739,7 +739,7 @@
},
{
"id": "fac3",
- "name": "Create valid ife encode action with index at 32-bit maximnum",
+ "name": "Create valid ife encode action with index at 32-bit maximum",
"category": [
"actions",
"ife"
@@ -755,7 +755,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 4294967295",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -779,7 +779,7 @@
"cmdUnderTest": "$TC actions add action ife decode pass index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action pass.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action pass.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -803,7 +803,7 @@
"cmdUnderTest": "$TC actions add action ife decode pipe index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action pipe.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -827,7 +827,7 @@
"cmdUnderTest": "$TC actions add action ife decode continue index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action continue.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action continue.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -851,7 +851,7 @@
"cmdUnderTest": "$TC actions add action ife decode drop index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action drop.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action drop.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -875,7 +875,7 @@
"cmdUnderTest": "$TC actions add action ife decode reclassify index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action reclassify.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -899,7 +899,7 @@
"cmdUnderTest": "$TC actions add action ife decode jump 10 index 1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 1",
- "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0x0.*allow mark allow tcindex allow prio.*index 1",
+ "matchPattern": "action order [0-9]*: ife decode action jump 10.*type 0(x0)?.*allow mark allow tcindex allow prio.*index 1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -923,7 +923,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark pass index 4294967295999",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4294967295999",
- "matchPattern": "action order [0-9]*: ife encode action pass.*type 0xED3E.*allow mark.*index 4294967295999",
+ "matchPattern": "action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark.*index 4294967295999",
"matchCount": "0",
"teardown": []
},
@@ -945,7 +945,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow mark kuka index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0xED3E.*allow mark.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action kuka.*type 0[xX]ED3E.*allow mark.*index 4",
"matchCount": "0",
"teardown": []
},
@@ -967,7 +967,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow prio pipe index 4 cookie aabbccddeeff112233445566778800a1",
"expExitCode": "0",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow prio.*index 4.*cookie aabbccddeeff112233445566778800a1",
"matchCount": "1",
"teardown": [
"$TC actions flush action ife"
@@ -991,7 +991,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow foo pipe index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0xED3E.*allow foo.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]ED3E.*allow foo.*index 4",
"matchCount": "0",
"teardown": []
},
@@ -1013,7 +1013,7 @@
"cmdUnderTest": "$TC actions add action ife encode allow prio type 70000 pipe index 4",
"expExitCode": "255",
"verifyCmd": "$TC actions get action ife index 4",
- "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0x11170.*allow prio.*index 4",
+ "matchPattern": "action order [0-9]*: ife encode action pipe.*type 0[xX]11170.*allow prio.*index 4",
"matchCount": "0",
"teardown": []
},
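The ife.json pattern updates above relax how the IFE metadata type is matched: the encode type is now accepted as either "0xED3E" or "0XED3E", and the decode type as either "0x0" or plain "0", presumably to cover differences in how tc formats the hex value across iproute2 versions. A minimal standalone C sketch (not part of this patch; the selftest harness itself evaluates these patterns with Python's re module, but POSIX extended regex behaves the same here) showing that the relaxed expression matches both encode spellings:

/* Standalone check of the relaxed IFE type pattern (illustration only). */
#include <regex.h>
#include <stdio.h>

int main(void)
{
	const char *pattern =
		"action order [0-9]*: ife encode action pass.*type 0[xX]ED3E.*allow mark";
	const char *outputs[] = {
		"action order 1: ife encode action pass type 0xED3E allow mark",
		"action order 1: ife encode action pass type 0XED3E allow mark",
	};
	regex_t re;

	if (regcomp(&re, pattern, REG_EXTENDED))
		return 1;
	for (int i = 0; i < 2; i++)
		printf("%s -> %s\n", outputs[i],
		       regexec(&re, outputs[i], 0, NULL, 0) ? "no match" : "match");
	regfree(&re);
	return 0;
}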
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
index 10b2d89..e7e15a7 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/tunnel_key.json
@@ -82,35 +82,6 @@
]
},
{
- "id": "ba4e",
- "name": "Add tunnel_key set action with missing mandatory id parameter",
- "category": [
- "actions",
- "tunnel_key"
- ],
- "setup": [
- [
- "$TC actions flush action tunnel_key",
- 0,
- 1,
- 255
- ]
- ],
- "cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 20.20.20.2",
- "expExitCode": "255",
- "verifyCmd": "$TC actions list action tunnel_key",
- "matchPattern": "action order [0-9]+: tunnel_key set.*src_ip 10.10.10.1.*dst_ip 20.20.20.2",
- "matchCount": "0",
- "teardown": [
- [
- "$TC actions flush action tunnel_key",
- 0,
- 1,
- 255
- ]
- ]
- },
- {
"id": "a5e0",
"name": "Add tunnel_key set action with invalid src_ip parameter",
"category": [
@@ -634,7 +605,7 @@
"cmdUnderTest": "$TC actions add action tunnel_key set src_ip 10.10.10.1 dst_ip 10.10.10.2 id 7 index 4 cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
"expExitCode": "0",
"verifyCmd": "$TC actions get action tunnel_key index 4",
- "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*dst_port 0.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
+ "matchPattern": "action order [0-9]+: tunnel_key.*set.*src_ip 10.10.10.1.*dst_ip 10.10.10.2.*key_id 7.*csum pipe.*index 4 ref.*cookie aa11bb22cc33dd44ee55ff66aa11b1b2",
"matchCount": "1",
"teardown": [
"$TC actions flush action tunnel_key"
diff --git a/virt/kvm/arm/mmio.c b/virt/kvm/arm/mmio.c
index dac7ceb1..08443a1 100644
--- a/virt/kvm/arm/mmio.c
+++ b/virt/kvm/arm/mmio.c
@@ -117,6 +117,12 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
}
+ /*
+ * The MMIO instruction is emulated and should not be re-executed
+ * in the guest.
+ */
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+
return 0;
}
@@ -144,11 +150,6 @@ static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
vcpu->arch.mmio_decode.sign_extend = sign_extend;
vcpu->arch.mmio_decode.rt = rt;
- /*
- * The MMIO instruction is emulated and should not be re-executed
- * in the guest.
- */
- kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
return 0;
}
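The mmio.c hunk moves kvm_skip_instr() from decode_hsr() to kvm_handle_mmio_return(), so the guest PC is advanced only once the MMIO access has actually been emulated and the loaded value written back to the destination register, rather than at decode time. A minimal standalone C sketch (hypothetical names, not kernel code) of that ordering:

/*
 * Standalone sketch of the ordering established above: decode records the
 * instruction details, and the PC is stepped past the trapping instruction
 * only after the MMIO result has been folded back.
 */
#include <stdio.h>

struct vcpu {
	unsigned long pc;
	unsigned long reg;	/* destination register of the emulated load */
};

static void skip_instr(struct vcpu *v) { v->pc += 4; }

/* Decode only records what the instruction needs; it no longer touches PC. */
static void decode(struct vcpu *v) { (void)v; }

/* Called once the (possibly userspace-handled) MMIO access has completed. */
static void handle_mmio_return(struct vcpu *v, unsigned long data)
{
	v->reg = data;		/* fold the result back first ...               */
	skip_instr(v);		/* ... then it is safe to advance past the trap */
}

int main(void)
{
	struct vcpu v = { .pc = 0x1000, .reg = 0 };

	decode(&v);
	handle_mmio_return(&v, 0xabcd);
	printf("pc=0x%lx reg=0x%lx\n", v.pc, v.reg);
	return 0;
}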
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f986e31f..0ffb02f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1959,7 +1959,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
- void *data, int offset, unsigned long len)
+ void *data, unsigned int offset,
+ unsigned long len)
{
struct kvm_memslots *slots = kvm_memslots(kvm);
int r;
@@ -2912,8 +2913,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
if (ops->init)
ops->init(dev);
+ kvm_get_kvm(kvm);
ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
if (ret < 0) {
+ kvm_put_kvm(kvm);
mutex_lock(&kvm->lock);
list_del(&dev->vm_node);
mutex_unlock(&kvm->lock);
@@ -2921,7 +2924,6 @@ static int kvm_ioctl_create_device(struct kvm *kvm,
return ret;
}
- kvm_get_kvm(kvm);
cd->fd = ret;
return 0;
}
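The kvm_ioctl_create_device() hunk above reorders the reference counting so that kvm_get_kvm() runs before anon_inode_getfd() installs the device fd, and the reference is dropped again on the failure path. Once the fd is visible, userspace can close it and run the device release path concurrently, so taking the reference only afterwards leaves a window where a count is dropped that was never raised. A minimal standalone C sketch (hypothetical names, not kernel code) of the take-the-reference-before-publishing-the-fd pattern:

/*
 * Standalone sketch: pin the long-lived parent object before the file
 * descriptor that references it becomes reachable, and undo the reference
 * only when installing the fd fails.
 */
#include <stdio.h>

struct parent { int refcount; };

static void get_parent(struct parent *p) { p->refcount++; }
static void put_parent(struct parent *p) { p->refcount--; }

/* Stand-in for anon_inode_getfd(): returns an fd or a negative error. */
static int install_fd(int should_fail) { return should_fail ? -1 : 3; }

static int create_child(struct parent *p, int should_fail)
{
	int fd;

	get_parent(p);			/* pin the parent first ...             */
	fd = install_fd(should_fail);	/* ... then publish the fd              */
	if (fd < 0) {
		put_parent(p);		/* creation failed: undo the reference  */
		return fd;
	}
	/* From here on, closing the fd is what drops the reference. */
	return fd;
}

int main(void)
{
	struct parent p = { .refcount = 1 };
	int fd;

	fd = create_child(&p, 0);
	printf("ok path:  fd=%d refcount=%d\n", fd, p.refcount);
	fd = create_child(&p, 1);
	printf("err path: fd=%d refcount=%d\n", fd, p.refcount);
	return 0;
}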