Merge 34816d20f173 ("Merge tag 'gfs2-v5.10-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2") into android-mainline

Steps on the way to 5.10-rc7

Resolves a merge issue in:
	arch/arm64/kernel/process.c

Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Change-Id: If22f5ca1f09e08cdb95f841f3381eda5cd31ee00
diff --git a/Documentation/ABI/testing/sysfs-class-typec b/Documentation/ABI/testing/sysfs-class-typec
index b7794e0..8eab41e 100644
--- a/Documentation/ABI/testing/sysfs-class-typec
+++ b/Documentation/ABI/testing/sysfs-class-typec
@@ -139,6 +139,49 @@
 		Shows if the partner supports USB Power Delivery communication:
 		Valid values: yes, no
 
+What:		/sys/class/typec/<port>-partner/number_of_alternate_modes
+Date:		November 2020
+Contact:	Prashant Malani <pmalani@chromium.org>
+Description:
+		Shows the number of alternate modes advertised by the partner
+		during Power Delivery discovery. This file remains hidden until
+		a value greater than or equal to 0 is set by the Type-C port driver.
+
+What:		/sys/class/typec/<port>-partner/type
+Date:		December 2020
+Contact:	Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Description:	USB Power Delivery Specification defines a set of product types
+		for the partner devices. This file will show the product type of
+		the partner if it is known. Dual-role capable partners will have
+		both UFP and DFP product types defined, but only the one that
+		matches the current role will be active at a time. If the
+		product type of the partner is not visible to the device driver,
+		this file will not exist.
+
+		When the partner product type is detected, or changed with a role
+		swap, a uevent is also raised that contains PRODUCT_TYPE=<product
+		type> (for example PRODUCT_TYPE=hub).
+
+		Valid values:
+
+		UFP / device role
+		======================  ==========================
+		undefined		-
+		hub			PDUSB Hub
+		peripheral		PDUSB Peripheral
+		psd			Power Bank
+		ama			Alternate Mode Adapter
+		======================  ==========================
+
+		DFP / host role
+		======================  ==========================
+		undefined		-
+		hub			PDUSB Hub
+		host			PDUSB Host
+		power_brick		Power Brick
+		amc			Alternate Mode Controller
+		======================  ==========================
+
 What:		/sys/class/typec/<port>-partner/identity/
 Date:		April 2017
 Contact:	Heikki Krogerus <heikki.krogerus@linux.intel.com>
@@ -151,31 +194,6 @@
 		directory exists, it will have an attribute file for every VDO
 		in Discover Identity command result.
 
-What:		/sys/class/typec/<port>-partner/identity/id_header
-Date:		April 2017
-Contact:	Heikki Krogerus <heikki.krogerus@linux.intel.com>
-Description:
-		ID Header VDO part of Discover Identity command result. The
-		value will show 0 until Discover Identity command result becomes
-		available. The value can be polled.
-
-What:		/sys/class/typec/<port>-partner/identity/cert_stat
-Date:		April 2017
-Contact:	Heikki Krogerus <heikki.krogerus@linux.intel.com>
-Description:
-		Cert Stat VDO part of Discover Identity command result. The
-		value will show 0 until Discover Identity command result becomes
-		available. The value can be polled.
-
-What:		/sys/class/typec/<port>-partner/identity/product
-Date:		April 2017
-Contact:	Heikki Krogerus <heikki.krogerus@linux.intel.com>
-Description:
-		Product VDO part of Discover Identity command result. The value
-		will show 0 until Discover Identity command result becomes
-		available. The value can be polled.
-
-
 USB Type-C cable devices (eg. /sys/class/typec/port0-cable/)
 
 Note: Electronically Marked Cables will have a device also for one cable plug
@@ -187,9 +205,21 @@
 What:		/sys/class/typec/<port>-cable/type
 Date:		April 2017
 Contact:	Heikki Krogerus <heikki.krogerus@linux.intel.com>
-Description:
-		Shows if the cable is active.
-		Valid values: active, passive
+Description:	USB Power Delivery Specification defines a set of product types
+		for the cables. This file will show the product type of the
+		cable if it is known. If the product type of the cable is not
+		visible to the device driver, this file will not exist.
+
+		When the cable product type is detected, a uevent is also raised
+		with PRODUCT_TYPE showing the product type of the cable.
+
+		Valid values:
+
+		======================  ==========================
+		undefined		-
+		active			Active Cable
+		passive			Passive Cable
+		======================  ==========================
 
 What:		/sys/class/typec/<port>-cable/plug_type
 Date:		April 2017
@@ -202,17 +232,37 @@
 		- type-c
 		- captive
 
-What:		/sys/class/typec/<port>-cable/identity/
+What:		/sys/class/typec/<port>-<plug>/number_of_alternate_modes
+Date:		November 2020
+Contact:	Prashant Malani <pmalani@chromium.org>
+Description:
+		Shows the number of alternate modes advertised by the plug
+		associated with a particular cable during Power Delivery
+		discovery. This file remains hidden until a value greater than
+		or equal to 0 is set by the Type-C port driver.
+
+
+USB Type-C partner/cable Power Delivery Identity objects
+
+NOTE: The following attributes are applicable to both
+partner (e.g. /sys/class/typec/port0-partner/) and
+cable (e.g. /sys/class/typec/port0-cable/) devices. Consequently, the example
+file paths below are prefixed with "/sys/class/typec/<port>-{partner|cable}/"
+to reflect this.
+
+What:		/sys/class/typec/<port>-{partner|cable}/identity/
 Date:		April 2017
 Contact:	Heikki Krogerus <heikki.krogerus@linux.intel.com>
 Description:
 		This directory appears only if the port device driver is capable
 		of showing the result of Discover Identity USB power delivery
 		command. That will not always be possible even when USB power
-		delivery is supported. If the directory exists, it will have an
-		attribute for every VDO returned by Discover Identity command.
+		delivery is supported, for example when USB power delivery
+		communication for the port is mostly handled in firmware. If the
+		directory exists, it will have an attribute file for every VDO
+		in Discover Identity command result.
 
-What:		/sys/class/typec/<port>-cable/identity/id_header
+What:		/sys/class/typec/<port>-{partner|cable}/identity/id_header
 Date:		April 2017
 Contact:	Heikki Krogerus <heikki.krogerus@linux.intel.com>
 Description:
@@ -220,7 +270,7 @@
 		value will show 0 until Discover Identity command result becomes
 		available. The value can be polled.
 
-What:		/sys/class/typec/<port>-cable/identity/cert_stat
+What:		/sys/class/typec/<port>-{partner|cable}/identity/cert_stat
 Date:		April 2017
 Contact:	Heikki Krogerus <heikki.krogerus@linux.intel.com>
 Description:
@@ -228,7 +278,7 @@
 		value will show 0 until Discover Identity command result becomes
 		available. The value can be polled.
 
-What:		/sys/class/typec/<port>-cable/identity/product
+What:		/sys/class/typec/<port>-{partner|cable}/identity/product
 Date:		April 2017
 Contact:	Heikki Krogerus <heikki.krogerus@linux.intel.com>
 Description:
@@ -236,6 +286,30 @@
 		will show 0 until Discover Identity command result becomes
 		available. The value can be polled.
 
+What:		/sys/class/typec/<port>-{partner|cable}/identity/product_type_vdo1
+Date:		October 2020
+Contact:	Prashant Malani <pmalani@chromium.org>
+Description:
+		1st Product Type VDO of Discover Identity command result.
+		The value will show 0 until Discover Identity command result becomes
+		available and a valid Product Type VDO is returned.
+
+What:		/sys/class/typec/<port>-{partner|cable}/identity/product_type_vdo2
+Date:		October 2020
+Contact:	Prashant Malani <pmalani@chromium.org>
+Description:
+		2nd Product Type VDO of Discover Identity command result.
+		The value will show 0 until Discover Identity command result becomes
+		available and a valid Product Type VDO is returned.
+
+What:		/sys/class/typec/<port>-{partner|cable}/identity/product_type_vdo3
+Date:		October 2020
+Contact:	Prashant Malani <pmalani@chromium.org>
+Description:
+		3rd Product Type VDO of Discover Identity command result.
+		The value will show 0 until Discover Identity command result becomes
+		available and a valid Product Type VDO is returned.
+
 
 USB Type-C port alternate mode devices.
 
diff --git a/Documentation/ABI/testing/sysfs-kernel-ion b/Documentation/ABI/testing/sysfs-kernel-ion
new file mode 100644
index 0000000..f57f970
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-ion
@@ -0,0 +1,27 @@
+What:		/sys/kernel/ion
+Date:		Dec 2019
+KernelVersion:	4.14.158
+Contact:	Suren Baghdasaryan <surenb@google.com>,
+		Sandeep Patil <sspatil@google.com>
+Description:
+		The /sys/kernel/ion directory contains a snapshot of the
+		internal state of ION memory heaps and pools.
+Users:		kernel memory tuning tools
+
+What:		/sys/kernel/ion/total_heaps_kb
+Date:		Dec 2019
+KernelVersion:	4.14.158
+Contact:	Suren Baghdasaryan <surenb@google.com>,
+		Sandeep Patil <sspatil@google.com>
+Description:
+		The total_heaps_kb file is read-only and specifies how much
+		memory in kB is allocated to ION heaps.
+
+What:		/sys/kernel/ion/total_pools_kb
+Date:		Dec 2019
+KernelVersion:	4.14.158
+Contact:	Suren Baghdasaryan <surenb@google.com>,
+		Sandeep Patil <sspatil@google.com>
+Description:
+		The total_pools_kb file is read-only and specifies how much
+		memory in kB is allocated to ION pools.
diff --git a/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons
new file mode 100644
index 0000000..acb19b9
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons
@@ -0,0 +1,16 @@
+What:		/sys/kernel/wakeup_reasons/last_resume_reason
+Date:		February 2014
+Contact:	Ruchi Kandoi <kandoiruchi@google.com>
+Description:
+		The /sys/kernel/wakeup_reasons/last_resume_reason file is
+		used to report wakeup reasons after the system exits suspend.
+
+What:		/sys/kernel/wakeup_reasons/last_suspend_time
+Date:		March 2015
+Contact:	jinqian <jinqian@google.com>
+Description:
+		The /sys/kernel/wakeup_reasons/last_suspend_time file is
+		used to report the time spent in the last suspend cycle. It
+		contains two numbers (in seconds) separated by a space. The
+		first number is the time spent in the suspend and resume
+		processes. The second number is the time spent in the sleep
+		state.
\ No newline at end of file
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 44fde25..b4b4e54 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1567,6 +1567,10 @@
 				If specified, z/VM IUCV HVC accepts connections
 				from listed z/VM user IDs only.
 
+	hvc_dcc.enable=	[ARM,ARM64]	Enable the DCC driver at runtime. For
+				GKI, it is disabled by default to prevent
+				crashes on devices which do not support DCC.
+
 	hv_nopvspin	[X86,HYPER_V] Disables the paravirt spinlock optimizations
 				      which allow the hypervisor to 'idle' the
 				      guest on lock contention.
diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst
index f455fa0..9535523 100644
--- a/Documentation/admin-guide/sysctl/vm.rst
+++ b/Documentation/admin-guide/sysctl/vm.rst
@@ -38,6 +38,7 @@
 - dirty_writeback_centisecs
 - drop_caches
 - extfrag_threshold
+- extra_free_kbytes
 - highmem_is_dirtyable
 - hugetlb_shm_group
 - laptop_mode
@@ -307,6 +308,21 @@
 any throttling.
 
 
+extra_free_kbytes
+=================
+
+This parameter tells the VM to keep extra free memory between the threshold
+where background reclaim (kswapd) kicks in, and the threshold where direct
+reclaim (by allocating processes) kicks in.
+
+This is useful for workloads that require low-latency memory allocations
+and have a bounded burstiness in memory allocations. For example, a
+realtime application that receives and transmits network traffic
+(causing in-kernel memory allocations) with a maximum total message burst
+size of 200MB may need 200MB of extra free memory to avoid direct-reclaim
+related latencies.
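+
+For example, to keep roughly 200MB of extra free memory (the value is in
+kilobytes and purely illustrative)::
+
+	echo 204800 > /proc/sys/vm/extra_free_kbytes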
+
 hugetlb_shm_group
 =================
 
diff --git a/Documentation/device-mapper/dm-bow.txt b/Documentation/device-mapper/dm-bow.txt
new file mode 100644
index 0000000..e3fc4d2
--- /dev/null
+++ b/Documentation/device-mapper/dm-bow.txt
@@ -0,0 +1,99 @@
+dm_bow (backup on write)
+========================
+
+dm_bow is a device mapper driver that uses the free space on a device to back up
+data that is overwritten. The changes can then be committed by a simple state
+change, or rolled back by removing the dm_bow device and running a command line
+utility over the underlying device.
+
+dm_bow has three states; transitions are triggered by writing '1' or '2' to
+/sys/block/dm-?/bow/state. It is only possible to go from state 0 (the initial
+state) to state 1, and then from state 1 to state 2.
+
+State 0: dm_bow collects all trims to the device and assumes that these mark
+free space on the overlying file system that can be safely used. Typically the
+mount code would create the dm_bow device, mount the file system, call the
+FITRIM ioctl on the file system then switch to state 1. These trims are not
+propagated to the underlying device.
+
+State 1: All writes to the device cause the underlying data to be backed up to
+the free (trimmed) area as needed in such a way as they can be restored.
+However, the writes, with one exception, then happen exactly as they would
+without dm_bow, so the device is always in a good final state. The exception is
+that sector 0 is used to keep a log of the latest changes, both to indicate that
+we are in this state and to allow rollback. See below for all details. If there
+isn't enough free space, writes are failed with -ENOSPC.
+
+State 2: The transition to state 2 triggers replacing the special sector 0 with
+the normal sector 0, and the freeing of all state information. dm_bow then
+becomes a pass-through driver, allowing the device to continue to be used with
+minimal performance impact.
+
+Usage
+=====
+dm-bow takes one command line parameter, the name of the underlying device.
+
+dm-bow will typically be used in the following way. dm-bow will be loaded with a
+suitable underlying device and the resultant device will be mounted. A file
+system trim will be issued via the FITRIM ioctl, then the device will be
+switched to state 1. The file system will now be used as normal. At some point,
+the changes can either be committed by switching to state 2, or rolled back by
+unmounting the file system, removing the dm-bow device and running the command
+line utility. Note that rebooting the device is equivalent to unmounting
+and removing, but the command line utility must still be run.
+
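+For example, assuming the dm-bow instance is dm-0 (the name is illustrative):
+
+	echo 1 > /sys/block/dm-0/bow/state	# enter state 1: back up overwrites
+	echo 2 > /sys/block/dm-0/bow/state	# enter state 2: commit, pass-through
+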
+Details of operation in state 1
+===============================
+
+dm_bow maintains a type for all sectors. A sector can be any of:
+
+SECTOR0
+SECTOR0_CURRENT
+UNCHANGED
+FREE
+CHANGED
+BACKUP
+
+SECTOR0 is the first sector on the device, and is used to hold the log of
+changes. This is the one exception.
+
+SECTOR0_CURRENT is a sector picked from the FREE sectors, and is where reads and
+writes from the true sector zero are redirected to. Note that like any backup
+sector, if the sector is written to directly, it must be moved again.
+
+UNCHANGED means that the sector has not been changed since we entered state 1.
+Thus if it is written to or trimmed, the contents must first be backed up.
+
+FREE means that the sector was trimmed in state 0 and has not yet been written
+to or used for backup. On being written to, a FREE sector is changed to CHANGED.
+
+CHANGED means that the sector has been modified, and can be further modified
+without further backup.
+
+BACKUP means that this is a free sector being used as a backup. On being written
+to, the contents must first be backed up again.
+
+All backup operations are logged to the first sector. The log sector has the
+format:
+--------------------------------------------------------
+| Magic | Count | Sequence | Log entry | Log entry | …
+--------------------------------------------------------
+
+Magic is a magic number. Count is the number of log entries. Sequence is 0
+initially. A log entry is:
+
+-----------------------------------
+| Source | Dest | Size | Checksum |
+-----------------------------------
+
+When SECTOR0 is full, the log sector is backed up and another empty log sector
+is created with a sequence number one higher. The first entry in any log sector
+with sequence > 0 must therefore be the log of the backing up of the previous
+log sector. Note that sequence is not strictly needed, but is a useful sanity
+check and potentially limits the time spent trying to restore a corrupted
+snapshot.
+
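+A hypothetical C view of this layout (the names follow the diagrams above;
+the field widths are illustrative, not taken from the driver source):
+
+	struct log_entry {
+		__le64 source;		/* sector the data was copied from */
+		__le64 dest;		/* free sector holding the backup */
+		__le64 size;		/* length of the backed up range */
+		__le64 checksum;	/* integrity check for this entry */
+	};
+
+	struct log_sector {
+		__le32 magic;		/* identifies a dm_bow log sector */
+		__le32 count;		/* number of log entries that follow */
+		__le32 sequence;	/* 0 for the first log sector */
+		struct log_entry entries[];
+	};
+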
+On entering state 1, dm_bow has a list of free sectors. All other sectors are
+unchanged. SECTOR0_CURRENT is selected from the free sectors and the contents
+of sector 0 are copied there. Sector 0 is then backed up, which triggers the
+first log entry to be written.
+
diff --git a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
index 1df2939..004584e 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/qcom,pdc.txt
@@ -25,6 +25,9 @@
 	Usage: required
 	Value type: <prop-encoded-array>
 	Definition: Specifies the base physical address for PDC hardware.
+		    Optionally, specify the PDC's GIC interface registers that
+		    need to be configured for wakeup capable GPIOs routed to
+		    the PDC.
 
 - interrupt-cells:
 	Usage: required
@@ -51,15 +54,23 @@
 		    The second element is the GIC hwirq number for the PDC port.
 		    The third element is the number of interrupts in sequence.
 
+- qcom,scm-spi-cfg:
+	Usage: optional
+	Value type: <bool>
+	Definition: Specifies if the SPI configuration registers have to be
+		    written from the firmware. Sometimes the PDC interface
+		    register to the GIC can only be written from the firmware.
+
 Example:
 
 	pdc: interrupt-controller@b220000 {
 		compatible = "qcom,sdm845-pdc";
-		reg = <0xb220000 0x30000>;
+		reg = <0 0x0b220000 0 0x30000>, <0 0x179900f0 0 0x60>;
 		qcom,pdc-ranges = <0 512 94>, <94 641 15>, <115 662 7>;
 		#interrupt-cells = <2>;
 		interrupt-parent = <&intc>;
 		interrupt-controller;
+		qcom,scm-spi-cfg;
 	};
 
 DT binding of a device that wants to use the GIC SPI 514 as a wakeup
diff --git a/Documentation/devicetree/bindings/usb/maxim,max33359.yaml b/Documentation/devicetree/bindings/usb/maxim,max33359.yaml
new file mode 100644
index 0000000..93a19ed
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/maxim,max33359.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/usb/maxim,max33359.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Maxim TCPCI Type-C PD controller DT bindings
+
+maintainers:
+  - Badhri Jagan Sridharan <badhri@google.com>
+
+description: Maxim TCPCI Type-C PD controller
+
+properties:
+  compatible:
+    enum:
+      - maxim,max33359
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  connector:
+    type: object
+    $ref: ../connector/usb-connector.yaml#
+    description:
+      Properties for usb c connector.
+
+required:
+  - compatible
+  - reg
+  - interrupts
+  - connector
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/usb/pd.h>
+    i2c0 {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        maxtcpc@25 {
+            compatible = "maxim,max33359";
+            reg = <0x25>;
+            interrupt-parent = <&gpa8>;
+            interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+
+            connector {
+                compatible = "usb-c-connector";
+                label = "USB-C";
+                data-role = "dual";
+                power-role = "dual";
+                try-power-role = "sink";
+                self-powered;
+                op-sink-microwatt = <2600000>;
+                new-source-frs-typec-current = <FRS_5V_1P5A>;
+                source-pdos = <PDO_FIXED(5000, 900,
+                                         PDO_FIXED_SUSPEND |
+                                         PDO_FIXED_USB_COMM |
+                                         PDO_FIXED_DATA_SWAP |
+                                         PDO_FIXED_DUAL_ROLE)>;
+                sink-pdos = <PDO_FIXED(5000, 3000,
+                                       PDO_FIXED_USB_COMM |
+                                       PDO_FIXED_DATA_SWAP |
+                                       PDO_FIXED_DUAL_ROLE)
+                                       PDO_FIXED(9000, 2000, 0)>;
+            };
+        };
+    };
+...
diff --git a/Documentation/filesystems/ext4/directory.rst b/Documentation/filesystems/ext4/directory.rst
index 073940c..55f618b 100644
--- a/Documentation/filesystems/ext4/directory.rst
+++ b/Documentation/filesystems/ext4/directory.rst
@@ -121,6 +121,31 @@
    * - 0x7
      - Symbolic link.
 
+To support directories that are both encrypted and casefolded, we must also
+include hash information in the directory entry. We append
+``ext4_extended_dir_entry_2`` to ``ext4_dir_entry_2`` except for the entries
+for dot and dotdot, which are kept the same. The structure follows immediately
+after ``name`` and is included in the size listed by ``rec_len``. If a
+directory entry uses this extension, it may be up to 271 bytes (the 263-byte
+maximum of a classic entry plus these 8 bytes).
+
+.. list-table::
+   :widths: 8 8 24 40
+   :header-rows: 1
+
+   * - Offset
+     - Size
+     - Name
+     - Description
+   * - 0x0
+     - \_\_le32
+     - hash
+     - The hash of the directory name
+   * - 0x4
+     - \_\_le32
+     - minor\_hash
+     - The minor hash of the directory name
+
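+Expressed as a C structure matching the table above (a sketch of this layout,
+not copied from the ext4 sources)::
+
+   struct ext4_extended_dir_entry_2 {
+           __le32 hash;        /* Hash of the directory name */
+           __le32 minor_hash;  /* Minor hash of the directory name */
+   };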
+
 In order to add checksums to these classic directory blocks, a phony
 ``struct ext4_dir_entry`` is placed at the end of each leaf block to
 hold the checksum. The directory entry is 12 bytes long. The inode
@@ -322,6 +347,8 @@
      - Half MD4, unsigned.
    * - 0x5
      - Tea, unsigned.
+   * - 0x6
+     - Siphash.
 
 Interior nodes of an htree are recorded as ``struct dx_node``, which is
 also the full length of a data block:
diff --git a/Documentation/filesystems/locking.rst b/Documentation/filesystems/locking.rst
index c0f2c75..bcb092e 100644
--- a/Documentation/filesystems/locking.rst
+++ b/Documentation/filesystems/locking.rst
@@ -125,7 +125,7 @@
 	bool (*list)(struct dentry *dentry);
 	int (*get)(const struct xattr_handler *handler, struct dentry *dentry,
 		   struct inode *inode, const char *name, void *buffer,
-		   size_t size);
+		   size_t size, int flags);
 	int (*set)(const struct xattr_handler *handler, struct dentry *dentry,
 		   struct inode *inode, const char *name, const void *buffer,
 		   size_t size, int flags);
diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
index 580ab9a..18ac7d3 100644
--- a/Documentation/filesystems/overlayfs.rst
+++ b/Documentation/filesystems/overlayfs.rst
@@ -137,6 +137,29 @@
 such as metadata and extended attributes are reported for the upper
 directory only.  These attributes of the lower directory are hidden.
 
+credentials
+-----------
+
+By default, all access to the upper, lower and work directories is performed
+with the mounter's MAC and DAC credentials, recorded at mount time.  Incoming
+accesses are still checked against the caller's credentials.
+
+In the case where the caller's MAC or DAC credentials do not overlap with the
+mounter's, a use case available in older versions of the driver, the
+override_creds mount flag can be turned off.  This helps when the use
+pattern has a caller with legitimate credentials where the mounter does
+not.  Several unintended side effects will occur, though.  A caller
+without certain key capabilities, or with lower privilege, will not
+always be able to delete files or directories, create nodes, or search
+some restricted directories.  The ability to search and read a directory
+entry is spotty because the cache mechanism does not retest credentials;
+it assumes that a privileged caller can fill the cache and that a lower
+privileged caller can then read the directory cache.  This uneven
+security model, where the cache, upperdir and workdir are opened with
+the mounter's privilege but accessed without creating a form of
+privilege escalation, should only be used with a strict understanding of
+the side effects and of the security policies.
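+
+For example (the paths are placeholders, and the option name follows this
+patch series)::
+
+  mount -t overlay overlay -o \
+        lowerdir=/lower,upperdir=/upper,workdir=/work,override_creds=off \
+        /merged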
+
 whiteouts and opaque directories
 --------------------------------
 
diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst
index 533c79e..e72c931 100644
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -429,6 +429,8 @@
  [stack]                    the stack of the main process
  [vdso]                     the "virtual dynamic shared object",
                             the kernel system call handler
+ [anon:<name>]              an anonymous mapping that has been
+                            named by userspace
  =======                    ====================================
 
  or if empty, the mapping is anonymous.
@@ -462,6 +464,7 @@
     Locked:                0 kB
     THPeligible:           0
     VmFlags: rd ex mr mw me dw
+    Name: name from userspace
 
 The first of these lines shows the same information as is displayed for the
 mapping in /proc/PID/maps.  Following lines show the size of the mapping
@@ -554,6 +557,9 @@
 might change in future as well. So each consumer of these flags has to
 follow each specific kernel version for the exact semantic.
 
+The "Name" field will only be present on a mapping that has been named by
+userspace, and will show the name passed in by userspace.
+
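+A mapping is named from userspace with prctl(). A minimal sketch, assuming
+the PR_SET_VMA/PR_SET_VMA_ANON_NAME interface carried by this patchset::
+
+    #include <sys/mman.h>
+    #include <sys/prctl.h>
+
+    #ifndef PR_SET_VMA
+    #define PR_SET_VMA           0x53564d41
+    #define PR_SET_VMA_ANON_NAME 0
+    #endif
+
+    int name_heap(void)
+    {
+        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+
+        /* The region then appears as [anon:myheap] in /proc/PID/maps. */
+        return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
+                     (unsigned long)p, 4096, (unsigned long)"myheap");
+    }
+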
 This file is only present if the CONFIG_MMU kernel configuration option is
 enabled.
 
diff --git a/Documentation/kbuild/modules.rst b/Documentation/kbuild/modules.rst
index 85ccc87..d4b3e0c 100644
--- a/Documentation/kbuild/modules.rst
+++ b/Documentation/kbuild/modules.rst
@@ -21,6 +21,7 @@
 	   --- 4.1 Kernel Includes
 	   --- 4.2 Single Subdirectory
 	   --- 4.3 Several Subdirectories
+	   --- 4.4 UAPI Headers Installation
 	=== 5. Module Installation
 	   --- 5.1 INSTALL_MOD_PATH
 	   --- 5.2 INSTALL_MOD_DIR
@@ -131,6 +132,10 @@
 		/lib/modules/<kernel_release>/extra/, but a prefix may
 		be added with INSTALL_MOD_PATH (discussed in section 5).
 
+	headers_install
+		Export headers in a format suitable for userspace. The default
+		location is $PWD/usr. INSTALL_HDR_PATH can change this path.
+
 	clean
 		Remove all generated files in the module directory only.
 
@@ -406,6 +411,17 @@
 	pointing to the directory where the currently executing kbuild
 	file is located.
 
+4.4 UAPI Headers Installation
+-----------------------------
+
+	External modules may export headers to userspace in a similar
+	fashion to their in-tree counterparts. kbuild supports running
+	the headers_install target for an out-of-tree module. The
+	locations where kbuild searches for headers are $(M)/include/uapi
+	and $(M)/arch/$(SRCARCH)/include/uapi, as shown in the example
+	below.
+
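+	For example, assuming the external module lives in the current
+	directory and KDIR points at the kernel tree (both names are
+	placeholders)::
+
+		$ make -C $KDIR M=$PWD headers_install
+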
+	See also Documentation/kbuild/headers_install.rst.
+
 
 5. Module Installation
 ======================
diff --git a/Documentation/scheduler/sched-energy.rst b/Documentation/scheduler/sched-energy.rst
index 001e09c..1f5792c 100644
--- a/Documentation/scheduler/sched-energy.rst
+++ b/Documentation/scheduler/sched-energy.rst
@@ -397,7 +397,7 @@
 because it is the only one providing some degree of consistency between
 frequency requests and energy predictions.
 
-Using EAS with any other governor than schedutil is not supported.
+Using EAS with any other governor than schedutil is not recommended.
 
 
 6.5 Scale-invariant utilization signals
diff --git a/MAINTAINERS b/MAINTAINERS
index 2daa6ee..fe4521a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8628,6 +8628,13 @@
 F:	drivers/hwmon/ina2xx.c
 F:	include/linux/platform_data/ina2xx.h
 
+INCREMENTAL FILE SYSTEM
+M:	Paul Lawrence <paullawrence@google.com>
+L:	linux-unionfs@vger.kernel.org
+S:	Supported
+F:	fs/incfs/
+F:	tools/testing/selftests/filesystems/incfs/
+
 INDUSTRY PACK SUBSYSTEM (IPACK)
 M:	Samuel Iglesias Gonsalvez <siglesias@igalia.com>
 M:	Jens Taprogge <jens.taprogge@taprogge.org>
@@ -12358,6 +12365,12 @@
 F:	Documentation/scsi/NinjaSCSI.rst
 F:	drivers/scsi/nsp32*
 
+NINTENDO HID DRIVER
+M:	Daniel J. Ogorchock <djogorchock@gmail.com>
+L:	linux-input@vger.kernel.org
+S:	Maintained
+F:	drivers/hid/hid-nintendo*
+
 NIOS2 ARCHITECTURE
 M:	Ley Foon Tan <ley.foon.tan@intel.com>
 S:	Maintained
diff --git a/Makefile b/Makefile
index 43ecede..5edb115 100644
--- a/Makefile
+++ b/Makefile
@@ -434,6 +434,7 @@
 OBJDUMP		= llvm-objdump
 READELF		= llvm-readelf
 STRIP		= llvm-strip
+KBUILD_HOSTLDFLAGS	+= -fuse-ld=lld --rtlib=compiler-rt
 else
 CC		= $(CROSS_COMPILE)gcc
 LD		= $(CROSS_COMPILE)ld
@@ -460,7 +461,7 @@
 KBZIP2		= bzip2
 KLZOP		= lzop
 LZMA		= lzma
-LZ4		= lz4c
+LZ4		= lz4
 XZ		= xz
 ZSTD		= zstd
 
@@ -826,7 +827,9 @@
 DEBUG_CFLAGS	+= -g
 endif
 
+ifneq ($(LLVM_IAS),1)
 KBUILD_AFLAGS	+= -Wa,-gdwarf-2
+endif
 
 ifdef CONFIG_DEBUG_INFO_DWARF4
 DEBUG_CFLAGS	+= -gdwarf-4
@@ -1093,6 +1096,41 @@
 export MODORDER := $(extmod-prefix)modules.order
 export MODULES_NSDEPS := $(extmod-prefix)modules.nsdeps
 
+# ---------------------------------------------------------------------------
+# Kernel headers
+
+PHONY += headers
+
+#Default location for installed headers
+ifeq ($(KBUILD_EXTMOD),)
+PHONY += archheaders archscripts
+hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
+headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts
+else
+hdr-prefix = $(KBUILD_EXTMOD)/
+hdr-inst := -f $(srctree)/scripts/Makefile.headersinst dst=$(KBUILD_EXTMOD)/usr/include objtree=$(objtree)/$(KBUILD_EXTMOD) obj
+endif
+
+export INSTALL_HDR_PATH = $(objtree)/$(hdr-prefix)usr
+
+quiet_cmd_headers_install = INSTALL $(INSTALL_HDR_PATH)/include
+      cmd_headers_install = \
+	mkdir -p $(INSTALL_HDR_PATH); \
+	rsync -mrl --include='*/' --include='*\.h' --exclude='*' \
+	usr/include $(INSTALL_HDR_PATH)
+
+PHONY += headers_install
+headers_install: headers
+	$(call cmd,headers_install)
+
+headers:
+ifeq ($(KBUILD_EXTMOD),)
+	$(if $(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/Kbuild),, \
+	  $(error Headers not exportable for the $(SRCARCH) architecture))
+endif
+	$(Q)$(MAKE) $(hdr-inst)=$(hdr-prefix)include/uapi
+	$(Q)$(MAKE) $(hdr-inst)=$(hdr-prefix)arch/$(SRCARCH)/include/uapi
+
 ifeq ($(KBUILD_EXTMOD),)
 core-y		+= kernel/ certs/ mm/ fs/ ipc/ security/ crypto/ block/
 
@@ -1168,7 +1206,8 @@
 $(sort $(vmlinux-deps) $(subdir-modorder)): descend ;
 
 filechk_kernel.release = \
-	echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
+	echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion \
+		$(srctree) $(BRANCH) $(KMI_GENERATION))"
 
 # Store (new) KERNELRELEASE string in include/config/kernel.release
 include/config/kernel.release: FORCE
@@ -1234,12 +1273,17 @@
 # needs to be updated, so this check is forced on all builds
 
 uts_len := 64
+ifneq (,$(BUILD_NUMBER))
+	UTS_RELEASE=$(KERNELRELEASE)-ab$(BUILD_NUMBER)
+else
+	UTS_RELEASE=$(KERNELRELEASE)
+endif
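+# e.g. KERNELRELEASE=5.10.0-mainline with BUILD_NUMBER=123456 yields
+# UTS_RELEASE=5.10.0-mainline-ab123456 (values are illustrative)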
 define filechk_utsrelease.h
-	if [ `echo -n "$(KERNELRELEASE)" | wc -c ` -gt $(uts_len) ]; then \
-	  echo '"$(KERNELRELEASE)" exceeds $(uts_len) characters' >&2;    \
-	  exit 1;                                                         \
-	fi;                                                               \
-	echo \#define UTS_RELEASE \"$(KERNELRELEASE)\"
+	if [ `echo -n "$(UTS_RELEASE)" | wc -c ` -gt $(uts_len) ]; then \
+		echo '"$(UTS_RELEASE)" exceeds $(uts_len) characters' >&2;    \
+		exit 1;                                                       \
+	fi;                                                             \
+	echo \#define UTS_RELEASE \"$(UTS_RELEASE)\"
 endef
 
 define filechk_version.h
@@ -1260,33 +1304,6 @@
 	$(Q)find $(srctree)/include/ -name '*.h' | xargs --max-args 1 \
 	$(srctree)/scripts/headerdep.pl -I$(srctree)/include
 
-# ---------------------------------------------------------------------------
-# Kernel headers
-
-#Default location for installed headers
-export INSTALL_HDR_PATH = $(objtree)/usr
-
-quiet_cmd_headers_install = INSTALL $(INSTALL_HDR_PATH)/include
-      cmd_headers_install = \
-	mkdir -p $(INSTALL_HDR_PATH); \
-	rsync -mrl --include='*/' --include='*\.h' --exclude='*' \
-	usr/include $(INSTALL_HDR_PATH)
-
-PHONY += headers_install
-headers_install: headers
-	$(call cmd,headers_install)
-
-PHONY += archheaders archscripts
-
-hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
-
-PHONY += headers
-headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts
-	$(if $(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/Kbuild),, \
-	  $(error Headers not exportable for the $(SRCARCH) architecture))
-	$(Q)$(MAKE) $(hdr-inst)=include/uapi
-	$(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi
-
 # Deprecated. It is no-op now.
 PHONY += headers_check
 headers_check:
@@ -1730,6 +1747,8 @@
 	@echo  ''
 	@echo  '  modules         - default target, build the module(s)'
 	@echo  '  modules_install - install the module'
+	@echo  '  headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'
+	@echo  '                    (default: $(abspath $(INSTALL_HDR_PATH)))'
 	@echo  '  clean           - remove generated files in module directory only'
 	@echo  ''
 
@@ -1900,7 +1919,8 @@
 	$(PERL) $(srctree)/scripts/checkstack.pl $(CHECKSTACK_ARCH)
 
 kernelrelease:
-	@echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
+	@echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion \
+		$(srctree) $(BRANCH) $(KMI_GENERATION))"
 
 kernelversion:
 	@echo $(KERNELVERSION)
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..468ac20
--- /dev/null
+++ b/README.md
@@ -0,0 +1,150 @@
+# How do I submit patches to Android Common Kernels
+
+1. BEST: Make all of your changes to upstream Linux. If appropriate, backport to the stable releases.
+   These patches will be merged automatically in the corresponding common kernels. If the patch is already
+   in upstream Linux, post a backport of the patch that conforms to the patch requirements below.
+   - Do not send patches upstream that contain only symbol exports. To be considered for upstream Linux,
+additions of `EXPORT_SYMBOL_GPL()` require an in-tree modular driver that uses the symbol -- so include
+the new driver or changes to an existing driver in the same patchset as the export.
+   - When sending patches upstream, the commit message must contain a clear case for why the patch
+is needed and beneficial to the community. Enabling out-of-tree drivers or functionality is not
+a persuasive case.
+
+2. LESS GOOD: Develop your patches out-of-tree (from an upstream Linux point-of-view). Unless these are
+   fixing an Android-specific bug, these are very unlikely to be accepted unless they have been
+   coordinated with kernel-team@android.com. If you want to proceed, post a patch that conforms to the
+   patch requirements below.
+
+# Common Kernel patch requirements
+
+- All patches must conform to the Linux kernel coding standards and pass `scripts/checkpatch.pl`
+- Patches shall not break gki_defconfig or allmodconfig builds for arm, arm64, x86, x86_64 architectures
+(see https://source.android.com/setup/build/building-kernels)
+- If the patch is not merged from an upstream branch, the subject must be tagged with the type of patch:
+`UPSTREAM:`, `BACKPORT:`, `FROMGIT:`, `FROMLIST:`, or `ANDROID:`.
+- All patches must have a `Change-Id:` tag (see https://gerrit-review.googlesource.com/Documentation/user-changeid.html)
+- If an Android bug has been assigned, there must be a `Bug:` tag.
+- All patches must have a `Signed-off-by:` tag by the author and the submitter
+
+Additional requirements are listed below based on patch type
+
+## Requirements for backports from mainline Linux: `UPSTREAM:`, `BACKPORT:`
+
+- If the patch is a cherry-pick from Linux mainline with no changes at all
+    - tag the patch subject with `UPSTREAM:`.
+    - add upstream commit information with a `(cherry picked from commit ...)` line
+    - Example:
+        - if the upstream commit message is
+```
+        important patch from upstream
+
+        This is the detailed description of the important patch
+
+        Signed-off-by: Fred Jones <fred.jones@foo.org>
+```
+>- then Joe Smith would upload the patch for the common kernel as
+```
+        UPSTREAM: important patch from upstream
+
+        This is the detailed description of the important patch
+
+        Signed-off-by: Fred Jones <fred.jones@foo.org>
+
+        Bug: 135791357
+        Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+        (cherry picked from commit c31e73121f4c1ec41143423ac6ce3ce6dafdcec1)
+        Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+- If the patch requires any changes from the upstream version, tag the patch with `BACKPORT:`
+instead of `UPSTREAM:`.
+    - use the same tags as `UPSTREAM:`
+    - add comments about the changes under the `(cherry picked from commit ...)` line
+    - Example:
+```
+        BACKPORT: important patch from upstream
+
+        This is the detailed description of the important patch
+
+        Signed-off-by: Fred Jones <fred.jones@foo.org>
+
+        Bug: 135791357
+        Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+        (cherry picked from commit c31e73121f4c1ec41143423ac6ce3ce6dafdcec1)
+        [joe: Resolved minor conflict in drivers/foo/bar.c ]
+        Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+## Requirements for other backports: `FROMGIT:`, `FROMLIST:`,
+
+- If the patch has been merged into an upstream maintainer tree, but has not yet
+been merged into Linux mainline
+    - tag the patch subject with `FROMGIT:`
+    - add info on where the patch came from as `(cherry picked from commit <sha1> <repo> <branch>)`. This
+must be a stable maintainer branch (not rebased, so don't use `linux-next` for example).
+    - if changes were required, use `BACKPORT: FROMGIT:`
+    - Example:
+        - if the commit message in the maintainer tree is
+```
+        important patch from upstream
+
+        This is the detailed description of the important patch
+
+        Signed-off-by: Fred Jones <fred.jones@foo.org>
+```
+>- then Joe Smith would upload the patch for the common kernel as
+```
+        FROMGIT: important patch from upstream
+
+        This is the detailed description of the important patch
+
+        Signed-off-by: Fred Jones <fred.jones@foo.org>
+
+        Bug: 135791357
+        (cherry picked from commit 878a2fd9de10b03d11d2f622250285c7e63deace
+         https://git.kernel.org/pub/scm/linux/kernel/git/foo/bar.git test-branch)
+        Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+        Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+
+- If the patch has been submitted to LKML, but not accepted into any maintainer tree
+    - tag the patch subject with `FROMLIST:`
+    - add a `Link:` tag with a link to the submittal on lore.kernel.org
+    - add a `Bug:` tag with the Android bug (required for patches not accepted into
+a maintainer tree)
+    - if changes were required, use `BACKPORT: FROMLIST:`
+    - Example:
+```
+        FROMLIST: important patch from upstream
+
+        This is the detailed description of the important patch
+
+        Signed-off-by: Fred Jones <fred.jones@foo.org>
+
+        Bug: 135791357
+        Link: https://lore.kernel.org/lkml/20190619171517.GA17557@someone.com/
+        Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+        Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+## Requirements for Android-specific patches: `ANDROID:`
+
+- If the patch is fixing a bug to Android-specific code
+    - tag the patch subject with `ANDROID:`
+    - add a `Fixes:` tag that cites the patch with the bug
+    - Example:
+```
+        ANDROID: fix android-specific bug in foobar.c
+
+        This is the detailed description of the important fix
+
+        Fixes: 1234abcd2468 ("foobar: add cool feature")
+        Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+        Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+- If the patch is a new feature
+    - tag the patch subject with `ANDROID:`
+    - add a `Bug:` tag with the Android bug (required for android-specific features)
+
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index 66f6a3a..98b3734 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -13,7 +13,6 @@
 extern void cpu_init(void);
 
 void soft_restart(unsigned long);
-extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 extern void (*arm_pm_idle)(void);
 
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
diff --git a/arch/arm/include/uapi/asm/setup.h b/arch/arm/include/uapi/asm/setup.h
index 25ceda6..345bf0a 100644
--- a/arch/arm/include/uapi/asm/setup.h
+++ b/arch/arm/include/uapi/asm/setup.h
@@ -17,7 +17,7 @@
 
 #include <linux/types.h>
 
-#define COMMAND_LINE_SIZE 1024
+#define COMMAND_LINE_SIZE 2048
 
 /* The list ends with an ATAG_NONE node. */
 #define ATAG_NONE	0x00000000
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 0ce388f..3044fcb 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -18,7 +18,6 @@
 /*
  * Function pointers to optional machine specific functions
  */
-void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
@@ -138,10 +137,7 @@
 	local_irq_disable();
 	smp_send_stop();
 
-	if (arm_pm_restart)
-		arm_pm_restart(reboot_mode, cmd);
-	else
-		do_kernel_restart(cmd);
+	do_kernel_restart(cmd);
 
 	/* Give a grace period for failure to restart of 1s */
 	mdelay(1000);
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 3f65d0ac..50b1803 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -1079,6 +1079,20 @@
 #endif
 }
 
+static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+
+static int arm_restart(struct notifier_block *nb, unsigned long action,
+		       void *data)
+{
+	__arm_pm_restart(action, data);
+	return NOTIFY_DONE;
+}
+
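+/* 128 is the default priority for restart handlers. */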
+static struct notifier_block arm_restart_nb = {
+	.notifier_call = arm_restart,
+	.priority = 128,
+};
+
 void __init setup_arch(char **cmdline_p)
 {
 	const struct machine_desc *mdesc;
@@ -1138,8 +1152,10 @@
 	paging_init(mdesc);
 	request_standard_resources(mdesc);
 
-	if (mdesc->restart)
-		arm_pm_restart = mdesc->restart;
+	if (mdesc->restart) {
+		__arm_pm_restart = mdesc->restart;
+		register_restart_handler(&arm_restart_nb);
+	}
 
 	unflatten_device_tree();
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 48099c6e..e3ae634 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -51,6 +51,10 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
 
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_raise);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_exit);
+
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
  * so we need some other way of telling a new secondary core
diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c
index 9d56606..825dd5f 100644
--- a/arch/arm/mach-prima2/rstc.c
+++ b/arch/arm/mach-prima2/rstc.c
@@ -64,11 +64,18 @@
 
 #define SIRFSOC_SYS_RST_BIT  BIT(31)
 
-static void sirfsoc_restart(enum reboot_mode mode, const char *cmd)
+static int sirfsoc_restart(struct notifier_block *nb, unsigned long action,
+			   void *data)
 {
 	writel(SIRFSOC_SYS_RST_BIT, sirfsoc_rstc_base);
+	return NOTIFY_DONE;
 }
 
+static struct notifier_block sirfsoc_restart_nb = {
+	.notifier_call  = sirfsoc_restart,
+	.priority       = 192,
+};
+
 static int sirfsoc_rstc_probe(struct platform_device *pdev)
 {
 	struct device_node *np = pdev->dev.of_node;
@@ -79,7 +86,7 @@
 	}
 
 	sirfsoc_reset_controller.of_node = np;
-	arm_pm_restart = sirfsoc_restart;
+	register_restart_handler(&sirfsoc_restart_nb);
 
 	if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
 		reset_controller_register(&sirfsoc_reset_controller);
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 60e901c..b81231e 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -29,6 +29,7 @@
 #include <linux/cpu.h>
 #include <linux/console.h>
 #include <linux/pvclock_gtod.h>
+#include <linux/reboot.h>
 #include <linux/time64.h>
 #include <linux/timekeeping.h>
 #include <linux/timekeeper_internal.h>
@@ -181,11 +182,18 @@
 	BUG_ON(rc);
 }
 
-static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
+static int xen_restart(struct notifier_block *nb, unsigned long action,
+		       void *data)
 {
 	xen_reboot(SHUTDOWN_reboot);
+
+	return NOTIFY_DONE;
 }
 
+static struct notifier_block xen_restart_nb = {
+	.notifier_call = xen_restart,
+	.priority = 192,
+};
 
 static void xen_power_off(void)
 {
@@ -406,7 +414,7 @@
 		return -ENODEV;
 
 	pm_power_off = xen_power_off;
-	arm_pm_restart = xen_restart;
+	register_restart_handler(&xen_restart_nb);
 	if (!xen_initial_domain()) {
 		struct timespec64 ts;
 		xen_read_wallclock(&ts);
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 1515f6f..d6e49ac 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1846,6 +1846,23 @@
 	  entering them here. As a minimum, you should specify the
 	  root device (e.g. root=/dev/nfs).
 
+choice
+	prompt "Kernel command line type" if CMDLINE != ""
+	default CMDLINE_FROM_BOOTLOADER
+
+config CMDLINE_FROM_BOOTLOADER
+	bool "Use bootloader kernel arguments if available"
+	help
+	  Uses the command-line options passed by the boot loader. If
+	  the boot loader doesn't provide any, the default kernel command
+	  string provided in CMDLINE will be used.
+
+config CMDLINE_EXTEND
+	bool "Extend bootloader kernel arguments"
+	help
+	  The command-line arguments provided by the boot loader will be
+	  appended to the default kernel command string.
+
 config CMDLINE_FORCE
 	bool "Always use the default kernel command string"
 	depends on CMDLINE != ""
@@ -1854,6 +1871,7 @@
 	  loader passes other arguments to the kernel.
 	  This is useful if you cannot or don't want to change the
 	  command-line options your boot loader passes to the kernel.
+endchoice
 
 config EFI_STUB
 	bool
@@ -1890,6 +1908,21 @@
 
 endmenu
 
+config ASYMMETRIC_AARCH32
+	bool "Allow asymmetric AArch32 EL0 CPU configurations"
+	depends on COMPAT && EXPERT
+	help
+	  Enable this option to allow support for asymmetric AArch32 EL0
+	  CPU configurations. Once AArch32 EL0 support is detected on a
+	  CPU, the feature is made available to user space to allow the
+	  execution of 32-bit (compat) applications by migrating them to
+	  the capable CPUs. To prevent a running 32-bit application from
+	  being killed, you will not be able to offline all such CPUs;
+	  i.e. we guarantee there will always be at least one 32-bit
+	  capable CPU online.
+
+	  If unsure say N.
+
 config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
diff --git a/arch/arm64/configs/db845c_gki.fragment b/arch/arm64/configs/db845c_gki.fragment
new file mode 100644
index 0000000..3fc3c24
--- /dev/null
+++ b/arch/arm64/configs/db845c_gki.fragment
@@ -0,0 +1,272 @@
+CONFIG_QRTR=m
+CONFIG_QRTR_TUN=m
+CONFIG_SCSI_UFS_QCOM=m
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_AX88179_178A=m
+CONFIG_INPUT_PM8941_PWRKEY=m
+CONFIG_SERIAL_MSM=m
+CONFIG_SERIAL_QCOM_GENI=m
+CONFIG_SERIAL_QCOM_GENI_CONSOLE=y
+CONFIG_I2C_QCOM_GENI=m
+CONFIG_I2C_QUP=m
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=m
+CONFIG_PINCTRL_SDM845=m
+CONFIG_POWER_RESET_QCOM_PON=m
+CONFIG_SYSCON_REBOOT_MODE=m
+CONFIG_QCOM_WDT=m
+CONFIG_PM8916_WATCHDOG=m
+CONFIG_MFD_SPMI_PMIC=m
+CONFIG_SPMI_MSM_PMIC_ARB=m
+CONFIG_REGULATOR_QCOM_RPMH=m
+CONFIG_REGULATOR_QCOM_SPMI=m
+CONFIG_DRM_MSM=m
+# CONFIG_DRM_MSM_DSI_28NM_PHY is not set
+# CONFIG_DRM_MSM_DSI_20NM_PHY is not set
+# CONFIG_DRM_MSM_DSI_28NM_8960_PHY is not set
+CONFIG_DRM_LONTIUM_LT9611=m
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_OHCI_HCD_PLATFORM=m
+CONFIG_USB_DWC3=m
+# CONFIG_USB_DWC3_HAPS is not set
+# CONFIG_USB_DWC3_OF_SIMPLE is not set
+CONFIG_USB_GADGET_VBUS_DRAW=500
+# CONFIG_USB_DUMMY_HCD is not set
+CONFIG_USB_ROLE_SWITCH=m
+CONFIG_USB_ULPI_BUS=m
+CONFIG_MMC_SDHCI_MSM=m
+CONFIG_RTC_DRV_PM8XXX=m
+CONFIG_COMMON_CLK_QCOM=m
+CONFIG_SDM_GPUCC_845=m
+CONFIG_QCOM_CLK_RPMH=m
+CONFIG_SDM_DISPCC_845=m
+CONFIG_HWSPINLOCK_QCOM=m
+CONFIG_QCOM_GENI_SE=m
+CONFIG_QCOM_LLCC=m
+CONFIG_QCOM_RMTFS_MEM=m
+CONFIG_QCOM_SMEM=m
+CONFIG_QCOM_SMSM=m
+CONFIG_EXTCON_USB_GPIO=m
+CONFIG_RESET_QCOM_AOSS=m
+CONFIG_RESET_QCOM_PDC=m
+CONFIG_PHY_QCOM_QMP=m
+CONFIG_PHY_QCOM_QUSB2=m
+CONFIG_PHY_QCOM_USB_HS=m
+CONFIG_QCOM_QFPROM=m
+CONFIG_INTERCONNECT_QCOM=y
+CONFIG_INTERCONNECT_QCOM_OSM_L3=m
+CONFIG_INTERCONNECT_QCOM_SDM845=m
+CONFIG_QCOM_RPMH=m
+CONFIG_QCOM_RPMHPD=m
+CONFIG_WLAN_VENDOR_ATH=y
+CONFIG_ATH10K_AHB=y
+CONFIG_ATH10K=m
+CONFIG_ATH10K_PCI=m
+CONFIG_ATH10K_SNOC=m
+CONFIG_QRTR_SMD=m
+CONFIG_QCOM_FASTRPC=m
+CONFIG_QCOM_APCS_IPC=m
+CONFIG_QCOM_Q6V5_COMMON=m
+CONFIG_QCOM_RPROC_COMMON=m
+CONFIG_QCOM_Q6V5_ADSP=m
+CONFIG_QCOM_Q6V5_MSS=m
+CONFIG_QCOM_Q6V5_PAS=m
+CONFIG_QCOM_Q6V5_WCSS=m
+CONFIG_QCOM_SYSMON=m
+CONFIG_RPMSG_QCOM_GLINK_SMEM=m
+CONFIG_RPMSG_QCOM_SMD=m
+CONFIG_QCOM_AOSS_QMP=m
+CONFIG_QCOM_SMP2P=m
+CONFIG_QCOM_SOCINFO=m
+CONFIG_QCOM_APR=m
+CONFIG_QCOM_GLINK_SSR=m
+CONFIG_RPMSG_QCOM_GLINK_RPM=m
+CONFIG_QCOM_PDC=m
+CONFIG_QCOM_SCM=m
+CONFIG_ARM_SMMU=m
+CONFIG_ARM_QCOM_CPUFREQ_HW=m
+# XXX Audio bits start here
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_MUX=m
+CONFIG_I2C_MUX_PCA954x=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=m
+CONFIG_I2C_RK3X=m
+CONFIG_SPI_PL022=m
+CONFIG_SPI_QCOM_QSPI=m
+CONFIG_SPI_QUP=m
+CONFIG_SPI_QCOM_GENI=m
+CONFIG_GPIO_WCD934X=m
+CONFIG_MFD_WCD934X=m
+CONFIG_REGULATOR_GPIO=m
+CONFIG_SND_SOC_QCOM=m
+CONFIG_SND_SOC_QCOM_COMMON=m
+CONFIG_SND_SOC_QDSP6_COMMON=m
+CONFIG_SND_SOC_QDSP6_CORE=m
+CONFIG_SND_SOC_QDSP6_AFE=m
+CONFIG_SND_SOC_QDSP6_AFE_DAI=m
+CONFIG_SND_SOC_QDSP6_ADM=m
+CONFIG_SND_SOC_QDSP6_ROUTING=m
+CONFIG_SND_SOC_QDSP6_ASM=m
+CONFIG_SND_SOC_QDSP6_ASM_DAI=m
+CONFIG_SND_SOC_QDSP6=m
+CONFIG_SND_SOC_SDM845=m
+CONFIG_SND_SOC_DMIC=m
+CONFIG_SND_SOC_WCD9335=m
+CONFIG_SND_SOC_WCD934X=m
+CONFIG_SND_SOC_WSA881X=m
+CONFIG_QCOM_BAM_DMA=m
+CONFIG_SPMI_PMIC_CLKDIV=m
+CONFIG_SOUNDWIRE=m
+CONFIG_SOUNDWIRE_QCOM=m
+CONFIG_SLIMBUS=m
+CONFIG_SLIM_QCOM_NGD_CTRL=m
+# CONFIG_CXD2880_SPI_DRV is not set
+# CONFIG_MEDIA_TUNER_SIMPLE is not set
+# CONFIG_MEDIA_TUNER_TDA18250 is not set
+# CONFIG_MEDIA_TUNER_TDA8290 is not set
+# CONFIG_MEDIA_TUNER_TDA827X is not set
+# CONFIG_MEDIA_TUNER_TDA18271 is not set
+# CONFIG_MEDIA_TUNER_TDA9887 is not set
+# CONFIG_MEDIA_TUNER_TEA5761 is not set
+# CONFIG_MEDIA_TUNER_TEA5767 is not set
+# CONFIG_MEDIA_TUNER_MSI001 is not set
+# CONFIG_MEDIA_TUNER_MT20XX is not set
+# CONFIG_MEDIA_TUNER_MT2060 is not set
+# CONFIG_MEDIA_TUNER_MT2063 is not set
+# CONFIG_MEDIA_TUNER_MT2266 is not set
+# CONFIG_MEDIA_TUNER_MT2131 is not set
+# CONFIG_MEDIA_TUNER_QT1010 is not set
+# CONFIG_MEDIA_TUNER_XC2028 is not set
+# CONFIG_MEDIA_TUNER_XC5000 is not set
+# CONFIG_MEDIA_TUNER_XC4000 is not set
+# CONFIG_MEDIA_TUNER_MXL5005S is not set
+# CONFIG_MEDIA_TUNER_MXL5007T is not set
+# CONFIG_MEDIA_TUNER_MC44S803 is not set
+# CONFIG_MEDIA_TUNER_MAX2165 is not set
+# CONFIG_MEDIA_TUNER_TDA18218 is not set
+# CONFIG_MEDIA_TUNER_FC0011 is not set
+# CONFIG_MEDIA_TUNER_FC0012 is not set
+# CONFIG_MEDIA_TUNER_FC0013 is not set
+# CONFIG_MEDIA_TUNER_TDA18212 is not set
+# CONFIG_MEDIA_TUNER_E4000 is not set
+# CONFIG_MEDIA_TUNER_FC2580 is not set
+# CONFIG_MEDIA_TUNER_M88RS6000T is not set
+# CONFIG_MEDIA_TUNER_TUA9001 is not set
+# CONFIG_MEDIA_TUNER_SI2157 is not set
+# CONFIG_MEDIA_TUNER_IT913X is not set
+# CONFIG_MEDIA_TUNER_R820T is not set
+# CONFIG_MEDIA_TUNER_MXL301RF is not set
+# CONFIG_MEDIA_TUNER_QM1D1C0042 is not set
+# CONFIG_MEDIA_TUNER_QM1D1B0004 is not set
+# CONFIG_DVB_STB0899 is not set
+# CONFIG_DVB_STB6100 is not set
+# CONFIG_DVB_STV090x is not set
+# CONFIG_DVB_STV0910 is not set
+# CONFIG_DVB_STV6110x is not set
+# CONFIG_DVB_STV6111 is not set
+# CONFIG_DVB_MXL5XX is not set
+# CONFIG_DVB_M88DS3103 is not set
+# CONFIG_DVB_DRXK is not set
+# CONFIG_DVB_TDA18271C2DD is not set
+# CONFIG_DVB_SI2165 is not set
+# CONFIG_DVB_MN88472 is not set
+# CONFIG_DVB_MN88473 is not set
+# CONFIG_DVB_CX24110 is not set
+# CONFIG_DVB_CX24123 is not set
+# CONFIG_DVB_MT312 is not set
+# CONFIG_DVB_ZL10036 is not set
+# CONFIG_DVB_ZL10039 is not set
+# CONFIG_DVB_S5H1420 is not set
+# CONFIG_DVB_STV0288 is not set
+# CONFIG_DVB_STB6000 is not set
+# CONFIG_DVB_STV0299 is not set
+# CONFIG_DVB_STV6110 is not set
+# CONFIG_DVB_STV0900 is not set
+# CONFIG_DVB_TDA8083 is not set
+# CONFIG_DVB_TDA10086 is not set
+# CONFIG_DVB_TDA8261 is not set
+# CONFIG_DVB_VES1X93 is not set
+# CONFIG_DVB_TUNER_ITD1000 is not set
+# CONFIG_DVB_TUNER_CX24113 is not set
+# CONFIG_DVB_TDA826X is not set
+# CONFIG_DVB_TUA6100 is not set
+# CONFIG_DVB_CX24116 is not set
+# CONFIG_DVB_CX24117 is not set
+# CONFIG_DVB_CX24120 is not set
+# CONFIG_DVB_SI21XX is not set
+# CONFIG_DVB_TS2020 is not set
+# CONFIG_DVB_DS3000 is not set
+# CONFIG_DVB_MB86A16 is not set
+# CONFIG_DVB_TDA10071 is not set
+# CONFIG_DVB_SP8870 is not set
+# CONFIG_DVB_SP887X is not set
+# CONFIG_DVB_CX22700 is not set
+# CONFIG_DVB_CX22702 is not set
+# CONFIG_DVB_S5H1432 is not set
+# CONFIG_DVB_DRXD is not set
+# CONFIG_DVB_L64781 is not set
+# CONFIG_DVB_TDA1004X is not set
+# CONFIG_DVB_NXT6000 is not set
+# CONFIG_DVB_MT352 is not set
+# CONFIG_DVB_ZL10353 is not set
+# CONFIG_DVB_DIB3000MB is not set
+# CONFIG_DVB_DIB3000MC is not set
+# CONFIG_DVB_DIB7000M is not set
+# CONFIG_DVB_DIB7000P is not set
+# CONFIG_DVB_DIB9000 is not set
+# CONFIG_DVB_TDA10048 is not set
+# CONFIG_DVB_AF9013 is not set
+# CONFIG_DVB_EC100 is not set
+# CONFIG_DVB_STV0367 is not set
+# CONFIG_DVB_CXD2820R is not set
+# CONFIG_DVB_CXD2841ER is not set
+# CONFIG_DVB_RTL2830 is not set
+# CONFIG_DVB_RTL2832 is not set
+# CONFIG_DVB_RTL2832_SDR is not set
+# CONFIG_DVB_SI2168 is not set
+# CONFIG_DVB_ZD1301_DEMOD is not set
+# CONFIG_DVB_CXD2880 is not set
+# CONFIG_DVB_VES1820 is not set
+# CONFIG_DVB_TDA10021 is not set
+# CONFIG_DVB_TDA10023 is not set
+# CONFIG_DVB_STV0297 is not set
+# CONFIG_DVB_NXT200X is not set
+# CONFIG_DVB_OR51211 is not set
+# CONFIG_DVB_OR51132 is not set
+# CONFIG_DVB_BCM3510 is not set
+# CONFIG_DVB_LGDT330X is not set
+# CONFIG_DVB_LGDT3305 is not set
+# CONFIG_DVB_LGDT3306A is not set
+# CONFIG_DVB_LG2160 is not set
+# CONFIG_DVB_S5H1409 is not set
+# CONFIG_DVB_AU8522_DTV is not set
+# CONFIG_DVB_AU8522_V4L is not set
+# CONFIG_DVB_S5H1411 is not set
+# CONFIG_DVB_S921 is not set
+# CONFIG_DVB_DIB8000 is not set
+# CONFIG_DVB_MB86A20S is not set
+# CONFIG_DVB_TC90522 is not set
+# CONFIG_DVB_MN88443X is not set
+# CONFIG_DVB_PLL is not set
+# CONFIG_DVB_TUNER_DIB0070 is not set
+# CONFIG_DVB_TUNER_DIB0090 is not set
+# CONFIG_DVB_DRX39XYJ is not set
+# CONFIG_DVB_LNBH25 is not set
+# CONFIG_DVB_LNBH29 is not set
+# CONFIG_DVB_LNBP21 is not set
+# CONFIG_DVB_LNBP22 is not set
+# CONFIG_DVB_ISL6405 is not set
+# CONFIG_DVB_ISL6421 is not set
+# CONFIG_DVB_ISL6423 is not set
+# CONFIG_DVB_A8293 is not set
+# CONFIG_DVB_LGS8GL5 is not set
+# CONFIG_DVB_LGS8GXX is not set
+# CONFIG_DVB_ATBM8830 is not set
+# CONFIG_DVB_TDA665x is not set
+# CONFIG_DVB_IX2505V is not set
+# CONFIG_DVB_M88RS2000 is not set
+# CONFIG_DVB_AF9033 is not set
+# CONFIG_DVB_HORUS3A is not set
+# CONFIG_DVB_ASCOT2E is not set
+# CONFIG_DVB_HELENE is not set
+# CONFIG_DVB_CXD2099 is not set
+# CONFIG_DVB_SP2 is not set
diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig
new file mode 100644
index 0000000..9780171
--- /dev/null
+++ b/arch/arm64/configs/gki_defconfig
@@ -0,0 +1,608 @@
+CONFIG_LOCALVERSION="-mainline"
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKHEADERS=y
+CONFIG_UCLAMP_TASK=y
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_SCHED=y
+# CONFIG_FAIR_GROUP_SCHED is not set
+CONFIG_UCLAMP_TASK_GROUP=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+# CONFIG_PID_NS is not set
+CONFIG_RT_SOFTINT_OPTIMIZATION=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+CONFIG_BOOT_CONFIG=y
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+# CONFIG_RSEQ is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
+CONFIG_PROFILING=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_QCOM=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=32
+CONFIG_PARAVIRT=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_COMPAT=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
+CONFIG_CMDLINE="rw"
+CONFIG_CMDLINE_EXTEND=y
+# CONFIG_DMI is not set
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_ENERGY_MODEL=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_ARM_PSCI_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_SCMI_CPUFREQ=y
+CONFIG_ARM_SCMI_PROTOCOL=y
+# CONFIG_ARM_SCMI_POWER_DOMAIN is not set
+CONFIG_ARM_SCPI_PROTOCOL=y
+# CONFIG_ARM_SCPI_POWER_DOMAIN is not set
+# CONFIG_EFI_ARMSTUB_DTB_LOADER is not set
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_SHADOW_CALL_STACK=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
+CONFIG_GKI_HACKS_TO_FIX=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_AREAS=16
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_NET_IPIP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_ESP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_GRE=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_TIPC=y
+CONFIG_L2TP=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_VSOCKETS=y
+CONFIG_BPF_JIT=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIBTSDIO=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_BT_HCIUART_QCA=y
+CONFIG_CFG80211=y
+# CONFIG_CFG80211_DEFAULT_PS is not set
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_MAC80211=y
+CONFIG_RFKILL=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIE_DW_PLAT_EP=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCI_ENDPOINT=y
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_FW_CACHE is not set
+# CONFIG_SUN50I_DE2_BUS is not set
+# CONFIG_SUNXI_RSB is not set
+CONFIG_GNSS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PCI=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_DWC_TC_PLATFORM=y
+CONFIG_SCSI_UFS_HISI=y
+CONFIG_SCSI_UFS_BSG=y
+CONFIG_SCSI_UFS_CRYPTO=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_SNAPSHOT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_AVB=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_BOW=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_WIREGUARD=y
+CONFIG_TUN=y
+CONFIG_VETH=y
+# CONFIG_ETHERNET is not set
+CONFIG_PHYLIB=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_AX8817X is not set
+# CONFIG_USB_NET_AX88179_178A is not set
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_NCM is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+# CONFIG_WLAN_VENDOR_ATH is not set
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+# CONFIG_WLAN_VENDOR_REALTEK is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+# CONFIG_WLAN_VENDOR_QUANTENNA is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_EXAR is not set
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_MSM_GENI_EARLY_CONSOLE=y
+CONFIG_SERIAL_SPRD=y
+CONFIG_SERIAL_SPRD_CONSOLE=y
+CONFIG_HVC_DCC=y
+CONFIG_HVC_DCC_SERIALIZE_SMP=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_CAVIUM is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVPORT is not set
+# CONFIG_I2C_COMPAT is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_SPI=y
+CONFIG_SPMI=y
+# CONFIG_SPMI_MSM_PMIC_ARB is not set
+# CONFIG_PINCTRL_SUN8I_H3_R is not set
+# CONFIG_PINCTRL_SUN50I_A64 is not set
+# CONFIG_PINCTRL_SUN50I_A64_R is not set
+# CONFIG_PINCTRL_SUN50I_H5 is not set
+# CONFIG_PINCTRL_SUN50I_H6 is not set
+# CONFIG_PINCTRL_SUN50I_H6_R is not set
+CONFIG_GPIO_GENERIC_PLATFORM=y
+CONFIG_POWER_RESET_HISI=y
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_NETLINK=y
+CONFIG_THERMAL_STATISTICS=y
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=100
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_MFD_ACT8945A=y
+CONFIG_MFD_SYSCON=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_RC_CORE=y
+# CONFIG_RC_MAP is not set
+CONFIG_LIRC=y
+CONFIG_BPF_LIRC_MODE2=y
+CONFIG_RC_DECODERS=y
+CONFIG_RC_DEVICES=y
+CONFIG_MEDIA_CEC_RC=y
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_SDR_SUPPORT is not set
+# CONFIG_MEDIA_TEST_SUPPORT is not set
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_USB_GSPCA=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+# CONFIG_VGA_ARB is not set
+CONFIG_DRM=y
+# CONFIG_DRM_FBDEV_EMULATION is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NINTENDO=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SONY=y
+CONFIG_SONY_FF=y
+CONFIG_HID_STEAM=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_OTG=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_TYPEC=y
+CONFIG_TYPEC_TCPM=y
+CONFIG_TYPEC_TCPCI=y
+CONFIG_TYPEC_UCSI=y
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_LEDS_CLASS_FLASH=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PL030=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_DMADEVICES=y
+CONFIG_DMABUF_HEAPS=y
+CONFIG_DMABUF_HEAPS_SYSTEM=y
+CONFIG_UIO=y
+CONFIG_VHOST_VSOCK=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
+CONFIG_COMMON_CLK_SCPI=y
+# CONFIG_CLK_SUNXI is not set
+# CONFIG_SUNXI_CCU is not set
+CONFIG_HWSPINLOCK=y
+CONFIG_SUN4I_TIMER=y
+# CONFIG_SUN50I_ERRATUM_UNKNOWN1 is not set
+CONFIG_MAILBOX=y
+CONFIG_IOMMU_LIMIT_IOVA_ALIGNMENT=y
+CONFIG_REMOTEPROC=y
+CONFIG_REMOTEPROC_CDEV=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_PM_DEVFREQ_EVENT=y
+CONFIG_IIO=y
+CONFIG_IIO_BUFFER=y
+CONFIG_IIO_TRIGGER=y
+CONFIG_PWM=y
+CONFIG_GENERIC_PHY=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDERFS=y
+CONFIG_ANDROID_VENDOR_HOOKS=y
+CONFIG_LIBNVDIMM=y
+# CONFIG_ND_BLK is not set
+CONFIG_INTERCONNECT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_COMPRESSION=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_FS_VERITY=y
+CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
+# CONFIG_DNOTIFY is not set
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_INCREMENTAL_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+# CONFIG_EFIVAR_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_PMSG=y
+CONFIG_PSTORE_RAM=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_MAC_ROMAN=y
+CONFIG_NLS_MAC_CELTIC=y
+CONFIG_NLS_MAC_CENTEURO=y
+CONFIG_NLS_MAC_CROATIAN=y
+CONFIG_NLS_MAC_CYRILLIC=y
+CONFIG_NLS_MAC_GAELIC=y
+CONFIG_NLS_MAC_GREEK=y
+CONFIG_NLS_MAC_ICELAND=y
+CONFIG_NLS_MAC_INUIT=y
+CONFIG_NLS_MAC_ROMANIAN=y
+CONFIG_NLS_MAC_TURKISH=y
+CONFIG_NLS_UTF8=y
+CONFIG_UNICODE=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
+# CONFIG_HARDENED_USERCOPY_FALLBACK is not set
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_STATIC_USERMODEHELPER=y
+CONFIG_STATIC_USERMODEHELPER_PATH=""
+CONFIG_SECURITY_SELINUX=y
+CONFIG_INIT_STACK_ALL_ZERO=y
+CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+CONFIG_CRYPTO_CHACHA20POLY1305=y
+CONFIG_CRYPTO_ADIANTUM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_LZ4=y
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC8=y
+CONFIG_XZ_DEC=y
+CONFIG_DMA_CMA=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PRINTK_CALLER=y
+CONFIG_DYNAMIC_DEBUG_CORE=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_HEADERS_INSTALL=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SOFTLOCKUP_DETECTOR=y
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_WQ_WATCHDOG=y
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/arm64/configs/rockpi4_defconfig b/arch/arm64/configs/rockpi4_defconfig
new file mode 100644
index 0000000..d4845d0
--- /dev/null
+++ b/arch/arm64/configs/rockpi4_defconfig
@@ -0,0 +1,542 @@
+CONFIG_LOCALVERSION="-mainline"
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_BLK_CGROUP=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+# CONFIG_RSEQ is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_PROFILING=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_ROCKCHIP=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=32
+CONFIG_SECCOMP=y
+CONFIG_PARAVIRT=y
+CONFIG_COMPAT=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_DMI is not set
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_ENERGY_MODEL=y
+CONFIG_CPU_IDLE=y
+CONFIG_ARM_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_SCMI_CPUFREQ=y
+CONFIG_ARM_SCMI_PROTOCOL=y
+# CONFIG_ARM_SCMI_POWER_DOMAIN is not set
+CONFIG_ARM_SCPI_PROTOCOL=y
+# CONFIG_ARM_SCPI_POWER_DOMAIN is not set
+# CONFIG_EFI_ARMSTUB_DTB_LOADER is not set
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_VHOST_VSOCK=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_AES_ARM64=y
+CONFIG_KPROBES=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+# CONFIG_SPARSEMEM_VMEMMAP is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_CMA=y
+CONFIG_CMA_AREAS=16
+CONFIG_ZSMALLOC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_ESP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+# CONFIG_BRIDGE_NETFILTER is not set
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_TIPC=y
+CONFIG_L2TP=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
+CONFIG_BT=y
+CONFIG_CFG80211=y
+# CONFIG_CFG80211_DEFAULT_PS is not set
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_MAC80211=y
+# CONFIG_MAC80211_RC_MINSTREL is not set
+CONFIG_RFKILL=y
+CONFIG_PCI=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_DEVTMPFS=y
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_DEBUG_DEVRES=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_AVB=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_BOW=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_AURORA is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_HISILICON is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+CONFIG_STMMAC_ETH=y
+# CONFIG_DWMAC_GENERIC is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_ROCKCHIP_PHY=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_AX8817X is not set
+# CONFIG_USB_NET_AX88179_178A is not set
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_NCM is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+# CONFIG_WLAN_VENDOR_ATH is not set
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+# CONFIG_WLAN_VENDOR_REALTEK is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+# CONFIG_WLAN_VENDOR_QUANTENNA is not set
+CONFIG_VIRT_WIFI=y
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_EXAR is not set
+CONFIG_SERIAL_8250_NR_UARTS=48
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
+# CONFIG_HW_RANDOM_CAVIUM is not set
+# CONFIG_DEVPORT is not set
+# CONFIG_I2C_COMPAT is not set
+CONFIG_I2C_MUX_PINCTRL=y
+CONFIG_I2C_DEMUX_PINCTRL=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_DESIGNWARE_PLATFORM=y
+CONFIG_I2C_DESIGNWARE_SLAVE=y
+CONFIG_I2C_RK3X=y
+CONFIG_SPI=y
+CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPMI=y
+CONFIG_DEBUG_PINCTRL=y
+CONFIG_PINCTRL_AMD=y
+CONFIG_PINCTRL_SINGLE=y
+CONFIG_PINCTRL_RK805=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
+CONFIG_POWER_AVS=y
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
+CONFIG_POWER_RESET_RESTART=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
+CONFIG_SYSCON_REBOOT_MODE=y
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_ROCKCHIP_THERMAL=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_PRETIMEOUT_GOV=y
+# CONFIG_WATCHDOG_PRETIMEOUT_GOV_NOOP is not set
+CONFIG_DW_WATCHDOG=y
+CONFIG_MFD_ACT8945A=y
+CONFIG_MFD_RK808=y
+CONFIG_REGULATOR_DEBUG=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
+CONFIG_REGULATOR_USERSPACE_CONSUMER=y
+CONFIG_REGULATOR_GPIO=y
+CONFIG_REGULATOR_PWM=y
+CONFIG_REGULATOR_RK808=y
+CONFIG_REGULATOR_VCTRL=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+# CONFIG_VGA_ARB is not set
+CONFIG_DRM=y
+# CONFIG_DRM_FBDEV_EMULATION is not set
+CONFIG_DRM_ROCKCHIP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_DRM_VIRTIO_GPU=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_INTEL8X0=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_TS3A227E=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_OTG=y
+CONFIG_USB_OTG_FSM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_HCD_PCI is not set
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_DUMMY_HCD=m
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_OF_ARASAN=y
+CONFIG_MMC_SDHCI_OF_DWCMSHC=y
+CONFIG_MMC_DW=y
+CONFIG_MMC_DW_ROCKCHIP=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_SYSTOHC is not set
+CONFIG_RTC_DRV_RK808=y
+CONFIG_RTC_DRV_PL030=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_DMADEVICES=y
+CONFIG_PL330_DMA=y
+CONFIG_VIRTIO_PCI=y
+# CONFIG_VIRTIO_PCI_LEGACY is not set
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_VSOC=y
+CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
+CONFIG_ION_SYSTEM_CONTIG_HEAP=y
+CONFIG_COMMON_CLK_RK808=y
+CONFIG_COMMON_CLK_SCPI=y
+CONFIG_HWSPINLOCK=y
+CONFIG_MAILBOX=y
+CONFIG_ROCKCHIP_MBOX=y
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_COMMAND_DB=y
+CONFIG_QCOM_RPMH=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_ARM_RK3399_DMC_DEVFREQ=y
+CONFIG_PWM=y
+CONFIG_PWM_ROCKCHIP=y
+CONFIG_QCOM_PDC=y
+CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_INNO_HDMI=y
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_ROCKCHIP_USB=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ROCKCHIP_EFUSE=y
+CONFIG_INTERCONNECT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_FS_ENCRYPTION=y
+# CONFIG_DNOTIFY is not set
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_EFIVAR_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_CRYPTO_ADIANTUM=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_LZ4=y
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_ROCKCHIP=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_CRC_CCITT=y
+CONFIG_CRC8=y
+CONFIG_XZ_DEC=y
+CONFIG_DMA_CMA=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+CONFIG_FUNCTION_TRACER=y
+# CONFIG_RUNTIME_TESTING_MENU is not set
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_STM=y
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index 7faae6f..c920fa4 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -15,6 +15,7 @@
 struct cpuinfo_arm64 {
 	struct cpu	cpu;
 	struct kobject	kobj;
+	bool		aarch32_valid;
 	u32		reg_ctr;
 	u32		reg_cntfrq;
 	u32		reg_dczid;
@@ -65,6 +66,7 @@
 void __init cpuinfo_store_boot_cpu(void);
 
 void __init init_cpu_features(struct cpuinfo_arm64 *info);
+void init_cpu_32bit_features(struct cpuinfo_arm64 *info);
 void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
 				 struct cpuinfo_arm64 *boot);
 
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index da250e4..6af7973 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -17,6 +17,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/bug.h>
+#include <linux/cpumask.h>
 #include <linux/jump_label.h>
 #include <linux/kernel.h>
 
@@ -394,6 +395,8 @@
 	return is_vhe_hyp_code() || is_nvhe_hyp_code();
 }
 
+extern cpumask_t aarch32_el0_mask;
+
 extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
 extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS];
 extern struct static_key_false arm64_const_caps_ready;
@@ -611,6 +614,15 @@
 	return cpus_have_const_cap(ARM64_HAS_32BIT_EL0);
 }
 
+static inline bool kvm_system_supports_32bit_el0(void)
+{
+#ifndef CONFIG_ASYMMETRIC_AARCH32
+	return system_supports_32bit_el0();
+#else
+	return cpumask_equal(&aarch32_el0_mask, cpu_possible_mask);
+#endif
+}
+
 static inline bool system_supports_4kb_granule(void)
 {
 	u64 mmfr0;
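kvm_system_supports_32bit_el0() above is deliberately stricter than system_supports_32bit_el0(): a vCPU may be scheduled on any possible CPU, so KVM only advertises 32-bit EL0 when every CPU implements it. A compile-only sketch of that check, modelling cpumask_t as a plain 64-bit mask (illustrative names, not kernel API):

#include <stdbool.h>
#include <stdint.h>

/* One bit per possible CPU; assumes at most 64 CPUs. */
static bool kvm_allows_32bit_el0(uint64_t aarch32_el0_mask,
				 uint64_t cpu_possible_mask)
{
	/*
	 * Unlike host tasks, vCPUs cannot be transparently re-affined
	 * to the 32-bit-capable CPUs, so demand full-system support.
	 */
	return aarch32_el0_mask == cpu_possible_mask;
}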
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index fce8cbec..9c5efcc 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -27,6 +27,7 @@
 #include <linux/stddef.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>
+#include <linux/android_vendor.h>
 
 #include <vdso/processor.h>
 
@@ -141,6 +142,8 @@
 		struct user_fpsimd_state fpsimd_state;
 	} uw;
 
+	ANDROID_VENDOR_DATA(1);
+
 	unsigned int		fpsimd_cpu;
 	void			*sve_state;	/* SVE registers, if any */
 	unsigned int		sve_vl;		/* SVE vector length */
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index 1ab63cf..d02c5e5 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -32,8 +32,6 @@
 struct mm_struct;
 extern void __show_regs(struct pt_regs *);
 
-extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-
 #endif	/* __ASSEMBLY__ */
 
 #endif	/* __ASM_SYSTEM_MISC_H */
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 1fbab85..4a967f417 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -68,6 +68,7 @@
 #define TIF_UPROBE		4	/* uprobe breakpoint or singlestep */
 #define TIF_FSCHECK		5	/* Check FS is USER_DS on return */
 #define TIF_MTE_ASYNC_FAULT	6	/* MTE Asynchronous Tag Check Fault */
+#define TIF_CHECK_32BIT_AFFINITY 7	/* Check thread affinity for asymmetric AArch32 */
 #define TIF_SYSCALL_TRACE	8	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	9	/* syscall auditing */
 #define TIF_SYSCALL_TRACEPOINT	10	/* syscall tracepoint for ftrace */
@@ -96,12 +97,15 @@
 #define _TIF_FSCHECK		(1 << TIF_FSCHECK)
 #define _TIF_SINGLESTEP		(1 << TIF_SINGLESTEP)
 #define _TIF_32BIT		(1 << TIF_32BIT)
+#define _TIF_CHECK_32BIT_AFFINITY (1 << TIF_CHECK_32BIT_AFFINITY)
 #define _TIF_SVE		(1 << TIF_SVE)
 #define _TIF_MTE_ASYNC_FAULT	(1 << TIF_MTE_ASYNC_FAULT)
 
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
 				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
-				 _TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT)
+				 _TIF_UPROBE | _TIF_FSCHECK | \
+				 _TIF_MTE_ASYNC_FAULT | \
+				 _TIF_CHECK_32BIT_AFFINITY)
 
 #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 6f36c4f..2d20cec 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -63,7 +63,6 @@
 #define pr_fmt(fmt) "CPU features: " fmt
 
 #include <linux/bsearch.h>
-#include <linux/cpumask.h>
 #include <linux/crash_dump.h>
 #include <linux/sort.h>
 #include <linux/stop_machine.h>
@@ -221,7 +220,11 @@
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+#ifndef CONFIG_ASYMMETRIC_AARCH32
 	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+#else
+	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_HIGHER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+#endif
 	ARM64_FTR_END,
 };
 
@@ -756,7 +759,7 @@
  * Any bits that are not covered by an arm64_ftr_bits entry are considered
  * RES0 for the system-wide value, and must strictly match.
  */
-static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
+static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
 {
 	u64 val = 0;
 	u64 strict_mask = ~0x0ULL;
@@ -838,30 +841,6 @@
 	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
 	init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
 
-	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
-		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
-		init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
-		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
-		init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
-		init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
-		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
-		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
-		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
-		init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
-		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
-		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
-		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
-		init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
-		init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
-		init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
-		init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
-		init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
-		init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
-		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
-		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
-		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
-	}
-
 	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
 		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
 		sve_init_vq_map();
@@ -880,6 +859,31 @@
 	setup_boot_cpu_capabilities();
 }
 
+void init_cpu_32bit_features(struct cpuinfo_arm64 *info)
+{
+	init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
+	init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
+	init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
+	init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
+	init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
+	init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
+	init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
+	init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
+	init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
+	init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
+	init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
+	init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
+	init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
+	init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
+	init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
+	init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
+	init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
+	init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
+	init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
+	init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
+	init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
+}
+
 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
 {
 	const struct arm64_ftr_bits *ftrp;
@@ -1728,6 +1732,16 @@
 	return !!(cap->type & ARM64_CPUCAP_PANIC_ON_CONFLICT);
 }
 
+#ifdef CONFIG_ASYMMETRIC_AARCH32
+cpumask_t aarch32_el0_mask;
+
+static void cpu_enable_aarch32_el0(const struct arm64_cpu_capabilities *cap)
+{
+	if (has_cpuid_feature(cap, SCOPE_LOCAL_CPU))
+		cpumask_set_cpu(smp_processor_id(), &aarch32_el0_mask);
+}
+#endif
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -1804,7 +1818,12 @@
 	{
 		.desc = "32-bit EL0 Support",
 		.capability = ARM64_HAS_32BIT_EL0,
+#ifndef CONFIG_ASYMMETRIC_AARCH32
 		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
+#else
+		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
+		.cpu_enable = cpu_enable_aarch32_el0,
+#endif
 		.matches = has_cpuid_feature,
 		.sys_reg = SYS_ID_AA64PFR0_EL1,
 		.sign = FTR_UNSIGNED,
@@ -2596,7 +2615,8 @@
 
 	verify_local_elf_hwcaps(arm64_elf_hwcaps);
 
-	if (system_supports_32bit_el0())
+	if (system_supports_32bit_el0() &&
+	    this_cpu_has_cap(ARM64_HAS_32BIT_EL0))
 		verify_local_elf_hwcaps(compat_elf_hwcaps);
 
 	if (system_supports_sve())
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 77605ae..5776d8c 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -371,32 +371,6 @@
 	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
 	info->reg_id_aa64zfr0 = read_cpuid(ID_AA64ZFR0_EL1);
 
-	/* Update the 32bit ID registers only if AArch32 is implemented */
-	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
-		info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
-		info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
-		info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
-		info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
-		info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
-		info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
-		info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
-		info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
-		info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
-		info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
-		info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
-		info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
-		info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
-		info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
-		info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
-		info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
-		info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
-		info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
-
-		info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
-		info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
-		info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
-	}
-
 	if (IS_ENABLED(CONFIG_ARM64_SVE) &&
 	    id_aa64pfr0_sve(info->reg_id_aa64pfr0))
 		info->reg_zcr = read_zcr_features();
@@ -404,10 +378,51 @@
 	cpuinfo_detect_icache_policy(info);
 }
 
+static void __cpuinfo_store_cpu_32bit(struct cpuinfo_arm64 *info)
+{
+	info->aarch32_valid = true;
+
+	info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
+	info->reg_id_dfr1 = read_cpuid(ID_DFR1_EL1);
+	info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+	info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+	info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+	info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+	info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+	info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+	info->reg_id_isar6 = read_cpuid(ID_ISAR6_EL1);
+	info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+	info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+	info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+	info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+	info->reg_id_mmfr4 = read_cpuid(ID_MMFR4_EL1);
+	info->reg_id_mmfr5 = read_cpuid(ID_MMFR5_EL1);
+	info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+	info->reg_id_pfr2 = read_cpuid(ID_PFR2_EL1);
+
+	info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+	info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+	info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+}
+
 void cpuinfo_store_cpu(void)
 {
 	struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
 	__cpuinfo_store_cpu(info);
+	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+		__cpuinfo_store_cpu_32bit(info);
+	/*
+	 * With asymmetric AArch32 support, populate the boot CPU information
+	 * on the first 32-bit capable secondary CPU if the primary one
+	 * skipped this step.
+	 */
+	if (IS_ENABLED(CONFIG_ASYMMETRIC_AARCH32) &&
+	    !boot_cpu_data.aarch32_valid &&
+	    id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
+		__cpuinfo_store_cpu_32bit(&boot_cpu_data);
+		init_cpu_32bit_features(&boot_cpu_data);
+	}
 	update_cpu_features(smp_processor_id(), info, &boot_cpu_data);
 }
 
@@ -415,7 +430,11 @@
 {
 	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
 	__cpuinfo_store_cpu(info);
+	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
+		__cpuinfo_store_cpu_32bit(info);
 
 	boot_cpu_data = *info;
 	init_cpu_features(&boot_cpu_data);
+	if (id_aa64pfr0_32bit_el0(boot_cpu_data.reg_id_aa64pfr0))
+		init_cpu_32bit_features(&boot_cpu_data);
 }
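The cpuinfo changes above split the 32-bit ID register snapshot out of __cpuinfo_store_cpu() so that, on an asymmetric system whose boot CPU is 64-bit only, boot_cpu_data can be backfilled by the first 32-bit-capable secondary. A minimal sketch of that ordering (placeholder types and helpers, not the kernel's):

#include <stdbool.h>

struct cpu_info {
	bool aarch32_valid;
	/* 32-bit ID register copies would live here. */
};

static void store_cpu_info(struct cpu_info *info, struct cpu_info *boot,
			   bool cpu_has_aarch32, bool asym_aarch32)
{
	if (cpu_has_aarch32)
		info->aarch32_valid = true;	/* snapshot 32-bit ID regs */

	/* First 32-bit-capable secondary backfills the boot CPU's view. */
	if (asym_aarch32 && cpu_has_aarch32 && !boot->aarch32_valid)
		boot->aarch32_valid = true;	/* + init_cpu_32bit_features() */
}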
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index fa76151..1e14bfa 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -284,21 +284,25 @@
 {
 	register_debug_hook(&hook->node, &user_break_hook);
 }
+EXPORT_SYMBOL_GPL(register_user_break_hook);
 
 void unregister_user_break_hook(struct break_hook *hook)
 {
 	unregister_debug_hook(&hook->node);
 }
+EXPORT_SYMBOL_GPL(unregister_user_break_hook);
 
 void register_kernel_break_hook(struct break_hook *hook)
 {
 	register_debug_hook(&hook->node, &kernel_break_hook);
 }
+EXPORT_SYMBOL_GPL(register_kernel_break_hook);
 
 void unregister_kernel_break_hook(struct break_hook *hook)
 {
 	unregister_debug_hook(&hook->node);
 }
+EXPORT_SYMBOL_GPL(unregister_kernel_break_hook);
 
 static int call_break_hook(struct pt_regs *regs, unsigned int esr)
 {
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index ed919f6..577a63b 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -44,6 +44,8 @@
 #include <linux/percpu.h>
 #include <linux/thread_info.h>
 #include <linux/prctl.h>
+#include <trace/hooks/fpsimd.h>
+#include <trace/hooks/mpam.h>
 
 #include <asm/alternative.h>
 #include <asm/arch_gicv3.h>
@@ -70,8 +72,6 @@
 void (*pm_power_off)(void);
 EXPORT_SYMBOL_GPL(pm_power_off);
 
-void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
-
 static void noinstr __cpu_do_idle(void)
 {
 	dsb(sy);
@@ -199,10 +199,7 @@
 		efi_reboot(reboot_mode, NULL);
 
 	/* Now call the architecture specific reboot code. */
-	if (arm_pm_restart)
-		arm_pm_restart(reboot_mode, cmd);
-	else
-		do_kernel_restart(cmd);
+	do_kernel_restart(cmd);
 
 	/*
 	 * Whoops - the architecture was unable to reboot.
@@ -509,6 +506,15 @@
 	__this_cpu_write(__entry_task, next);
 }
 
+static void aarch32_thread_switch(struct task_struct *next)
+{
+	struct thread_info *ti = task_thread_info(next);
+
+	if (IS_ENABLED(CONFIG_ASYMMETRIC_AARCH32) && is_compat_thread(ti) &&
+	    !cpumask_test_cpu(smp_processor_id(), &aarch32_el0_mask))
+		set_ti_thread_flag(ti, TIF_CHECK_32BIT_AFFINITY);
+}
+
 /*
  * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
  * Assuming the virtual counter is enabled at the beginning of times:
@@ -557,6 +563,12 @@
 	uao_thread_switch(next);
 	ssbs_thread_switch(next);
 	erratum_1418040_thread_switch(prev, next);
+	aarch32_thread_switch(next);
+	/*
+	 * The MPAM vendor hook must run before the dsb() below, since
+	 * MPAM is related to cache maintenance.
+	 */
+	trace_android_vh_mpam_set(prev, next);
 
 	/*
 	 * Complete any pending TLB or cache maintenance on this CPU in case
@@ -573,6 +585,8 @@
 	 */
 	mte_thread_switch(next);
 
+	trace_android_vh_is_fpsimd_save(prev, next);
+
 	/* the actual thread switch */
 	last = cpu_switch_to(prev, next);
 
@@ -627,6 +641,13 @@
 		arch_prctl_spec_ctrl_set(current, PR_SPEC_STORE_BYPASS,
 					 PR_SPEC_ENABLE);
 	}
+
+	/*
+	 * If exec'ing a 32-bit task, force the asymmetric 32-bit feature
+	 * check as the task may not go through a switch_to() call.
+	 */
+	if (IS_ENABLED(CONFIG_ASYMMETRIC_AARCH32) && is_compat_task())
+		set_thread_flag(TIF_CHECK_32BIT_AFFINITY);
 }
 
 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index a8184ca..d7f4623 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -8,6 +8,7 @@
 
 #include <linux/cache.h>
 #include <linux/compat.h>
+#include <linux/cpumask.h>
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
@@ -911,6 +912,38 @@
 	restore_saved_sigmask();
 }
 
+static void set_32bit_cpus_allowed(void)
+{
+	int ret;
+
+	/*
+	 * Try to honour the task's affinity request as best we can. If it
+	 * intersects no 32-bit-capable CPU, disregard it entirely.
+	 */
+	if (cpumask_intersects(current->cpus_ptr, &aarch32_el0_mask)) {
+		cpumask_var_t cpus_allowed;
+
+		/* On allocation failure, fall back to the full 32-bit mask. */
+		if (!alloc_cpumask_var(&cpus_allowed, GFP_ATOMIC)) {
+			ret = set_cpus_allowed_ptr(current, &aarch32_el0_mask);
+		} else {
+			cpumask_and(cpus_allowed, current->cpus_ptr, &aarch32_el0_mask);
+			ret = set_cpus_allowed_ptr(current, cpus_allowed);
+			free_cpumask_var(cpus_allowed);
+		}
+	} else {
+		ret = set_cpus_allowed_ptr(current, &aarch32_el0_mask);
+	}
+
+	if (ret) {
+		pr_warn_once("No CPUs capable of running 32-bit tasks\n");
+		force_sig(SIGKILL);
+	}
+}
+
 asmlinkage void do_notify_resume(struct pt_regs *regs,
 				 unsigned long thread_flags)
 {
@@ -933,6 +966,12 @@
 		} else {
 			local_daif_restore(DAIF_PROCCTX);
 
+			if (IS_ENABLED(CONFIG_ASYMMETRIC_AARCH32) &&
+			    thread_flags & _TIF_CHECK_32BIT_AFFINITY) {
+				clear_thread_flag(TIF_CHECK_32BIT_AFFINITY);
+				set_32bit_cpus_allowed();
+			}
+
 			if (thread_flags & _TIF_UPROBE)
 				uprobe_notify_resume(regs);
 
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 18e9727..91e2fd1 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -53,9 +53,14 @@
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/ipi.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/debug.h>
 
 DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
 EXPORT_PER_CPU_SYMBOL(cpu_number);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_raise);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_exit);
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -93,6 +98,54 @@
 }
 #endif
 
+#ifdef CONFIG_ASYMMETRIC_AARCH32
+static int last_aarch32_cpu = -1;
+static cpumask_t aarch32_online;
+
+static void asym_aarch32_online(void)
+{
+	/*
+	 * Since we onlined another CPU, restore the hotpluggability of
+	 * the last AArch32-capable CPU if it was disabled.
+	 */
+	cpumask_and(&aarch32_online, &aarch32_el0_mask, cpu_online_mask);
+
+	if (last_aarch32_cpu >= 0 && cpumask_weight(&aarch32_online) > 1) {
+		struct device *dev;
+
+		dev = get_cpu_device(last_aarch32_cpu);
+		dev->offline_disabled = 0;
+		last_aarch32_cpu = -1;
+	}
+}
+
+static void asym_aarch32_offline(void)
+{
+	/* Don't let the last AArch32-compatible CPU go down */
+	if (!cpumask_empty(&aarch32_el0_mask)) {
+		cpumask_and(&aarch32_online, &aarch32_el0_mask, cpu_online_mask);
+
+		/*
+		 * If we're left with only one AArch32-capable CPU, prevent
+		 * it from being offlined.
+		 */
+		if (cpumask_weight(&aarch32_online) == 1) {
+			struct device *dev;
+
+			last_aarch32_cpu = cpumask_first(&aarch32_online);
+			dev = get_cpu_device(last_aarch32_cpu);
+			dev->offline_disabled = 1;
+		}
+	}
+}
+#else
+static void asym_aarch32_online(void) {}
+static void asym_aarch32_offline(void) {}
+#endif
 
 /*
  * Boot a secondary CPU, and assign it the specified idle task.
@@ -137,8 +190,10 @@
 	 */
 	wait_for_completion_timeout(&cpu_running,
 				    msecs_to_jiffies(5000));
-	if (cpu_online(cpu))
+	if (cpu_online(cpu)) {
+		asym_aarch32_online();
 		return 0;
+	}
 
 	pr_crit("CPU%u: failed to come online\n", cpu);
 	secondary_data.task = NULL;
@@ -316,6 +371,8 @@
 	set_cpu_online(cpu, false);
 	ipi_teardown(cpu);
 
+	asym_aarch32_offline();
+
 	/*
 	 * OK - migrate IRQs away from this CPU
 	 */
@@ -351,7 +408,7 @@
 		pr_crit("CPU%u: cpu didn't die\n", cpu);
 		return;
 	}
-	pr_notice("CPU%u: shutdown\n", cpu);
+	pr_debug("CPU%u: shutdown\n", cpu);
 
 	/*
 	 * Now that the dying CPU is beyond the point of no return w.r.t.
@@ -903,6 +960,7 @@
 		break;
 
 	case IPI_CPU_STOP:
+		trace_android_vh_ipi_stop_rcuidle(get_irq_regs());
 		local_cpu_stop();
 		break;
 
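The online/offline hooks above keep at least one 32-bit-capable CPU online by toggling offline_disabled. The rule they implement is a popcount check, sketched standalone below (assumes at most 64 CPUs; illustrative only, not kernel API):

#include <stdint.h>

/* Returns the CPU that must stay online, or -1 if no pin is needed. */
static int cpu_to_pin(uint64_t aarch32_el0_mask, uint64_t online_mask)
{
	uint64_t both = aarch32_el0_mask & online_mask;

	if (aarch32_el0_mask && __builtin_popcountll(both) == 1)
		return __builtin_ctzll(both);	/* last 32-bit CPU online */
	return -1;	/* zero or multiple 32-bit CPUs online */
}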
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index fa56af1..d0634ad 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -190,6 +190,7 @@
 
 	put_task_stack(tsk);
 }
+EXPORT_SYMBOL_GPL(dump_backtrace);
 
 void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
 {
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index dfb5218..cfccb06 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -226,7 +226,7 @@
 		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
 		switch (mode) {
 		case PSR_AA32_MODE_USR:
-			if (!system_supports_32bit_el0())
+			if (!kvm_system_supports_32bit_el0())
 				return -EINVAL;
 			break;
 		case PSR_AA32_MODE_FIQ:
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index c1fac98..271517c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -670,7 +670,7 @@
 	 */
 	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
 	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
-	if (!system_supports_32bit_el0())
+	if (!kvm_system_supports_32bit_el0())
 		val |= ARMV8_PMU_PMCR_LC;
 	__vcpu_sys_reg(vcpu, r->reg) = val;
 }
@@ -722,7 +722,7 @@
 		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
 		val &= ~ARMV8_PMU_PMCR_MASK;
 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
-		if (!system_supports_32bit_el0())
+		if (!kvm_system_supports_32bit_el0())
 			val |= ARMV8_PMU_PMCR_LC;
 		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 		kvm_pmu_handle_pmcr(vcpu, val);
@@ -1122,6 +1122,15 @@
 		val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
 		val &= ~(0xfUL << ID_AA64PFR0_CSV2_SHIFT);
 		val |= ((u64)vcpu->kvm->arch.pfr0_csv2 << ID_AA64PFR0_CSV2_SHIFT);
+
+		if (!kvm_system_supports_32bit_el0()) {
+			/*
+			 * We could be running on an asymmetric AArch32 system.
+			 * Override the field to present an AArch64-only system.
+			 */
+			val &= ~(0xfUL << ID_AA64PFR0_EL0_SHIFT);
+			val |= (ID_AA64PFR0_EL0_64BIT_ONLY << ID_AA64PFR0_EL0_SHIFT);
+		}
 	} else if (id == SYS_ID_AA64PFR1_EL1) {
 		val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
 	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 0955406..9345882 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -348,15 +348,18 @@
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
 		extern u16 memstart_offset_seed;
-		u64 range = linear_region_size -
-			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());
+		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+		int parange = cpuid_feature_extract_unsigned_field(
+					mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
+		s64 range = linear_region_size -
+			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));
 
 		/*
 		 * If the size of the linear region exceeds, by a sufficient
-		 * margin, the size of the region that the available physical
-		 * memory spans, randomize the linear region as well.
+		 * margin, the size of the region that the physical memory can
+		 * span, randomize the linear region as well.
 		 */
-		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
+		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
 			range /= ARM64_MEMSTART_ALIGN;
 			memstart_addr -= ARM64_MEMSTART_ALIGN *
 					 ((range * memstart_offset_seed) >> 16);
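The mm/init.c change clamps the KASLR window to the CPU's physical address range rather than the DRAM currently described by firmware, and widens range to s64 so an oversized PA range cannot wrap the comparison. The arithmetic, as a runnable sketch with an illustrative 1 GiB alignment (the real ARM64_MEMSTART_ALIGN varies by page size):

#include <stdint.h>
#include <stdio.h>

#define MEMSTART_ALIGN	(1LL << 30)	/* illustrative; real value varies */

/* Offset subtracted from memstart_addr for a 16-bit KASLR seed. */
static int64_t memstart_offset(int64_t linear_region_size,
			       unsigned int phys_shift, uint16_t seed)
{
	int64_t range = linear_region_size - (1LL << phys_shift);

	if (seed == 0 || range < MEMSTART_ALIGN)
		return 0;
	range /= MEMSTART_ALIGN;
	return MEMSTART_ALIGN * ((range * seed) >> 16);
}

int main(void)
{
	/* 128 TiB linear region, 40-bit PA range, mid-range seed. */
	printf("%lld\n", (long long)memstart_offset(1LL << 47, 40, 0x8000));
	return 0;
}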
diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c
index 3577118..9410424 100644
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -480,7 +480,7 @@
 	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);
 
 
-	for (i = 1; i <= LAST_IRQ; i++)
+	for (i = 1; i < NR_IRQS; i++)
 		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
 	/* Initialize EPOLL Loop */
 	os_setup_epoll();
diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig
new file mode 100644
index 0000000..a1992b2
--- /dev/null
+++ b/arch/x86/configs/gki_defconfig
@@ -0,0 +1,545 @@
+CONFIG_LOCALVERSION="-mainline"
+CONFIG_KERNEL_LZ4=y
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKHEADERS=y
+CONFIG_UCLAMP_TASK=y
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_CGROUP_SCHED=y
+# CONFIG_FAIR_GROUP_SCHED is not set
+CONFIG_UCLAMP_TASK_GROUP=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+# CONFIG_TIME_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+CONFIG_BOOT_CONFIG=y
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_FHANDLE is not set
+CONFIG_KALLSYMS_ALL=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+# CONFIG_RSEQ is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
+CONFIG_PROFILING=y
+CONFIG_SMP=y
+CONFIG_X86_X2APIC=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+CONFIG_NR_CPUS=32
+CONFIG_EFI=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="rw"
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_IA32_EMULATION=y
+CONFIG_KVM=y
+CONFIG_KVM_INTEL=y
+CONFIG_KVM_AMD=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
+CONFIG_GKI_HACKS_TO_FIX=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_CMA_AREAS=16
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_NET_IPIP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_ESP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_GRE=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_TIPC=y
+CONFIG_L2TP=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_VSOCKETS=y
+CONFIG_BPF_JIT=y
+CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_HIDP=y
+CONFIG_BT_HCIBTSDIO=y
+CONFIG_BT_HCIUART=y
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_BT_HCIUART_QCA=y
+CONFIG_CFG80211=y
+# CONFIG_CFG80211_DEFAULT_PS is not set
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_MAC80211=y
+CONFIG_RFKILL=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_MSI=y
+CONFIG_PCIE_DW_PLAT_EP=y
+CONFIG_PCI_ENDPOINT=y
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_FW_CACHE is not set
+CONFIG_GNSS=y
+CONFIG_OF=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_UID_SYS_STATS=y
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PCI=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_DWC_TC_PLATFORM=y
+CONFIG_SCSI_UFS_BSG=y
+CONFIG_SCSI_UFS_CRYPTO=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_SNAPSHOT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_AVB=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_BOW=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_WIREGUARD=y
+CONFIG_TUN=y
+CONFIG_VETH=y
+# CONFIG_ETHERNET is not set
+CONFIG_PHYLIB=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPTP=y
+CONFIG_PPPOL2TP=y
+CONFIG_USB_RTL8152=y
+CONFIG_USB_USBNET=y
+# CONFIG_USB_NET_AX8817X is not set
+# CONFIG_USB_NET_AX88179_178A is not set
+# CONFIG_USB_NET_CDCETHER is not set
+# CONFIG_USB_NET_CDC_NCM is not set
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+# CONFIG_WLAN_VENDOR_ATH is not set
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+# CONFIG_WLAN_VENDOR_REALTEK is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+# CONFIG_WLAN_VENDOR_QUANTENNA is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_HW_RANDOM=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVPORT is not set
+CONFIG_HPET=y
+# CONFIG_I2C_COMPAT is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_SPI=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
+# CONFIG_HWMON is not set
+CONFIG_THERMAL_NETLINK=y
+CONFIG_THERMAL_STATISTICS=y
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=100
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+# CONFIG_X86_PKG_TEMP_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_MFD_SYSCON=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_RC_CORE=y
+# CONFIG_RC_MAP is not set
+CONFIG_LIRC=y
+CONFIG_BPF_LIRC_MODE2=y
+CONFIG_RC_DECODERS=y
+CONFIG_RC_DEVICES=y
+CONFIG_MEDIA_CEC_RC=y
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_SDR_SUPPORT is not set
+# CONFIG_MEDIA_TEST_SUPPORT is not set
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_USB_GSPCA=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_DRM=y
+# CONFIG_DRM_FBDEV_EMULATION is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+CONFIG_SND_DYNAMIC_MINORS=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NINTENDO=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SONY=y
+CONFIG_SONY_FF=y
+CONFIG_HID_STEAM=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_ACM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_RNDIS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_TYPEC=y
+CONFIG_TYPEC_TCPM=y
+CONFIG_TYPEC_TCPCI=y
+CONFIG_TYPEC_UCSI=y
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_LEDS_CLASS_FLASH=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMABUF_HEAPS=y
+CONFIG_DMABUF_HEAPS_SYSTEM=y
+CONFIG_UIO=y
+CONFIG_VHOST_VSOCK=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ION=y
+CONFIG_ION_SYSTEM_HEAP=y
+CONFIG_REMOTEPROC=y
+CONFIG_REMOTEPROC_CDEV=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_PM_DEVFREQ_EVENT=y
+CONFIG_IIO=y
+CONFIG_IIO_BUFFER=y
+CONFIG_IIO_TRIGGER=y
+CONFIG_RAS=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDERFS=y
+CONFIG_ANDROID_VENDOR_HOOKS=y
+CONFIG_LIBNVDIMM=y
+# CONFIG_ND_BLK is not set
+CONFIG_INTERCONNECT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_COMPRESSION=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_FS_VERITY=y
+CONFIG_FS_VERITY_BUILTIN_SIGNATURES=y
+# CONFIG_DNOTIFY is not set
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_INCREMENTAL_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_EFIVAR_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_PMSG=y
+CONFIG_PSTORE_RAM=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_MAC_ROMAN=y
+CONFIG_NLS_MAC_CELTIC=y
+CONFIG_NLS_MAC_CENTEURO=y
+CONFIG_NLS_MAC_CROATIAN=y
+CONFIG_NLS_MAC_CYRILLIC=y
+CONFIG_NLS_MAC_GAELIC=y
+CONFIG_NLS_MAC_GREEK=y
+CONFIG_NLS_MAC_ICELAND=y
+CONFIG_NLS_MAC_INUIT=y
+CONFIG_NLS_MAC_ROMANIAN=y
+CONFIG_NLS_MAC_TURKISH=y
+CONFIG_NLS_UTF8=y
+CONFIG_UNICODE=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
+# CONFIG_HARDENED_USERCOPY_FALLBACK is not set
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_STATIC_USERMODEHELPER=y
+CONFIG_STATIC_USERMODEHELPER_PATH=""
+CONFIG_SECURITY_SELINUX=y
+CONFIG_INIT_STACK_ALL_ZERO=y
+CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+CONFIG_CRYPTO_CHACHA20POLY1305=y
+CONFIG_CRYPTO_ADIANTUM=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_SHA256_SSSE3=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
+CONFIG_CRYPTO_LZ4=y
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRC8=y
+CONFIG_XZ_DEC=y
+CONFIG_DMA_CMA=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG_CORE=y
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_INFO_DWARF4=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_HEADERS_INSTALL=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SOFTLOCKUP_DETECTOR=y
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_WQ_WATCHDOG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_UNWINDER_FRAME_POINTER=y
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
index 07a9331..ffc5bd7 100644
--- a/arch/x86/entry/calling.h
+++ b/arch/x86/entry/calling.h
@@ -366,7 +366,11 @@
  */
 .macro LOAD_CPU_AND_NODE_SEG_LIMIT reg:req
 	movq	$__CPUNODE_SEG, \reg
+#ifdef __clang__
+	.quad 0xc0030f48
+#else
 	lsl	\reg, \reg
+#endif
 .endm
 
 /*
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index db1378c..7e4bafa 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -274,6 +274,19 @@
 /* Needed by apm.c */
 void notrace restore_processor_state(void)
 {
+#ifdef __clang__
+	// The following snippet is copied from __restore_processor_state().
+	// Its purpose is to prepare the GS segment before that function is
+	// called, since the function is compiled with SCS on and will use GS
+	// on entry.
+	// TODO: hack to be removed once the compiler bug is fixed.
+#ifdef CONFIG_X86_64
+	wrmsrl(MSR_GS_BASE, saved_context.kernelmode_gs_base);
+#else
+	loadsegment(fs, __KERNEL_PERCPU);
+	loadsegment(gs, __KERNEL_STACK_CANARY);
+#endif
+#endif
 	__restore_processor_state(&saved_context);
 }
 #ifdef CONFIG_X86_32
diff --git a/block/Kconfig b/block/Kconfig
index a2297ed..e657e2b 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -221,7 +221,7 @@
 	default y
 
 config BLK_MQ_VIRTIO
-	bool
+	tristate
 	depends on BLOCK && VIRTIO
 	default y
 
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index c162b75..d02d297 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -180,6 +180,8 @@
 	bio_clone_blkg_association(bio, bio_src);
 	blkcg_bio_issue_init(bio);
 
+	bio_clone_skip_dm_default_key(bio, bio_src);
+
 	return bio;
 }
 
@@ -543,6 +545,7 @@
 
 	blk_crypto_ksm.ksm_ll_ops = blk_crypto_ksm_ll_ops;
 	blk_crypto_ksm.max_dun_bytes_supported = BLK_CRYPTO_MAX_IV_SIZE;
+	blk_crypto_ksm.features = BLK_CRYPTO_FEATURE_STANDARD_KEYS;
 
 	/* All blk-crypto modes have a crypto API fallback. */
 	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++)
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index 5da43f0..d0f13d3 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -96,6 +96,7 @@
 
 	bio->bi_crypt_context = bc;
 }
+EXPORT_SYMBOL_GPL(bio_crypt_set_ctx);
 
 void __bio_crypt_free_ctx(struct bio *bio)
 {
@@ -306,8 +307,13 @@
 /**
  * blk_crypto_init_key() - Prepare a key for use with blk-crypto
  * @blk_key: Pointer to the blk_crypto_key to initialize.
- * @raw_key: Pointer to the raw key. Must be the correct length for the chosen
- *	     @crypto_mode; see blk_crypto_modes[].
+ * @raw_key: Pointer to the raw key.
+ * @raw_key_size: Size of the raw key.  Must be at least the required size for
+ *                the chosen @crypto_mode; see blk_crypto_modes[].  (It may be
+ *                longer than the mode's actual key size, in order to support
+ *                inline encryption hardware that accepts wrapped keys;
+ *                @is_hw_wrapped must be set for such keys.)
+ * @is_hw_wrapped: Denotes that @raw_key is a hardware-wrapped key.
  * @crypto_mode: identifier for the encryption algorithm to use
  * @dun_bytes: number of bytes that will be used to specify the DUN when this
  *	       key is used
@@ -316,7 +322,9 @@
  * Return: 0 on success, -errno on failure.  The caller is responsible for
  *	   zeroizing both blk_key and raw_key when done with them.
  */
-int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
+int blk_crypto_init_key(struct blk_crypto_key *blk_key,
+			const u8 *raw_key, unsigned int raw_key_size,
+			bool is_hw_wrapped,
 			enum blk_crypto_mode_num crypto_mode,
 			unsigned int dun_bytes,
 			unsigned int data_unit_size)
@@ -328,9 +336,17 @@
 	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
 		return -EINVAL;
 
+	BUILD_BUG_ON(BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE < BLK_CRYPTO_MAX_KEY_SIZE);
+
 	mode = &blk_crypto_modes[crypto_mode];
-	if (mode->keysize == 0)
-		return -EINVAL;
+	if (is_hw_wrapped) {
+		if (raw_key_size < mode->keysize ||
+		    raw_key_size > BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE)
+			return -EINVAL;
+	} else {
+		if (raw_key_size != mode->keysize)
+			return -EINVAL;
+	}
 
 	if (dun_bytes == 0 || dun_bytes > BLK_CRYPTO_MAX_IV_SIZE)
 		return -EINVAL;
@@ -341,12 +357,14 @@
 	blk_key->crypto_cfg.crypto_mode = crypto_mode;
 	blk_key->crypto_cfg.dun_bytes = dun_bytes;
 	blk_key->crypto_cfg.data_unit_size = data_unit_size;
+	blk_key->crypto_cfg.is_hw_wrapped = is_hw_wrapped;
 	blk_key->data_unit_size_bits = ilog2(data_unit_size);
-	blk_key->size = mode->keysize;
-	memcpy(blk_key->raw, raw_key, mode->keysize);
+	blk_key->size = raw_key_size;
+	memcpy(blk_key->raw, raw_key, raw_key_size);
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(blk_crypto_init_key);
 
 /*
  * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
@@ -356,8 +374,10 @@
 bool blk_crypto_config_supported(struct request_queue *q,
 				 const struct blk_crypto_config *cfg)
 {
-	return IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) ||
-	       blk_ksm_crypto_cfg_supported(q->ksm, cfg);
+	if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) &&
+	    !cfg->is_hw_wrapped)
+		return true;
+	return blk_ksm_crypto_cfg_supported(q->ksm, cfg);
 }
 
 /**
@@ -380,8 +400,13 @@
 {
 	if (blk_ksm_crypto_cfg_supported(q->ksm, &key->crypto_cfg))
 		return 0;
+	if (key->crypto_cfg.is_hw_wrapped) {
+		pr_warn_once("hardware doesn't support wrapped keys\n");
+		return -EOPNOTSUPP;
+	}
 	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
 }
+EXPORT_SYMBOL_GPL(blk_crypto_start_using_key);
 
 /**
  * blk_crypto_evict_key() - Evict a key from any inline encryption hardware
@@ -409,3 +434,4 @@
 	 */
 	return blk_crypto_fallback_evict_key(key);
 }
+EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
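A hedged usage sketch of the extended blk_crypto_init_key() signature for a standard (not hardware-wrapped) key; the mode, DUN size, and data-unit size below are illustrative choices, not values mandated by the patch:

	struct blk_crypto_key key;
	u8 raw[64];	/* AES-256-XTS takes a 64-byte raw key */
	int err;

	get_random_bytes(raw, sizeof(raw));
	err = blk_crypto_init_key(&key, raw, sizeof(raw),
				  false /* is_hw_wrapped */,
				  BLK_ENCRYPTION_MODE_AES_256_XTS,
				  8 /* dun_bytes */, 4096 /* data_unit_size */);

A hardware-wrapped key would instead pass is_hw_wrapped = true with a blob of up to BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE bytes, and only a queue whose keyslot manager advertises BLK_CRYPTO_FEATURE_WRAPPED_KEYS will accept it.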
diff --git a/block/blk-mq-virtio.c b/block/blk-mq-virtio.c
index 7b8a42c..78b2b4f 100644
--- a/block/blk-mq-virtio.c
+++ b/block/blk-mq-virtio.c
@@ -44,3 +44,6 @@
 	return blk_mq_map_queues(qmap);
 }
 EXPORT_SYMBOL_GPL(blk_mq_virtio_map_queues);
+
+MODULE_DESCRIPTION("Virtio Device Default Queue Mapping");
+MODULE_LICENSE("GPL v2");
diff --git a/block/ioprio.c b/block/ioprio.c
index 364d229..04ebd37 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -69,7 +69,7 @@
 
 	switch (class) {
 		case IOPRIO_CLASS_RT:
-			if (!capable(CAP_SYS_NICE) && !capable(CAP_SYS_ADMIN))
+			if (!capable(CAP_SYS_ADMIN))
 				return -EPERM;
 			fallthrough;
 			/* rt has prio field too */
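For context, the userspace-visible effect of the change above: requesting the RT I/O scheduling class now requires CAP_SYS_ADMIN, where CAP_SYS_NICE used to suffice. A minimal sketch of such a request through the raw syscall (the IOPRIO_* values follow the ioprio UAPI):

	#include <sys/syscall.h>
	#include <unistd.h>

	#define IOPRIO_CLASS_SHIFT	13
	#define IOPRIO_PRIO_VALUE(c, d)	(((c) << IOPRIO_CLASS_SHIFT) | (d))
	#define IOPRIO_CLASS_RT		1
	#define IOPRIO_WHO_PROCESS	1

	int main(void)
	{
		/* Fails with EPERM unless the caller has CAP_SYS_ADMIN. */
		return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
			       IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 4));
	}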
diff --git a/block/keyslot-manager.c b/block/keyslot-manager.c
index 86f8195..ecd7df6 100644
--- a/block/keyslot-manager.c
+++ b/block/keyslot-manager.c
@@ -62,6 +62,11 @@
 		pm_runtime_put_sync(ksm->dev);
 }
 
+static inline bool blk_ksm_is_passthrough(struct blk_keyslot_manager *ksm)
+{
+	return ksm->num_slots == 0;
+}
+
 /**
  * blk_ksm_init() - Initialize a keyslot manager
  * @ksm: The keyslot_manager to initialize.
@@ -205,6 +210,10 @@
 	int err;
 
 	*slot_ptr = NULL;
+
+	if (blk_ksm_is_passthrough(ksm))
+		return BLK_STS_OK;
+
 	down_read(&ksm->lock);
 	slot = blk_ksm_find_and_grab_keyslot(ksm, key);
 	up_read(&ksm->lock);
@@ -302,6 +311,13 @@
 		return false;
 	if (ksm->max_dun_bytes_supported < cfg->dun_bytes)
 		return false;
+	if (cfg->is_hw_wrapped) {
+		if (!(ksm->features & BLK_CRYPTO_FEATURE_WRAPPED_KEYS))
+			return false;
+	} else {
+		if (!(ksm->features & BLK_CRYPTO_FEATURE_STANDARD_KEYS))
+			return false;
+	}
 	return true;
 }
 
@@ -325,6 +341,16 @@
 	struct blk_ksm_keyslot *slot;
 	int err = 0;
 
+	if (blk_ksm_is_passthrough(ksm)) {
+		if (ksm->ksm_ll_ops.keyslot_evict) {
+			blk_ksm_hw_enter(ksm);
+			err = ksm->ksm_ll_ops.keyslot_evict(ksm, key, -1);
+			blk_ksm_hw_exit(ksm);
+			return err;
+		}
+		return 0;
+	}
+
 	blk_ksm_hw_enter(ksm);
 	slot = blk_ksm_find_keyslot(ksm, key);
 	if (!slot)
@@ -360,6 +386,9 @@
 {
 	unsigned int slot;
 
+	if (WARN_ON(blk_ksm_is_passthrough(ksm)))
+		return;
+
 	/* This is for device initialization, so don't resume the device */
 	down_write(&ksm->lock);
 	for (slot = 0; slot < ksm->num_slots; slot++) {
@@ -401,3 +430,96 @@
 {
 	q->ksm = NULL;
 }
+EXPORT_SYMBOL_GPL(blk_ksm_unregister);
+
+/**
+ * blk_ksm_derive_raw_secret() - Derive software secret from wrapped key
+ * @ksm: The keyslot manager
+ * @wrapped_key: The wrapped key
+ * @wrapped_key_size: Size of the wrapped key in bytes
+ * @secret: (output) the software secret
+ * @secret_size: the number of secret bytes to derive
+ *
+ * Given a hardware-wrapped key, ask the hardware to derive a secret which
+ * software can use for cryptographic tasks other than inline encryption.  The
+ * derived secret is guaranteed to be cryptographically isolated from the key
+ * with which any inline encryption with this wrapped key would actually be
+ * done.  I.e., both will be derived from the unwrapped key.
+ *
+ * Return: 0 on success, -EOPNOTSUPP if hardware-wrapped keys are unsupported,
+ *	   or another -errno code.
+ */
+int blk_ksm_derive_raw_secret(struct blk_keyslot_manager *ksm,
+			      const u8 *wrapped_key,
+			      unsigned int wrapped_key_size,
+			      u8 *secret, unsigned int secret_size)
+{
+	int err;
+
+	if (ksm->ksm_ll_ops.derive_raw_secret) {
+		blk_ksm_hw_enter(ksm);
+		err = ksm->ksm_ll_ops.derive_raw_secret(ksm, wrapped_key,
+							wrapped_key_size,
+							secret, secret_size);
+		blk_ksm_hw_exit(ksm);
+	} else {
+		err = -EOPNOTSUPP;
+	}
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(blk_ksm_derive_raw_secret);
+
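A short sketch of a blk_ksm_derive_raw_secret() caller, e.g. deriving a software secret from a hardware-wrapped key for use outside inline encryption; the 32-byte secret size is the caller's assumption, not something this API fixes:

	u8 secret[32];
	int err;

	err = blk_ksm_derive_raw_secret(q->ksm, key->raw, key->size,
					secret, sizeof(secret));
	if (err == -EOPNOTSUPP)
		return err;	/* hardware cannot derive one; fall back or fail */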
+/**
+ * blk_ksm_intersect_modes() - restrict supported modes by child device
+ * @parent: The keyslot manager for parent device
+ * @child: The keyslot manager for child device, or NULL
+ *
+ * Clear any crypto mode support bits in @parent that aren't set in @child.
+ * If @child is NULL, then all parent bits are cleared.
+ *
+ * Only use this when setting up the keyslot manager for a layered device,
+ * before it's been exposed yet.
+ */
+void blk_ksm_intersect_modes(struct blk_keyslot_manager *parent,
+			     const struct blk_keyslot_manager *child)
+{
+	if (child) {
+		unsigned int i;
+
+		parent->max_dun_bytes_supported =
+			min(parent->max_dun_bytes_supported,
+			    child->max_dun_bytes_supported);
+		parent->features &= child->features;
+		for (i = 0; i < ARRAY_SIZE(child->crypto_modes_supported);
+		     i++) {
+			parent->crypto_modes_supported[i] &=
+				child->crypto_modes_supported[i];
+		}
+	} else {
+		parent->max_dun_bytes_supported = 0;
+		parent->features = 0;
+		memset(parent->crypto_modes_supported, 0,
+		       sizeof(parent->crypto_modes_supported));
+	}
+}
+EXPORT_SYMBOL_GPL(blk_ksm_intersect_modes);
+
+/**
+ * blk_ksm_init_passthrough() - Init a passthrough keyslot manager
+ * @ksm: The keyslot manager to init
+ *
+ * Initialize a passthrough keyslot manager.
+ * Called by e.g. storage drivers to set up a keyslot manager in their
+ * request_queue, when the storage driver wants to manage its keys by itself.
+ * This is useful for inline encryption hardware that doesn't have a small
+ * fixed number of keyslots, and for layered devices.
+ *
+ * See blk_ksm_init() for more details about the parameters.
+ */
+void blk_ksm_init_passthrough(struct blk_keyslot_manager *ksm)
+{
+	memset(ksm, 0, sizeof(*ksm));
+	init_rwsem(&ksm->lock);
+}
+EXPORT_SYMBOL_GPL(blk_ksm_init_passthrough);
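A hedged sketch of the intended caller: a driver whose hardware manages keys itself initializes a passthrough manager (num_slots == 0), fills in its capabilities, and registers it with its queue. blk_ksm_register() is the existing registration helper; my_ksm_ops is a driver-defined blk_ksm_ll_ops with keyslot_evict (and optionally derive_raw_secret) set:

	static struct blk_keyslot_manager my_ksm;

	static void my_driver_setup_crypto(struct request_queue *q)
	{
		blk_ksm_init_passthrough(&my_ksm);
		my_ksm.ksm_ll_ops = my_ksm_ops;
		my_ksm.max_dun_bytes_supported = 8;
		my_ksm.features = BLK_CRYPTO_FEATURE_STANDARD_KEYS |
				  BLK_CRYPTO_FEATURE_WRAPPED_KEYS;
		/* bitmask of supported data unit sizes: 4096 only */
		my_ksm.crypto_modes_supported[BLK_ENCRYPTION_MODE_AES_256_XTS] = 4096;
		blk_ksm_register(&my_ksm, q);
	}

Layered drivers such as device-mapper can additionally use blk_ksm_intersect_modes() to clamp the capabilities they advertise to what every underlying device supports.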
diff --git a/build.config.aarch64 b/build.config.aarch64
new file mode 100644
index 0000000..0b9adb9
--- /dev/null
+++ b/build.config.aarch64
@@ -0,0 +1,16 @@
+ARCH=arm64
+
+LLVM_IAS=1
+CROSS_COMPILE=aarch64-linux-gnu-
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gas/linux-x86
+
+MAKE_GOALS="
+Image
+modules
+"
+
+FILES="
+arch/arm64/boot/Image
+vmlinux
+System.map
+"
diff --git a/build.config.allmodconfig b/build.config.allmodconfig
new file mode 100644
index 0000000..89b6c5c
--- /dev/null
+++ b/build.config.allmodconfig
@@ -0,0 +1,14 @@
+DEFCONFIG=allmodconfig
+
+POST_DEFCONFIG_CMDS="update_config"
+function update_config() {
+    ${KERNEL_DIR}/scripts/config --file ${OUT_DIR}/.config \
+         -d TEST_KMOD  \
+         -d CPU_BIG_ENDIAN \
+         -e UNWINDER_FRAME_POINTER \
+
+    (cd ${OUT_DIR} && \
+     make O=${OUT_DIR} $archsubarch CROSS_COMPILE=${CROSS_COMPILE} "${TOOL_ARGS[@]}" ${MAKE_ARGS} olddefconfig)
+}
+
+ADDITIONAL_HOST_TOOLS="${ADDITIONAL_HOST_TOOLS} openssl"
diff --git a/build.config.allmodconfig.aarch64 b/build.config.allmodconfig.aarch64
new file mode 100644
index 0000000..2fbe380
--- /dev/null
+++ b/build.config.allmodconfig.aarch64
@@ -0,0 +1,4 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.allmodconfig
+
diff --git a/build.config.allmodconfig.arm b/build.config.allmodconfig.arm
new file mode 100644
index 0000000..e92744a
--- /dev/null
+++ b/build.config.allmodconfig.arm
@@ -0,0 +1,4 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.arm
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.allmodconfig
+
diff --git a/build.config.allmodconfig.x86_64 b/build.config.allmodconfig.x86_64
new file mode 100644
index 0000000..f06b30c
--- /dev/null
+++ b/build.config.allmodconfig.x86_64
@@ -0,0 +1,4 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.allmodconfig
+
diff --git a/build.config.arm b/build.config.arm
new file mode 100644
index 0000000..77f8dc8
--- /dev/null
+++ b/build.config.arm
@@ -0,0 +1,15 @@
+ARCH=arm
+
+CROSS_COMPILE=arm-linux-gnueabi-
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gas/linux-x86
+
+MAKE_GOALS="
+zImage
+modules
+"
+
+FILES="
+arch/arm/boot/zImage
+vmlinux
+System.map
+"
diff --git a/build.config.common b/build.config.common
new file mode 100644
index 0000000..8605f2c
--- /dev/null
+++ b/build.config.common
@@ -0,0 +1,15 @@
+BRANCH=android-mainline
+KMI_GENERATION=0
+
+LLVM=1
+DEPMOD=depmod
+DTC=dtc
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-r399163b/bin
+BUILDTOOLS_PREBUILT_BIN=build/build-tools/path/linux-x86
+
+EXTRA_CMDS=''
+STOP_SHIP_TRACEPRINTK=1
+IN_KERNEL_MODULES=1
+DO_NOT_STRIP_MODULES=1
+
+HERMETIC_TOOLCHAIN=1
diff --git a/build.config.db845c b/build.config.db845c
new file mode 100644
index 0000000..03351b8
--- /dev/null
+++ b/build.config.db845c
@@ -0,0 +1,17 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
+
+BUILD_INITRAMFS=1
+DEFCONFIG=db845c_gki_defconfig
+PRE_DEFCONFIG_CMDS="KCONFIG_CONFIG=${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG} ${ROOT_DIR}/${KERNEL_DIR}/scripts/kconfig/merge_config.sh -m -r ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/gki_defconfig ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/db845c_gki.fragment"
+POST_DEFCONFIG_CMDS="rm ${ROOT_DIR}/${KERNEL_DIR}/arch/arm64/configs/${DEFCONFIG}"
+
+MAKE_GOALS="${MAKE_GOALS}
+qcom/sdm845-db845c.dtb
+Image.gz
+"
+
+FILES="${FILES}
+arch/arm64/boot/Image.gz
+arch/arm64/boot/dts/qcom/sdm845-db845c.dtb
+"
diff --git a/build.config.gki b/build.config.gki
new file mode 100644
index 0000000..44d4ed1
--- /dev/null
+++ b/build.config.gki
@@ -0,0 +1,3 @@
+DEFCONFIG=gki_defconfig
+POST_DEFCONFIG_CMDS="check_defconfig"
+
diff --git a/build.config.gki-debug.aarch64 b/build.config.gki-debug.aarch64
new file mode 100644
index 0000000..c1fe2f0
--- /dev/null
+++ b/build.config.gki-debug.aarch64
@@ -0,0 +1,3 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki.aarch64
+TRIM_NONLISTED_KMI=""
+KMI_SYMBOL_LIST_STRICT_MODE=""
diff --git a/build.config.gki-debug.x86_64 b/build.config.gki-debug.x86_64
new file mode 100644
index 0000000..d89b7ad
--- /dev/null
+++ b/build.config.gki-debug.x86_64
@@ -0,0 +1,3 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki.x86_64
+TRIM_NONLISTED_KMI=""
+KMI_SYMBOL_LIST_STRICT_MODE=""
diff --git a/build.config.gki.aarch64 b/build.config.gki.aarch64
new file mode 100644
index 0000000..0dd6e44
--- /dev/null
+++ b/build.config.gki.aarch64
@@ -0,0 +1,5 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki
+
+GENERATE_VMLINUX_BTF=1
diff --git a/build.config.gki.x86_64 b/build.config.gki.x86_64
new file mode 100644
index 0000000..0e04fc6
--- /dev/null
+++ b/build.config.gki.x86_64
@@ -0,0 +1,4 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki
+
diff --git a/build.config.gki_kasan b/build.config.gki_kasan
new file mode 100644
index 0000000..e682b0d
--- /dev/null
+++ b/build.config.gki_kasan
@@ -0,0 +1,23 @@
+DEFCONFIG=gki_defconfig
+POST_DEFCONFIG_CMDS="check_defconfig && update_kasan_config"
+KERNEL_DIR=common
+function update_kasan_config() {
+    ${KERNEL_DIR}/scripts/config --file ${OUT_DIR}/.config \
+         -e CONFIG_KASAN \
+         -e CONFIG_KASAN_INLINE \
+         -e CONFIG_KASAN_PANIC_ON_WARN \
+         -e CONFIG_KCOV \
+         -e CONFIG_PANIC_ON_WARN_DEFAULT_ENABLE \
+         -d CONFIG_RANDOMIZE_BASE \
+         -d CONFIG_KASAN_OUTLINE \
+         --set-val CONFIG_FRAME_WARN 0 \
+         -d LTO \
+         -d LTO_CLANG \
+         -d CFI \
+         -d CFI_PERMISSIVE \
+         -d CFI_CLANG \
+         -d SHADOW_CALL_STACK
+    (cd ${OUT_DIR} && \
+     make ${CC_LD_ARG} O=${OUT_DIR} olddefconfig)
+}
+
diff --git a/build.config.gki_kasan.aarch64 b/build.config.gki_kasan.aarch64
new file mode 100644
index 0000000..9fd2560
--- /dev/null
+++ b/build.config.gki_kasan.aarch64
@@ -0,0 +1,3 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.aarch64
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki_kasan
diff --git a/build.config.gki_kasan.x86_64 b/build.config.gki_kasan.x86_64
new file mode 100644
index 0000000..eec6458
--- /dev/null
+++ b/build.config.gki_kasan.x86_64
@@ -0,0 +1,4 @@
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.common
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.x86_64
+. ${ROOT_DIR}/${KERNEL_DIR}/build.config.gki_kasan
+
diff --git a/build.config.x86_64 b/build.config.x86_64
new file mode 100644
index 0000000..aa0c49e
--- /dev/null
+++ b/build.config.x86_64
@@ -0,0 +1,16 @@
+ARCH=x86_64
+
+LLVM_IAS=1
+CROSS_COMPILE=x86_64-linux-gnu-
+LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gas/linux-x86
+
+MAKE_GOALS="
+bzImage
+modules
+"
+
+FILES="
+arch/x86/boot/bzImage
+vmlinux
+System.map
+"
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 53b22e2..32fb9e5 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -54,6 +54,15 @@
 	  exhaustively with combinations of various buffer sizes and
 	  alignments.
 
+config ANDROID_VENDOR_HOOKS
+	bool "Android Vendor Hooks"
+	depends on TRACEPOINTS
+	help
+	  Enable vendor hooks implemented as tracepoints.
+
+	  Allow vendor modules to attach to tracepoint "hooks" defined via
+	  DECLARE_HOOK or DECLARE_RESTRICTED_HOOK.
+
 endif # if ANDROID
 
 endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index c9d3d0c9..d488047 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -4,3 +4,4 @@
 obj-$(CONFIG_ANDROID_BINDERFS)		+= binderfs.o
 obj-$(CONFIG_ANDROID_BINDER_IPC)	+= binder.o binder_alloc.o
 obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
+obj-$(CONFIG_ANDROID_VENDOR_HOOKS) += vendor_hooks.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index b511757..bce8e1b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -66,7 +66,9 @@
 #include <linux/syscalls.h>
 #include <linux/task_work.h>
 #include <linux/sizes.h>
+#include <linux/android_vendor.h>
 
+#include <uapi/linux/sched/types.h>
 #include <uapi/linux/android/binder.h>
 #include <uapi/linux/android/binderfs.h>
 
@@ -75,6 +77,7 @@
 #include "binder_alloc.h"
 #include "binder_internal.h"
 #include "binder_trace.h"
+#include <trace/hooks/binder.h>
 
 static HLIST_HEAD(binder_deferred_list);
 static DEFINE_MUTEX(binder_deferred_lock);
@@ -288,10 +291,13 @@
  *                        and by @lock)
  * @has_async_transaction: async transaction to node in progress
  *                        (protected by @lock)
+ * @sched_policy:         minimum scheduling policy for node
+ *                        (invariant after initialized)
  * @accept_fds:           file descriptor operations supported for node
  *                        (invariant after initialized)
  * @min_priority:         minimum scheduling priority
  *                        (invariant after initialized)
+ * @inherit_rt:           inherit RT scheduling policy from caller
  * @txn_security_ctx:     require sender's security context
  *                        (invariant after initialized)
  * @async_todo:           list of async work items
@@ -329,6 +335,8 @@
 		/*
 		 * invariant after initialization
 		 */
+		u8 sched_policy:2;
+		u8 inherit_rt:1;
 		u8 accept_fds:1;
 		u8 txn_security_ctx:1;
 		u8 min_priority;
@@ -403,6 +411,22 @@
 };
 
 /**
+ * struct binder_priority - scheduler policy and priority
+ * @sched_policy:           scheduler policy
+ * @prio:                   [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
+ *
+ * The binder driver supports inheriting the following scheduler policies:
+ * SCHED_NORMAL
+ * SCHED_BATCH
+ * SCHED_FIFO
+ * SCHED_RR
+ */
+struct binder_priority {
+	unsigned int sched_policy;
+	int prio;
+};
+
+/**
  * struct binder_proc - binder process bookkeeping
  * @proc_node:            element for binder_procs list
  * @threads:              rbtree of binder_threads in this proc
@@ -476,7 +500,7 @@
 	int requested_threads;
 	int requested_threads_started;
 	int tmp_ref;
-	long default_priority;
+	struct binder_priority default_priority;
 	struct dentry *debugfs_entry;
 	struct binder_alloc alloc;
 	struct binder_context *context;
@@ -527,6 +551,7 @@
  * @is_dead:              thread is dead and awaiting free
  *                        when outstanding transactions are cleaned up
  *                        (protected by @proc->inner_lock)
+ * @task:                 struct task_struct for this thread
  *
  * Bookkeeping structure for binder threads.
  */
@@ -546,6 +571,7 @@
 	struct binder_stats stats;
 	atomic_t tmp_ref;
 	bool is_dead;
+	struct task_struct *task;
 };
 
 /**
@@ -579,8 +605,9 @@
 	struct binder_buffer *buffer;
 	unsigned int	code;
 	unsigned int	flags;
-	long	priority;
-	long	saved_priority;
+	struct binder_priority	priority;
+	struct binder_priority	saved_priority;
+	bool    set_priority_called;
 	kuid_t	sender_euid;
 	struct list_head fd_fixups;
 	binder_uintptr_t security_ctx;
@@ -591,6 +618,7 @@
 	 * during thread teardown
 	 */
 	spinlock_t lock;
+	ANDROID_VENDOR_DATA(1);
 };
 
 /**
@@ -929,6 +957,7 @@
 		thread = rb_entry(n, struct binder_thread, rb_node);
 		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
 		    binder_available_for_proc_work_ilocked(thread)) {
+			trace_android_vh_binder_wakeup_poll_threads_ilocked(thread->task);
 			if (sync)
 				wake_up_interruptible_sync(&thread->wait);
 			else
@@ -988,6 +1017,7 @@
 	assert_spin_locked(&proc->inner_lock);
 
 	if (thread) {
+		trace_android_vh_binder_wakeup_thread_ilocked(thread->task);
 		if (sync)
 			wake_up_interruptible_sync(&thread->wait);
 		else
@@ -1018,22 +1048,146 @@
 	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
 }
 
-static void binder_set_nice(long nice)
+static bool is_rt_policy(int policy)
 {
-	long min_nice;
+	return policy == SCHED_FIFO || policy == SCHED_RR;
+}
 
-	if (can_nice(current, nice)) {
-		set_user_nice(current, nice);
+static bool is_fair_policy(int policy)
+{
+	return policy == SCHED_NORMAL || policy == SCHED_BATCH;
+}
+
+static bool binder_supported_policy(int policy)
+{
+	return is_fair_policy(policy) || is_rt_policy(policy);
+}
+
+static int to_userspace_prio(int policy, int kernel_priority)
+{
+	if (is_fair_policy(policy))
+		return PRIO_TO_NICE(kernel_priority);
+	else
+		return MAX_USER_RT_PRIO - 1 - kernel_priority;
+}
+
+static int to_kernel_prio(int policy, int user_priority)
+{
+	if (is_fair_policy(policy))
+		return NICE_TO_PRIO(user_priority);
+	else
+		return MAX_USER_RT_PRIO - 1 - user_priority;
+}
+
+static void binder_do_set_priority(struct task_struct *task,
+				   struct binder_priority desired,
+				   bool verify)
+{
+	int priority; /* user-space prio value */
+	bool has_cap_nice;
+	unsigned int policy = desired.sched_policy;
+
+	if (task->policy == policy && task->normal_prio == desired.prio)
 		return;
+
+	has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
+
+	priority = to_userspace_prio(policy, desired.prio);
+
+	if (verify && is_rt_policy(policy) && !has_cap_nice) {
+		long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
+
+		if (max_rtprio == 0) {
+			policy = SCHED_NORMAL;
+			priority = MIN_NICE;
+		} else if (priority > max_rtprio) {
+			priority = max_rtprio;
+		}
 	}
-	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
-	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
-		     "%d: nice value %ld not allowed use %ld instead\n",
-		      current->pid, nice, min_nice);
-	set_user_nice(current, min_nice);
-	if (min_nice <= MAX_NICE)
+
+	if (verify && is_fair_policy(policy) && !has_cap_nice) {
+		long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
+
+		if (min_nice > MAX_NICE) {
+			binder_user_error("%d RLIMIT_NICE not set\n",
+					  task->pid);
+			return;
+		} else if (priority < min_nice) {
+			priority = min_nice;
+		}
+	}
+
+	if (policy != desired.sched_policy ||
+	    to_kernel_prio(policy, priority) != desired.prio)
+		binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+			     "%d: priority %d not allowed, using %d instead\n",
+			      task->pid, desired.prio,
+			      to_kernel_prio(policy, priority));
+
+	trace_binder_set_priority(task->tgid, task->pid, task->normal_prio,
+				  to_kernel_prio(policy, priority),
+				  desired.prio);
+
+	/* Set the actual priority */
+	if (task->policy != policy || is_rt_policy(policy)) {
+		struct sched_param params;
+
+		params.sched_priority = is_rt_policy(policy) ? priority : 0;
+
+		sched_setscheduler_nocheck(task,
+					   policy | SCHED_RESET_ON_FORK,
+					   &params);
+	}
+	if (is_fair_policy(policy))
+		set_user_nice(task, priority);
+}
+
+static void binder_set_priority(struct task_struct *task,
+				struct binder_priority desired)
+{
+	binder_do_set_priority(task, desired, /* verify = */ true);
+}
+
+static void binder_restore_priority(struct task_struct *task,
+				    struct binder_priority desired)
+{
+	binder_do_set_priority(task, desired, /* verify = */ false);
+}
+
+static void binder_transaction_priority(struct task_struct *task,
+					struct binder_transaction *t,
+					struct binder_priority node_prio,
+					bool inherit_rt)
+{
+	struct binder_priority desired_prio = t->priority;
+
+	if (t->set_priority_called)
 		return;
-	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
+
+	t->set_priority_called = true;
+	t->saved_priority.sched_policy = task->policy;
+	t->saved_priority.prio = task->normal_prio;
+
+	if (!inherit_rt && is_rt_policy(desired_prio.sched_policy)) {
+		desired_prio.prio = NICE_TO_PRIO(0);
+		desired_prio.sched_policy = SCHED_NORMAL;
+	}
+
+	if (node_prio.prio < t->priority.prio ||
+	    (node_prio.prio == t->priority.prio &&
+	     node_prio.sched_policy == SCHED_FIFO)) {
+		/*
+		 * In case the minimum priority on the node is
+		 * higher (lower value), use that priority. If
+		 * the priority is the same, but the node uses
+		 * SCHED_FIFO, prefer SCHED_FIFO, since it can
+		 * run unbounded, unlike SCHED_RR.
+		 */
+		desired_prio = node_prio;
+	}
+
+	binder_set_priority(task, desired_prio);
+	trace_android_vh_binder_set_priority(t, task);
 }
 
 static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
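To make the two mappings concrete, a self-check sketch (not part of the patch) using the helpers just added; the constants follow the standard kernel definitions, where NICE_TO_PRIO(n) == 120 + n and MAX_USER_RT_PRIO == 100:

	static void binder_prio_mapping_examples(void)
	{
		/* SCHED_NORMAL: nice 0 <-> kernel prio 120 */
		WARN_ON(to_kernel_prio(SCHED_NORMAL, 0) != NICE_TO_PRIO(0));
		/* SCHED_FIFO: rtprio 99 (highest) <-> kernel prio 0 */
		WARN_ON(to_kernel_prio(SCHED_FIFO, 99) != 0);
		/* and the round trip: kernel prio 98 <-> rtprio 1 */
		WARN_ON(to_userspace_prio(SCHED_FIFO, 98) != 1);
	}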
@@ -1086,6 +1240,7 @@
 	binder_uintptr_t ptr = fp ? fp->binder : 0;
 	binder_uintptr_t cookie = fp ? fp->cookie : 0;
 	__u32 flags = fp ? fp->flags : 0;
+	s8 priority;
 
 	assert_spin_locked(&proc->inner_lock);
 
@@ -1118,8 +1273,12 @@
 	node->ptr = ptr;
 	node->cookie = cookie;
 	node->work.type = BINDER_WORK_NODE;
-	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+	priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+	node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
+		FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
+	node->min_priority = to_kernel_prio(node->sched_policy, priority);
 	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+	node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
 	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
 	spin_lock_init(&node->lock);
 	INIT_LIST_HEAD(&node->work.entry);
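On the userspace side these node attributes come in through flat_binder_object flags. A hedged sketch of how a service might declare an RT-inheriting, SCHED_FIFO-capable node; the FLAT_BINDER_FLAG_* names match the binder UAPI change this hunk parses, which lies outside this excerpt:

	struct flat_binder_object obj = {
		.hdr.type = BINDER_TYPE_BINDER,
		.flags = FLAT_BINDER_FLAG_INHERIT_RT |
			 (SCHED_FIFO << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT) |
			 10,	/* min priority, in the policy's userspace scale */
	};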
@@ -2729,11 +2888,15 @@
 				    struct binder_thread *thread)
 {
 	struct binder_node *node = t->buffer->target_node;
+	struct binder_priority node_prio;
 	bool oneway = !!(t->flags & TF_ONE_WAY);
 	bool pending_async = false;
 
 	BUG_ON(!node);
 	binder_node_lock(node);
+	node_prio.prio = node->min_priority;
+	node_prio.sched_policy = node->sched_policy;
+
 	if (oneway) {
 		BUG_ON(thread);
 		if (node->has_async_transaction)
@@ -2753,12 +2916,15 @@
 	if (!thread && !pending_async)
 		thread = binder_select_thread_ilocked(proc);
 
-	if (thread)
+	if (thread) {
+		binder_transaction_priority(thread->task, t, node_prio,
+					    node->inherit_rt);
 		binder_enqueue_thread_work_ilocked(thread, &t->work);
-	else if (!pending_async)
+	} else if (!pending_async) {
 		binder_enqueue_work_ilocked(&t->work, &proc->todo);
-	else
+	} else {
 		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
+	}
 
 	if (!pending_async)
 		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
@@ -2879,7 +3045,6 @@
 		}
 		thread->transaction_stack = in_reply_to->to_parent;
 		binder_inner_proc_unlock(proc);
-		binder_set_nice(in_reply_to->saved_priority);
 		target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
 		if (target_thread == NULL) {
 			/* annotation for sparse */
@@ -2957,12 +3122,6 @@
 			goto err_dead_binder;
 		}
 		e->to_node = target_node->debug_id;
-		if (WARN_ON(proc == target_proc)) {
-			return_error = BR_FAILED_REPLY;
-			return_error_param = -EINVAL;
-			return_error_line = __LINE__;
-			goto err_invalid_target_handle;
-		}
 		if (security_binder_transaction(proc->tsk,
 						target_proc->tsk) < 0) {
 			return_error = BR_FAILED_REPLY;
@@ -3044,6 +3203,7 @@
 	INIT_LIST_HEAD(&t->fd_fixups);
 	binder_stats_created(BINDER_STAT_TRANSACTION);
 	spin_lock_init(&t->lock);
+	trace_android_vh_binder_transaction_init(t);
 
 	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
 	if (tcomplete == NULL) {
@@ -3084,7 +3244,15 @@
 	t->to_thread = target_thread;
 	t->code = tr->code;
 	t->flags = tr->flags;
-	t->priority = task_nice(current);
+	if (!(t->flags & TF_ONE_WAY) &&
+	    binder_supported_policy(current->policy)) {
+		/* Inherit supported policies for synchronous transactions */
+		t->priority.sched_policy = current->policy;
+		t->priority.prio = current->normal_prio;
+	} else {
+		/* Otherwise, fall back to the default priority */
+		t->priority = target_proc->default_priority;
+	}
 
 	if (target_node && target_node->txn_security_ctx) {
 		u32 secid;
@@ -3411,6 +3579,8 @@
 		binder_enqueue_thread_work_ilocked(target_thread, &t->work);
 		binder_inner_proc_unlock(target_proc);
 		wake_up_interruptible_sync(&target_thread->wait);
+		trace_android_vh_binder_restore_priority(in_reply_to, current);
+		binder_restore_priority(current, in_reply_to->saved_priority);
 		binder_free_transaction(in_reply_to);
 	} else if (!(t->flags & TF_ONE_WAY)) {
 		BUG_ON(t->buffer->async_transaction != 0);
@@ -3521,6 +3691,8 @@
 
 	BUG_ON(thread->return_error.cmd != BR_OK);
 	if (in_reply_to) {
+		trace_android_vh_binder_restore_priority(in_reply_to, current);
+		binder_restore_priority(current, in_reply_to->saved_priority);
 		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
 		binder_enqueue_thread_work(thread, &thread->return_error.work);
 		binder_send_failed_reply(in_reply_to, return_error);
@@ -3616,17 +3788,10 @@
 				struct binder_node *ctx_mgr_node;
 				mutex_lock(&context->context_mgr_node_lock);
 				ctx_mgr_node = context->binder_context_mgr_node;
-				if (ctx_mgr_node) {
-					if (ctx_mgr_node->proc == proc) {
-						binder_user_error("%d:%d context manager tried to acquire desc 0\n",
-								  proc->pid, thread->pid);
-						mutex_unlock(&context->context_mgr_node_lock);
-						return -EINVAL;
-					}
+				if (ctx_mgr_node)
 					ret = binder_inc_ref_for_node(
 							proc, ctx_mgr_node,
 							strong, NULL, &rdata);
-				}
 				mutex_unlock(&context->context_mgr_node_lock);
 			}
 			if (ret)
@@ -4194,7 +4359,8 @@
 			wait_event_interruptible(binder_user_error_wait,
 						 binder_stop_on_user_error < 2);
 		}
-		binder_set_nice(proc->default_priority);
+		trace_android_vh_binder_restore_priority(NULL, current);
+		binder_restore_priority(current, proc->default_priority);
 	}
 
 	if (non_block) {
@@ -4416,16 +4582,14 @@
 		BUG_ON(t->buffer == NULL);
 		if (t->buffer->target_node) {
 			struct binder_node *target_node = t->buffer->target_node;
+			struct binder_priority node_prio;
 
 			trd->target.ptr = target_node->ptr;
 			trd->cookie =  target_node->cookie;
-			t->saved_priority = task_nice(current);
-			if (t->priority < target_node->min_priority &&
-			    !(t->flags & TF_ONE_WAY))
-				binder_set_nice(t->priority);
-			else if (!(t->flags & TF_ONE_WAY) ||
-				 t->saved_priority > target_node->min_priority)
-				binder_set_nice(target_node->min_priority);
+			node_prio.sched_policy = target_node->sched_policy;
+			node_prio.prio = target_node->min_priority;
+			binder_transaction_priority(current, t, node_prio,
+						    target_node->inherit_rt);
 			cmd = BR_TRANSACTION;
 		} else {
 			trd->target.ptr = 0;
@@ -4643,6 +4807,8 @@
 	binder_stats_created(BINDER_STAT_THREAD);
 	thread->proc = proc;
 	thread->pid = current->pid;
+	get_task_struct(current);
+	thread->task = current;
 	atomic_set(&thread->tmp_ref, 0);
 	init_waitqueue_head(&thread->wait);
 	INIT_LIST_HEAD(&thread->todo);
@@ -4700,6 +4866,7 @@
 	BUG_ON(!list_empty(&thread->todo));
 	binder_stats_deleted(BINDER_STAT_THREAD);
 	binder_proc_dec_tmpref(thread->proc);
+	put_task_struct(thread->task);
 	kfree(thread);
 }
 
@@ -5209,7 +5376,14 @@
 	get_task_struct(current->group_leader);
 	proc->tsk = current->group_leader;
 	INIT_LIST_HEAD(&proc->todo);
-	proc->default_priority = task_nice(current);
+	if (binder_supported_policy(current->policy)) {
+		proc->default_priority.sched_policy = current->policy;
+		proc->default_priority.prio = current->normal_prio;
+	} else {
+		proc->default_priority.sched_policy = SCHED_NORMAL;
+		proc->default_priority.prio = NICE_TO_PRIO(0);
+	}
+
 	/* binderfs stashes devices in i_private */
 	if (is_binderfs_device(nodp)) {
 		binder_dev = nodp->i_private;
@@ -5531,13 +5705,14 @@
 	spin_lock(&t->lock);
 	to_proc = t->to_proc;
 	seq_printf(m,
-		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d r%d",
 		   prefix, t->debug_id, t,
 		   t->from ? t->from->proc->pid : 0,
 		   t->from ? t->from->pid : 0,
 		   to_proc ? to_proc->pid : 0,
 		   t->to_thread ? t->to_thread->pid : 0,
-		   t->code, t->flags, t->priority, t->need_reply);
+		   t->code, t->flags, t->priority.sched_policy,
+		   t->priority.prio, t->need_reply);
 	spin_unlock(&t->lock);
 
 	if (proc != to_proc) {
@@ -5655,8 +5830,9 @@
 	hlist_for_each_entry(ref, &node->refs, node_entry)
 		count++;
 
-	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
+	seq_printf(m, "  node %d: u%016llx c%016llx pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
 		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
+		   node->sched_policy, node->min_priority,
 		   node->has_strong_ref, node->has_weak_ref,
 		   node->local_strong_refs, node->local_weak_refs,
 		   node->internal_strong_refs, count, node->tmp_refs);
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 6731c3c..a70e237 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -76,6 +76,30 @@
 DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done);
 DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done);
 
+TRACE_EVENT(binder_set_priority,
+	TP_PROTO(int proc, int thread, unsigned int old_prio,
+		 unsigned int desired_prio, unsigned int new_prio),
+	TP_ARGS(proc, thread, old_prio, new_prio, desired_prio),
+
+	TP_STRUCT__entry(
+		__field(int, proc)
+		__field(int, thread)
+		__field(unsigned int, old_prio)
+		__field(unsigned int, new_prio)
+		__field(unsigned int, desired_prio)
+	),
+	TP_fast_assign(
+		__entry->proc = proc;
+		__entry->thread = thread;
+		__entry->old_prio = old_prio;
+		__entry->new_prio = new_prio;
+		__entry->desired_prio = desired_prio;
+	),
+	TP_printk("proc=%d thread=%d old=%d => new=%d desired=%d",
+		  __entry->proc, __entry->thread, __entry->old_prio,
+		  __entry->new_prio, __entry->desired_prio)
+);
+
 TRACE_EVENT(binder_wait_for_work,
 	TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo),
 	TP_ARGS(proc_work, transaction_stack, thread_todo),
diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c
new file mode 100644
index 0000000..d801a28
--- /dev/null
+++ b/drivers/android/vendor_hooks.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* vendor_hooks.c
+ *
+ * Android Vendor Hook Support
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#define CREATE_TRACE_POINTS
+#include <trace/hooks/vendor_hooks.h>
+#include <trace/hooks/sched.h>
+#include <trace/hooks/fpsimd.h>
+#include <trace/hooks/binder.h>
+#include <trace/hooks/rwsem.h>
+#include <trace/hooks/futex.h>
+#include <trace/hooks/dtask.h>
+#include <trace/hooks/cpuidle.h>
+#include <trace/hooks/topology.h>
+#include <trace/hooks/mpam.h>
+#include <trace/hooks/gic.h>
+#include <trace/hooks/wqlockup.h>
+#include <trace/hooks/debug.h>
+#include <trace/hooks/sysrqcrash.h>
+#include <trace/hooks/printk.h>
+#include <trace/hooks/gic_v3.h>
+#include <trace/hooks/epoch.h>
+#include <trace/hooks/cpufreq.h>
+#include <trace/hooks/mm.h>
+#include <trace/hooks/preemptirq.h>
+
+/*
+ * Export tracepoints that act as a bare tracehook (i.e., have no trace event
+ * associated with them) to allow external modules to probe them.
+ */
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_select_task_rq_fair);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_select_task_rq_rt);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_select_fallback_rq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_scheduler_tick);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_enqueue_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_can_migrate_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_lowest_rq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_rtmutex_prepare_setprio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_prepare_prio_fork);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_finish_prio_fork);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_user_nice);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_setscheduler);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_arch_set_freq_scale);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_is_fpsimd_save);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_init);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_set_priority);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_restore_priority);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_wakeup_poll_threads_ilocked);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_wakeup_thread_ilocked);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_init);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_wake);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_write_finished);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_rwsem_list_add);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_futex_plist_add);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_wait_finish);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_read_wait_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_read_wait_finish);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_write_wait_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_write_wait_finish);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_show_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_idle);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mpam_set);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_busiest_group);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_resume);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_wq_lockup_pool);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ipi_stop);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sysrq_crash);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dump_throttled_rt_tasks);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_printk_hotplug);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_jiffies_update);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_v3_set_affinity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_v3_affinity_init);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_suspend_epoch_val);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_resume_epoch_val);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_max_freq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_table_limits);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_newidle_balance);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_nohz_balancer_kick);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_busiest_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_migrate_queued_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_energy_efficient_cpu);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_sugov_sched_attr);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_iowait);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_sugov_update);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_setaffinity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_cpus_allowed);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_skip_swapcache_flags);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_gfp_zone_flags);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_readahead_gfp_mask);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_preempt_disable);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_preempt_enable);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_irqs_disable);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_irqs_enable);
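A minimal sketch of how a vendor module attaches to one of these exported hooks. DECLARE_HOOK generates a register_trace_<name>() helper, and the probe receives the registration cookie as its first argument; the hook's prototype here is inferred from the trace_android_vh_binder_transaction_init() call site added above:

	#include <trace/hooks/binder.h>

	static void my_binder_txn_init(void *data, struct binder_transaction *t)
	{
		/* vendor-specific bookkeeping for every new transaction */
	}

	static int __init my_vendor_module_init(void)
	{
		return register_trace_android_vh_binder_transaction_init(
				my_binder_txn_init, NULL);
	}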
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index de8587c..56f317f 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -20,6 +20,7 @@
 #include <linux/percpu.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <trace/hooks/topology.h>
 
 bool topology_scale_freq_invariant(void)
 {
@@ -32,6 +33,7 @@
 	return false;
 }
 DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(freq_scale);
 
 void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
 			     unsigned long max_freq)
@@ -52,11 +54,14 @@
 
 	scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
 
+	trace_android_vh_arch_set_freq_scale(cpus, cur_freq, max_freq, &scale);
+
 	for_each_cpu(i, cpus)
 		per_cpu(freq_scale, i) = scale;
 }
 
 DEFINE_PER_CPU(unsigned long, cpu_scale) = SCHED_CAPACITY_SCALE;
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_scale);
 
 void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity)
 {
@@ -73,6 +78,7 @@
 	for_each_cpu(cpu, cpus)
 		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
 }
+EXPORT_SYMBOL_GPL(topology_set_thermal_pressure);
 
 static ssize_t cpu_capacity_show(struct device *dev,
 				 struct device_attribute *attr,
diff --git a/drivers/base/core.c b/drivers/base/core.c
index d661ada..46eeb95 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1409,7 +1409,7 @@
 	device_links_write_unlock();
 }
 
-static u32 fw_devlink_flags = DL_FLAG_SYNC_STATE_ONLY;
+static u32 fw_devlink_flags = DL_FLAG_AUTOPROBE_CONSUMER;
 static int __init fw_devlink_setup(char *arg)
 {
 	if (!arg)
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c7ac490..d3c863c 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -34,6 +34,7 @@
 #include <linux/cpuidle.h>
 #include <linux/devfreq.h>
 #include <linux/timer.h>
+#include <linux/wakeup_reason.h>
 
 #include "../base.h"
 #include "power.h"
@@ -1233,6 +1234,8 @@
 	error = dpm_run_callback(callback, dev, state, info);
 	if (error) {
 		async_error = error;
+		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
+					 dev_name(dev), callback, error);
 		goto Complete;
 	}
 
@@ -1425,6 +1428,8 @@
 	error = dpm_run_callback(callback, dev, state, info);
 	if (error) {
 		async_error = error;
+		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
+					 dev_name(dev), callback, error);
 		goto Complete;
 	}
 	dpm_propagate_wakeup_to_parent(dev);
@@ -1695,6 +1700,9 @@
 
 		dpm_propagate_wakeup_to_parent(dev);
 		dpm_clear_superiors_direct_complete(dev);
+	} else {
+		log_suspend_abort_reason("Callback failed on %s in %pS returned %d",
+					 dev_name(dev), callback, error);
 	}
 
 	device_unlock(dev);
@@ -1899,6 +1907,9 @@
 			}
 			pr_info("Device %s not prepared for power transition: code %d\n",
 				dev_name(dev), error);
+			log_suspend_abort_reason("Device %s not prepared for power transition: code %d",
+						 dev_name(dev), error);
+			dpm_save_failed_dev(dev_name(dev));
 			put_device(dev);
 			break;
 		}
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 8e93167..0489db3 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -137,6 +137,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(dev_pm_qos_read_value);
 
 /**
  * apply_constraint - Add/modify/remove device PM QoS request.
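Exporting dev_pm_qos_read_value() lets modular drivers query the aggregated per-device PM QoS constraint. A sketch under the 5.10 signature, which takes the request type explicitly:

	/* Current resume-latency constraint for this device, in usecs. */
	s32 latency = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);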
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 92073ac6..01057f6 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -15,6 +15,9 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/pm_wakeirq.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/wakeup_reason.h>
 #include <trace/events/power.h>
 
 #include "power.h"
@@ -872,6 +875,37 @@
 }
 EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
 
+void pm_get_active_wakeup_sources(char *pending_wakeup_source, size_t max)
+{
+	struct wakeup_source *ws, *last_active_ws = NULL;
+	int len = 0;
+	bool active = false;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+		if (ws->active && len < max) {
+			if (!active)
+				len += scnprintf(pending_wakeup_source, max,
+						"Pending Wakeup Sources: ");
+			len += scnprintf(pending_wakeup_source + len, max - len,
+				"%s ", ws->name);
+			active = true;
+		} else if (!active &&
+			   (!last_active_ws ||
+			    ktime_to_ns(ws->last_time) >
+			    ktime_to_ns(last_active_ws->last_time))) {
+			last_active_ws = ws;
+		}
+	}
+	if (!active && last_active_ws) {
+		scnprintf(pending_wakeup_source, max,
+				"Last active Wakeup Source: %s",
+				last_active_ws->name);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources);
+
 void pm_print_active_wakeup_sources(void)
 {
 	struct wakeup_source *ws;
@@ -910,6 +944,7 @@
 {
 	unsigned long flags;
 	bool ret = false;
+	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
 
 	raw_spin_lock_irqsave(&events_lock, flags);
 	if (events_check_enabled) {
@@ -924,6 +959,10 @@
 	if (ret) {
 		pm_pr_dbg("Wakeup pending, aborting suspend\n");
 		pm_print_active_wakeup_sources();
+		pm_get_active_wakeup_sources(suspend_abort,
+					     MAX_SUSPEND_ABORT_LEN);
+		log_suspend_abort_reason(suspend_abort);
+		pr_info("PM: %s\n", suspend_abort);
 	}
 
 	return ret || atomic_read(&pm_abort_suspend) > 0;
@@ -951,6 +990,18 @@
 void pm_system_irq_wakeup(unsigned int irq_number)
 {
 	if (pm_wakeup_irq == 0) {
+		struct irq_desc *desc;
+		const char *name = "null";
+
+		desc = irq_to_desc(irq_number);
+		if (desc == NULL)
+			name = "stray irq";
+		else if (desc->action && desc->action->name)
+			name = desc->action->name;
+
+		log_irq_wakeup_reason(irq_number);
+		pr_warn("%s: %d triggered %s\n", __func__, irq_number, name);
+
 		pm_wakeup_irq = irq_number;
 		pm_system_wakeup();
 	}
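Given the scnprintf() formats above, the string handed to log_suspend_abort_reason() takes one of two shapes (wakeup-source names illustrative):

	Pending Wakeup Sources: eventpoll alarmtimer
	Last active Wakeup Source: alarmtimer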
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 13db1f78..3a9527d 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <trace/events/power.h>
+#include <linux/wakeup_reason.h>
 
 static LIST_HEAD(syscore_ops_list);
 static DEFINE_MUTEX(syscore_ops_lock);
@@ -73,7 +74,9 @@
 	return 0;
 
  err_out:
-	pr_err("PM: System core suspend callback %pS failed.\n", ops->suspend);
+	log_suspend_abort_reason("System core suspend callback %pS failed",
+		ops->suspend);
+	pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend);
 
 	list_for_each_entry_continue(ops, &syscore_ops_list, node)
 		if (ops->resume)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index a314b93..616ced8 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -228,8 +228,6 @@
 	bool unmap = false;
 	u32 type;
 
-	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
-
 	switch (req_op(req)) {
 	case REQ_OP_READ:
 	case REQ_OP_WRITE:
@@ -253,6 +251,10 @@
 		return BLK_STS_IOERR;
 	}
 
+	BUG_ON(type != VIRTIO_BLK_T_DISCARD &&
+	       type != VIRTIO_BLK_T_WRITE_ZEROES &&
+	       (req->nr_phys_segments + 2 > vblk->sg_elems));
+
 	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
 	vbr->out_hdr.sector = type ?
 		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index f6a1474..ca5141e 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -60,7 +60,7 @@
 /*
  * Assigned numbers, used for dynamic minors
  */
-#define DYNAMIC_MINORS 64 /* like dynamic majors */
+#define DYNAMIC_MINORS 128 /* like dynamic majors */
 static DECLARE_BITMAP(misc_minors, DYNAMIC_MINORS);
 
 #ifdef CONFIG_PROC_FS
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 2ddb54f..2e7fedf 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -361,6 +361,7 @@
 		return ERR_CAST(hw);
 	return hw->clk;
 }
+EXPORT_SYMBOL_GPL(clk_register_composite);
 
 struct clk *clk_register_composite_pdata(struct device *dev, const char *name,
 			const struct clk_parent_data *parent_data,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f83dac54..d3e58cb 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -72,6 +72,8 @@
 	unsigned long		flags;
 	bool			orphan;
 	bool			rpm_enabled;
+	bool			need_sync;
+	bool			boot_enabled;
 	unsigned int		enable_count;
 	unsigned int		prepare_count;
 	unsigned int		protect_count;
@@ -1194,6 +1196,10 @@
 	hlist_for_each_entry(child, &core->children, child_node)
 		clk_unprepare_unused_subtree(child);
 
+	if (dev_has_sync_state(core->dev) &&
+	    !(core->flags & CLK_DONT_HOLD_STATE))
+		return;
+
 	if (core->prepare_count)
 		return;
 
@@ -1225,6 +1231,10 @@
 	hlist_for_each_entry(child, &core->children, child_node)
 		clk_disable_unused_subtree(child);
 
+	if (dev_has_sync_state(core->dev) &&
+	    !(core->flags & CLK_DONT_HOLD_STATE))
+		return;
+
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
 		clk_core_prepare_enable(core->parent);
 
@@ -1298,6 +1308,38 @@
 }
 late_initcall_sync(clk_disable_unused);
 
+static void clk_unprepare_disable_dev_subtree(struct clk_core *core,
+					      struct device *dev)
+{
+	struct clk_core *child;
+
+	lockdep_assert_held(&prepare_lock);
+
+	hlist_for_each_entry(child, &core->children, child_node)
+		clk_unprepare_disable_dev_subtree(child, dev);
+
+	if (core->dev != dev || !core->need_sync)
+		return;
+
+	clk_core_disable_unprepare(core);
+}
+
+void clk_sync_state(struct device *dev)
+{
+	struct clk_core *core;
+
+	clk_prepare_lock();
+
+	hlist_for_each_entry(core, &clk_root_list, child_node)
+		clk_unprepare_disable_dev_subtree(core, dev);
+
+	hlist_for_each_entry(core, &clk_orphan_list, child_node)
+		clk_unprepare_disable_dev_subtree(core, dev);
+
+	clk_prepare_unlock();
+}
+EXPORT_SYMBOL_GPL(clk_sync_state);
+
 static int clk_core_determine_round_nolock(struct clk_core *core,
 					   struct clk_rate_request *req)
 {
@@ -1704,6 +1746,33 @@
 }
 EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
 
+static void clk_core_hold_state(struct clk_core *core)
+{
+	if (core->need_sync || !core->boot_enabled)
+		return;
+
+	if (core->orphan || !dev_has_sync_state(core->dev))
+		return;
+
+	if (core->flags & CLK_DONT_HOLD_STATE)
+		return;
+
+	core->need_sync = !clk_core_prepare_enable(core);
+}
+
+static void __clk_core_update_orphan_hold_state(struct clk_core *core)
+{
+	struct clk_core *child;
+
+	if (core->orphan)
+		return;
+
+	clk_core_hold_state(core);
+
+	hlist_for_each_entry(child, &core->children, child_node)
+		__clk_core_update_orphan_hold_state(child);
+}
+
 /*
  * Update the orphan status of @core and all its children.
  */
@@ -2010,6 +2079,13 @@
 			fail_clk = core;
 	}
 
+	if (core->ops->pre_rate_change) {
+		ret = core->ops->pre_rate_change(core->hw, core->rate,
+						 core->new_rate);
+		if (ret)
+			fail_clk = core;
+	}
+
 	hlist_for_each_entry(child, &core->children, child_node) {
 		/* Skip children who will be reparented to another clock */
 		if (child->new_parent && child->new_parent != core)
@@ -2112,6 +2188,9 @@
 	if (core->flags & CLK_RECALC_NEW_RATES)
 		(void)clk_calc_new_rates(core, core->new_rate);
 
+	if (core->ops->post_rate_change)
+		core->ops->post_rate_change(core->hw, old_rate, core->rate);
+
 	/*
 	 * Use safe iteration, as change_rate can actually swap parents
 	 * for certain clock types.
@@ -3028,7 +3107,7 @@
 }
 DEFINE_SHOW_ATTRIBUTE(clk_dump);
 
-#undef CLOCK_ALLOW_WRITE_DEBUGFS
+#define CLOCK_ALLOW_WRITE_DEBUGFS
 #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
 /*
  * This can be dangerous, therefore don't provide any real compile time
@@ -3366,6 +3445,7 @@
 			__clk_set_parent_after(orphan, parent, NULL);
 			__clk_recalc_accuracies(orphan);
 			__clk_recalc_rates(orphan, 0);
+			__clk_core_update_orphan_hold_state(orphan);
 		}
 	}
 }
@@ -3524,6 +3604,8 @@
 		rate = 0;
 	core->rate = core->req_rate = rate;
 
+	core->boot_enabled = clk_core_is_enabled(core);
+
 	/*
 	 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
 	 * don't get accidentally disabled when walking the orphan tree and
@@ -3550,6 +3632,7 @@
 		}
 	}
 
+	clk_core_hold_state(core);
 	clk_core_reparent_orphans_nolock();
 
 
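The new hold-state logic keeps boot-enabled clocks prepared and enabled until their provider's sync_state callback (clk_sync_state above) runs; a provider opts individual clocks out with CLK_DONT_HOLD_STATE. A minimal sketch, assuming the flag is defined alongside the other CLK_* flags in clk-provider.h:

	static const struct clk_init_data example_init = {
		.name	= "example_boot_clk",	/* hypothetical clock */
		.ops	= &clk_fixed_rate_ops,
		/* Let clk_disable_unused gate this clock before sync_state. */
		.flags	= CLK_DONT_HOLD_STATE,
	};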
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 5c932cd..11e03f7 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -3,6 +3,7 @@
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/clk.h>
 #include <linux/clk-provider.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -878,6 +879,7 @@
 	.driver		= {
 		.name	= "disp_cc-sdm845",
 		.of_match_table = disp_cc_sdm845_match_table,
+		.sync_state = clk_sync_state,
 	},
 };
 
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 9d7016bc..867fa7d 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -3119,6 +3119,7 @@
 	.driver		= {
 		.name	= "gcc-msm8998",
 		.of_match_table = gcc_msm8998_match_table,
+		.sync_state = clk_sync_state,
 	},
 };
 
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 90f7feb..50694f5 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -3628,6 +3628,7 @@
 	.driver		= {
 		.name	= "gcc-sdm845",
 		.of_match_table = gcc_sdm845_match_table,
+		.sync_state = clk_sync_state,
 	},
 };
 
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index 5663698..46916a7 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -208,6 +208,7 @@
 	.driver = {
 		.name = "sdm845-gpucc",
 		.of_match_table = gpu_cc_sdm845_match_table,
+		.sync_state = clk_sync_state,
 	},
 };
 
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 5d6a772..5822252 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -338,6 +338,7 @@
 	.driver		= {
 		.name	= "sdm845-videocc",
 		.of_match_table = video_cc_sdm845_match_table,
+		.sync_state = clk_sync_state,
 	},
 };
 
diff --git a/drivers/clk/versatile/Kconfig b/drivers/clk/versatile/Kconfig
index 91f0ff5..792315d 100644
--- a/drivers/clk/versatile/Kconfig
+++ b/drivers/clk/versatile/Kconfig
@@ -1,8 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 
 menu "Clock driver for ARM Reference designs"
-	depends on ARCH_INTEGRATOR || ARCH_REALVIEW || \
-		ARCH_VERSATILE || ARCH_VEXPRESS || COMPILE_TEST
 
 config ICST
 	bool "Clock driver for ARM Reference designs ICST"
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 68b087b..42f9d95 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -126,7 +126,7 @@
 	  Enables the support for the RDA Micro timer driver.
 
 config SUN4I_TIMER
-	bool "Sun4i timer driver" if COMPILE_TEST
+	bool "Sun4i timer driver"
 	depends on HAS_IOMEM
 	select CLKSRC_MMIO
 	select TIMER_OF
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 85de313..cf79fe1 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -34,6 +34,13 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_TIMES
+       bool "CPU frequency time-in-state statistics"
+       help
+         Export CPU time-in-state information through procfs.
+
+         If in doubt, say N.
+
 choice
 	prompt "Default CPUFreq governor"
 	default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
@@ -226,6 +233,15 @@
 
 	  If in doubt, say N.
 
+config CPUFREQ_DUMMY
+	tristate "Dummy CPU frequency driver"
+	help
+	  This option adds a generic dummy CPUfreq driver, which sets a fake
+	  2-frequency table when initializing each policy and otherwise does
+	  nothing.
+
+	  If in doubt, say N.
+
 if X86
 source "drivers/cpufreq/Kconfig.x86"
 endif
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index f1b7e3d..b2f4531 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -5,7 +5,10 @@
 # CPUfreq stats
 obj-$(CONFIG_CPU_FREQ_STAT)             += cpufreq_stats.o
 
-# CPUfreq governors 
+# CPUfreq times
+obj-$(CONFIG_CPU_FREQ_TIMES)		+= cpufreq_times.o
+
+# CPUfreq governors
 obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE)	+= cpufreq_performance.o
 obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE)	+= cpufreq_powersave.o
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)	+= cpufreq_userspace.o
@@ -17,6 +20,8 @@
 obj-$(CONFIG_CPUFREQ_DT)		+= cpufreq-dt.o
 obj-$(CONFIG_CPUFREQ_DT_PLATDEV)	+= cpufreq-dt-platdev.o
 
+obj-$(CONFIG_CPUFREQ_DUMMY)		+= dummy-cpufreq.o
+
 ##################################################################################
 # x86 drivers.
 # Link order matters. K8 is preferred to ACPI because of firmware bugs in early
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1e7e3f2..65e0d286 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -16,6 +16,7 @@
 
 #include <linux/cpu.h>
 #include <linux/cpufreq.h>
+#include <linux/cpufreq_times.h>
 #include <linux/cpu_cooling.h>
 #include <linux/delay.h>
 #include <linux/device.h>
@@ -29,6 +30,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/tick.h>
 #include <trace/events/power.h>
+#include <trace/hooks/cpufreq.h>
 
 static LIST_HEAD(cpufreq_policy_list);
 
@@ -387,6 +389,7 @@
 					 CPUFREQ_POSTCHANGE, freqs);
 
 		cpufreq_stats_record_transition(policy, freqs->new);
+		cpufreq_times_record_transition(policy, freqs->new);
 		policy->cur = freqs->new;
 	}
 }
@@ -688,8 +691,15 @@
 	return sprintf(buf, "%u\n", policy->object);	\
 }
 
+static ssize_t show_cpuinfo_max_freq(struct cpufreq_policy *policy, char *buf)
+{
+	unsigned int max_freq = policy->cpuinfo.max_freq;
+
+	trace_android_vh_show_max_freq(policy, &max_freq);
+	return sprintf(buf, "%u\n", max_freq);
+}
+
 show_one(cpuinfo_min_freq, cpuinfo.min_freq);
-show_one(cpuinfo_max_freq, cpuinfo.max_freq);
 show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
 show_one(scaling_min_freq, min);
 show_one(scaling_max_freq, max);
@@ -1479,6 +1489,7 @@
 			goto out_destroy_policy;
 
 		cpufreq_stats_create_table(policy);
+		cpufreq_times_create_policy(policy);
 
 		write_lock_irqsave(&cpufreq_driver_lock, flags);
 		list_add(&policy->policy_list, &cpufreq_policy_list);
@@ -2509,7 +2520,6 @@
 		ret = cpufreq_start_governor(policy);
 		if (!ret) {
 			pr_debug("governor change\n");
-			sched_cpufreq_governor_change(policy, old_gov);
 			return 0;
 		}
 		cpufreq_exit_governor(policy);
diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c
new file mode 100644
index 0000000..4df55b3
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_times.c
@@ -0,0 +1,211 @@
+/* drivers/cpufreq/cpufreq_times.c
+ *
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/cpufreq_times.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+
+static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
+
+/**
+ * struct cpu_freqs - per-cpu frequency information
+ * @offset: start of these freqs' stats in task time_in_state array
+ * @max_state: number of entries in freq_table
+ * @last_index: index in freq_table of last frequency switched to
+ * @freq_table: list of available frequencies
+ */
+struct cpu_freqs {
+	unsigned int offset;
+	unsigned int max_state;
+	unsigned int last_index;
+	unsigned int freq_table[0];
+};
+
+static struct cpu_freqs *all_freqs[NR_CPUS];
+
+static unsigned int next_offset;
+
+void cpufreq_task_times_init(struct task_struct *p)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	p->time_in_state = NULL;
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	p->max_state = 0;
+}
+
+void cpufreq_task_times_alloc(struct task_struct *p)
+{
+	void *temp;
+	unsigned long flags;
+	unsigned int max_state = READ_ONCE(next_offset);
+
+	/* We use one array to avoid multiple allocs per task */
+	temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
+	if (!temp)
+		return;
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	p->time_in_state = temp;
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	p->max_state = max_state;
+}
+
+/* Caller must hold task_time_in_state_lock */
+static int cpufreq_task_times_realloc_locked(struct task_struct *p)
+{
+	void *temp;
+	unsigned int max_state = READ_ONCE(next_offset);
+
+	temp = krealloc(p->time_in_state, max_state * sizeof(u64), GFP_ATOMIC);
+	if (!temp)
+		return -ENOMEM;
+	p->time_in_state = temp;
+	memset(p->time_in_state + p->max_state, 0,
+	       (max_state - p->max_state) * sizeof(u64));
+	p->max_state = max_state;
+	return 0;
+}
+
+void cpufreq_task_times_exit(struct task_struct *p)
+{
+	unsigned long flags;
+	void *temp;
+
+	if (!p->time_in_state)
+		return;
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	temp = p->time_in_state;
+	p->time_in_state = NULL;
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	kfree(temp);
+}
+
+int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
+	struct pid *pid, struct task_struct *p)
+{
+	unsigned int cpu, i;
+	u64 cputime;
+	unsigned long flags;
+	struct cpu_freqs *freqs;
+	struct cpu_freqs *last_freqs = NULL;
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	for_each_possible_cpu(cpu) {
+		freqs = all_freqs[cpu];
+		if (!freqs || freqs == last_freqs)
+			continue;
+		last_freqs = freqs;
+
+		seq_printf(m, "cpu%u\n", cpu);
+		for (i = 0; i < freqs->max_state; i++) {
+			cputime = 0;
+			if (freqs->offset + i < p->max_state &&
+			    p->time_in_state)
+				cputime = p->time_in_state[freqs->offset + i];
+			seq_printf(m, "%u %lu\n", freqs->freq_table[i],
+				   (unsigned long)nsec_to_clock_t(cputime));
+		}
+	}
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+	return 0;
+}
+
+void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
+{
+	unsigned long flags;
+	unsigned int state;
+	struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
+
+	if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
+		return;
+
+	state = freqs->offset + READ_ONCE(freqs->last_index);
+
+	spin_lock_irqsave(&task_time_in_state_lock, flags);
+	if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) &&
+	    p->time_in_state)
+		p->time_in_state[state] += cputime;
+	spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+}
+
+static int cpufreq_times_get_index(struct cpu_freqs *freqs, unsigned int freq)
+{
+	int index;
+	for (index = 0; index < freqs->max_state; ++index) {
+		if (freqs->freq_table[index] == freq)
+			return index;
+	}
+	return -1;
+}
+
+void cpufreq_times_create_policy(struct cpufreq_policy *policy)
+{
+	int cpu, index = 0;
+	unsigned int count = 0;
+	struct cpufreq_frequency_table *pos, *table;
+	struct cpu_freqs *freqs;
+	void *tmp;
+
+	if (all_freqs[policy->cpu])
+		return;
+
+	table = policy->freq_table;
+	if (!table)
+		return;
+
+	cpufreq_for_each_valid_entry(pos, table)
+		count++;
+
+	tmp = kzalloc(sizeof(*freqs) + sizeof(freqs->freq_table[0]) * count,
+		       GFP_KERNEL);
+	if (!tmp)
+		return;
+
+	freqs = tmp;
+	freqs->max_state = count;
+
+	cpufreq_for_each_valid_entry(pos, table)
+		freqs->freq_table[index++] = pos->frequency;
+
+	index = cpufreq_times_get_index(freqs, policy->cur);
+	if (index >= 0)
+		WRITE_ONCE(freqs->last_index, index);
+
+	freqs->offset = next_offset;
+	WRITE_ONCE(next_offset, freqs->offset + count);
+	for_each_cpu(cpu, policy->related_cpus)
+		all_freqs[cpu] = freqs;
+}
+
+void cpufreq_times_record_transition(struct cpufreq_policy *policy,
+	unsigned int new_freq)
+{
+	int index;
+	struct cpu_freqs *freqs = all_freqs[policy->cpu];
+	if (!freqs)
+		return;
+
+	index = cpufreq_times_get_index(freqs, new_freq);
+	if (index >= 0)
+		WRITE_ONCE(freqs->last_index, index);
+}
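proc_time_in_state_show() emits one block per policy: a cpu<N> header for the first CPU covered by each frequency table, then one "<freq> <ticks>" pair per valid table entry. Illustrative output (frequencies and tick counts invented):

	cpu0
	500000 1023
	1000000 311
	cpu4
	600000 876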
diff --git a/drivers/cpufreq/dummy-cpufreq.c b/drivers/cpufreq/dummy-cpufreq.c
new file mode 100644
index 0000000..e74ef67
--- /dev/null
+++ b/drivers/cpufreq/dummy-cpufreq.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Google, Inc.
+ */
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+
+static struct cpufreq_frequency_table freq_table[] = {
+	{ .frequency = 1 },
+	{ .frequency = 2 },
+	{ .frequency = CPUFREQ_TABLE_END },
+};
+
+static int dummy_cpufreq_target_index(struct cpufreq_policy *policy,
+				   unsigned int index)
+{
+	return 0;
+}
+
+static int dummy_cpufreq_driver_init(struct cpufreq_policy *policy)
+{
+	policy->freq_table = freq_table;
+	return 0;
+}
+
+static unsigned int dummy_cpufreq_get(unsigned int cpu)
+{
+	return 1;
+}
+
+static int dummy_cpufreq_verify(struct cpufreq_policy_data *data)
+{
+	return 0;
+}
+
+static struct cpufreq_driver dummy_cpufreq_driver = {
+	.name = "dummy",
+	.target_index = dummy_cpufreq_target_index,
+	.init = dummy_cpufreq_driver_init,
+	.get = dummy_cpufreq_get,
+	.verify = dummy_cpufreq_verify,
+	.attr = cpufreq_generic_attr,
+};
+
+static int __init dummy_cpufreq_init(void)
+{
+	return cpufreq_register_driver(&dummy_cpufreq_driver);
+}
+
+static void __exit dummy_cpufreq_exit(void)
+{
+	cpufreq_unregister_driver(&dummy_cpufreq_driver);
+}
+
+module_init(dummy_cpufreq_init);
+module_exit(dummy_cpufreq_exit);
+
+MODULE_AUTHOR("Connor O'Brien <connoro@google.com>");
+MODULE_DESCRIPTION("dummy cpufreq driver");
+MODULE_LICENSE("GPL");
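Since the driver installs cpufreq_generic_attr, loading it should expose the fake two-entry table through the usual sysfs nodes; an illustrative session, assuming no other cpufreq driver is registered:

	# cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_frequencies
	1 2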
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index f839dc98..1128ba2 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -9,6 +9,7 @@
 
 #include <linux/cpufreq.h>
 #include <linux/module.h>
+#include <trace/hooks/cpufreq.h>
 
 /*********************************************************************
  *                     FREQUENCY TABLE HELPERS                       *
@@ -51,6 +52,7 @@
 			max_freq = freq;
 	}
 
+	trace_android_vh_freq_table_limits(policy, min_freq, max_freq);
 	policy->min = policy->cpuinfo.min_freq = min_freq;
 	policy->max = policy->cpuinfo.max_freq = max_freq;
 
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 83af15f..fa0024b 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -24,6 +24,7 @@
 #include <linux/tick.h>
 #include <linux/mmu_context.h>
 #include <trace/events/power.h>
+#include <trace/hooks/cpuidle.h>
 
 #include "cpuidle.h"
 
@@ -228,6 +229,7 @@
 	/* Take note of the planned idle state. */
 	sched_idle_set_state(target_state);
 
+	trace_android_vh_cpu_idle(0, index, dev->cpu);
 	trace_cpu_idle(index, dev->cpu);
 	time_start = ns_to_ktime(local_clock());
 
@@ -242,6 +244,7 @@
 	sched_clock_idle_wakeup_event();
 	time_end = ns_to_ktime(local_clock());
 	trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
+	trace_android_vh_cpu_idle(PWR_EVENT_EXIT, entered_state, dev->cpu);
 
 	/* The cpu is no longer idle or about to enter idle. */
 	sched_idle_set_state(NULL);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 29acaf4..0e51ed2 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -102,6 +102,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(cpuidle_register_governor);
 
 /**
  * cpuidle_governor_latency_req - Compute a latency constraint for CPU
@@ -118,3 +119,4 @@
 
 	return (s64)device_req * NSEC_PER_USEC;
 }
+EXPORT_SYMBOL_GPL(cpuidle_governor_latency_req);
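With cpuidle_register_governor() and cpuidle_governor_latency_req() exported, a cpuidle governor can now live in a module. A minimal sketch against the 5.10 struct cpuidle_governor (names hypothetical):

	static int example_select(struct cpuidle_driver *drv,
				  struct cpuidle_device *dev, bool *stop_tick)
	{
		*stop_tick = false;
		return 0;	/* always pick the shallowest state */
	}

	static struct cpuidle_governor example_governor = {
		.name	= "example",
		.rating	= 10,
		.select	= example_select,
	};

	/* In the module's init path: */
	return cpuidle_register_governor(&example_governor);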
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 844967f..29d080e 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1101,6 +1101,30 @@
 }
 EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
 
+int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
+				     enum dma_data_direction direction,
+				     unsigned int offset, unsigned int len)
+{
+	int ret = 0;
+
+	if (WARN_ON(!dmabuf))
+		return -EINVAL;
+
+	if (dmabuf->ops->begin_cpu_access_partial)
+		ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction,
+							    offset, len);
+
+	/* Ensure that all fences are waited upon - but we first allow
+	 * the native handler the chance to do so more efficiently if it
+	 * chooses. A double invocation here will be a reasonably cheap no-op.
+	 */
+	if (ret == 0)
+		ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access_partial);
+
 /**
  * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
  * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
@@ -1127,6 +1151,21 @@
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 
+int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
+				   enum dma_data_direction direction,
+				   unsigned int offset, unsigned int len)
+{
+	int ret = 0;
+
+	WARN_ON(!dmabuf);
+
+	if (dmabuf->ops->end_cpu_access_partial)
+		ret = dmabuf->ops->end_cpu_access_partial(dmabuf, direction,
+							  offset, len);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial);
 
 /**
  * dma_buf_mmap - Setup up a userspace mmap with the given vma
@@ -1255,6 +1294,32 @@
 }
 EXPORT_SYMBOL_GPL(dma_buf_vunmap);
 
+int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
+{
+	int ret = 0;
+
+	if (WARN_ON(!dmabuf) || !flags)
+		return -EINVAL;
+
+	if (dmabuf->ops->get_flags)
+		ret = dmabuf->ops->get_flags(dmabuf, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_get_flags);
+
+int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid)
+{
+	if (WARN_ON(!dmabuf) || !uuid)
+		return -EINVAL;
+
+	if (!dmabuf->ops->get_uuid)
+		return -ENODEV;
+
+	return dmabuf->ops->get_uuid(dmabuf, uuid);
+}
+EXPORT_SYMBOL_GPL(dma_buf_get_uuid);
+
 #ifdef CONFIG_DEBUG_FS
 static int dma_buf_debug_show(struct seq_file *s, void *unused)
 {
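The *_partial variants let an importer sync only the byte range the CPU will touch, while still falling back to a full-buffer begin_cpu_access when the exporter lacks the partial op. A sketch of the expected calling pattern:

	ret = dma_buf_begin_cpu_access_partial(dmabuf, DMA_FROM_DEVICE, offset, len);
	if (ret)
		return ret;
	/* CPU reads bytes [offset, offset + len) here. */
	ret = dma_buf_end_cpu_access_partial(dmabuf, DMA_FROM_DEVICE, offset, len);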
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index afd22c9..ba25f09 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -40,6 +40,7 @@
 	dev_t heap_devt;
 	struct list_head list;
 	struct cdev heap_cdev;
+	struct kref refcount;
 };
 
 static LIST_HEAD(heap_list);
@@ -48,20 +49,72 @@
 static struct class *dma_heap_class;
 static DEFINE_XARRAY_ALLOC(dma_heap_minors);
 
-static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
-				 unsigned int fd_flags,
-				 unsigned int heap_flags)
+struct dma_heap *dma_heap_find(const char *name)
 {
+	struct dma_heap *h;
+
+	mutex_lock(&heap_list_lock);
+	list_for_each_entry(h, &heap_list, list) {
+		if (!strcmp(h->name, name)) {
+			kref_get(&h->refcount);
+			mutex_unlock(&heap_list_lock);
+			return h;
+		}
+	}
+	mutex_unlock(&heap_list_lock);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_heap_find);
+
+
+void dma_heap_buffer_free(struct dma_buf *dmabuf)
+{
+	dma_buf_put(dmabuf);
+}
+EXPORT_SYMBOL_GPL(dma_heap_buffer_free);
+
+struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
+				      unsigned int fd_flags,
+				      unsigned int heap_flags)
+{
+	if (fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
+		return ERR_PTR(-EINVAL);
+
+	if (heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
+		return ERR_PTR(-EINVAL);
 	/*
 	 * Allocations from all heaps have to begin
 	 * and end on page boundaries.
 	 */
 	len = PAGE_ALIGN(len);
 	if (!len)
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	return heap->ops->allocate(heap, len, fd_flags, heap_flags);
 }
+EXPORT_SYMBOL_GPL(dma_heap_buffer_alloc);
+
+int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len,
+			    unsigned int fd_flags,
+			    unsigned int heap_flags)
+{
+	struct dma_buf *dmabuf;
+	int fd;
+
+	dmabuf = dma_heap_buffer_alloc(heap, len, fd_flags, heap_flags);
+
+	if (IS_ERR(dmabuf))
+		return PTR_ERR(dmabuf);
+
+	fd = dma_buf_fd(dmabuf, fd_flags);
+	if (fd < 0) {
+		dma_buf_put(dmabuf);
+		/* just return, as put will call release and that will free */
+	}
+	return fd;
+
+}
+EXPORT_SYMBOL_GPL(dma_heap_bufferfd_alloc);
 
 static int dma_heap_open(struct inode *inode, struct file *file)
 {
@@ -89,15 +142,9 @@
 	if (heap_allocation->fd)
 		return -EINVAL;
 
-	if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
-		return -EINVAL;
-
-	if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
-		return -EINVAL;
-
-	fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
-				   heap_allocation->fd_flags,
-				   heap_allocation->heap_flags);
+	fd = dma_heap_bufferfd_alloc(heap, heap_allocation->len,
+				     heap_allocation->fd_flags,
+				     heap_allocation->heap_flags);
 	if (fd < 0)
 		return fd;
 
@@ -189,12 +236,39 @@
 {
 	return heap->priv;
 }
+EXPORT_SYMBOL_GPL(dma_heap_get_drvdata);
+
+static void dma_heap_release(struct kref *ref)
+{
+	struct dma_heap *heap = container_of(ref, struct dma_heap, refcount);
+	int minor = MINOR(heap->heap_devt);
+
+	/* Note, we are already holding the heap_list_lock here */
+	list_del(&heap->list);
+
+	device_destroy(dma_heap_class, heap->heap_devt);
+	cdev_del(&heap->heap_cdev);
+	xa_erase(&dma_heap_minors, minor);
+
+	kfree(heap);
+}
+
+void dma_heap_put(struct dma_heap *h)
+{
+	/*
+	 * Take the heap_list_lock now to avoid racing with code
+	 * scanning the list and then taking a kref.
+	 */
+	mutex_lock(&heap_list_lock);
+	kref_put(&h->refcount, dma_heap_release);
+	mutex_unlock(&heap_list_lock);
+}
 
 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
 {
-	struct dma_heap *heap, *h, *err_ret;
+	struct dma_heap *heap, *err_ret;
 	struct device *dev_ret;
-	unsigned int minor;
+	int minor;
 	int ret;
 
 	if (!exp_info->name || !strcmp(exp_info->name, "")) {
@@ -208,21 +282,19 @@
 	}
 
 	/* check the name is unique */
-	mutex_lock(&heap_list_lock);
-	list_for_each_entry(h, &heap_list, list) {
-		if (!strcmp(h->name, exp_info->name)) {
-			mutex_unlock(&heap_list_lock);
-			pr_err("dma_heap: Already registered heap named %s\n",
-			       exp_info->name);
-			return ERR_PTR(-EINVAL);
-		}
+	heap = dma_heap_find(exp_info->name);
+	if (heap) {
+		pr_err("dma_heap: Already registered heap named %s\n",
+		       exp_info->name);
+		dma_heap_put(heap);
+		return ERR_PTR(-EINVAL);
 	}
-	mutex_unlock(&heap_list_lock);
 
 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 	if (!heap)
 		return ERR_PTR(-ENOMEM);
 
+	kref_init(&heap->refcount);
 	heap->name = exp_info->name;
 	heap->ops = exp_info->ops;
 	heap->priv = exp_info->priv;
@@ -272,6 +344,7 @@
 	kfree(heap);
 	return err_ret;
 }
+EXPORT_SYMBOL_GPL(dma_heap_add);
 
 static char *dma_heap_devnode(struct device *dev, umode_t *mode)
 {
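Together, the newly exported dma_heap_find(), dma_heap_buffer_alloc() and dma_heap_put() let in-kernel clients allocate from a named heap without going through the ioctl path. A sketch, assuming the system heap registers under the name "system":

	struct dma_heap *heap;
	struct dma_buf *buf;

	heap = dma_heap_find("system");		/* takes a heap reference */
	if (!heap)
		return -ENODEV;

	buf = dma_heap_buffer_alloc(heap, size, O_RDWR, 0);
	if (IS_ERR(buf)) {
		dma_heap_put(heap);
		return PTR_ERR(buf);
	}

	/* ... use buf ... */

	dma_heap_buffer_free(buf);
	dma_heap_put(heap);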
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index a5eef06..aed0b9b 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -6,7 +6,7 @@
 	  is backed by pages from the buddy allocator. If in doubt, say Y.
 
 config DMABUF_HEAPS_CMA
-	bool "DMA-BUF CMA Heap"
+	tristate "DMA-BUF CMA Heap"
 	depends on DMABUF_HEAPS && DMA_CMA
 	help
 	  Choose this option to enable dma-buf CMA heap. This heap is backed
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index e55384d..a58e987 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -40,10 +40,10 @@
 }
 
 /* dmabuf heap CMA operations functions */
-static int cma_heap_allocate(struct dma_heap *heap,
-			     unsigned long len,
-			     unsigned long fd_flags,
-			     unsigned long heap_flags)
+struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
+				  unsigned long len,
+				  unsigned long fd_flags,
+				  unsigned long heap_flags)
 {
 	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
 	struct heap_helper_buffer *helper_buffer;
@@ -60,7 +60,7 @@
 
 	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
 	if (!helper_buffer)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	init_heap_helper_buffer(helper_buffer, cma_heap_free);
 	helper_buffer->heap = heap;
@@ -115,14 +115,7 @@
 	helper_buffer->dmabuf = dmabuf;
 	helper_buffer->priv_virt = cma_pages;
 
-	ret = dma_buf_fd(dmabuf, fd_flags);
-	if (ret < 0) {
-		dma_buf_put(dmabuf);
-		/* just return, as put will call release and that will free */
-		return ret;
-	}
-
-	return ret;
+	return dmabuf;
 
 free_pages:
 	kfree(helper_buffer->pages);
@@ -130,7 +123,7 @@
 	cma_release(cma_heap->cma, cma_pages, nr_pages);
 free_buf:
 	kfree(helper_buffer);
-	return ret;
+	return ERR_PTR(ret);
 }
 
 static const struct dma_heap_ops cma_heap_ops = {
diff --git a/drivers/dma-buf/heaps/heap-helpers.c b/drivers/dma-buf/heaps/heap-helpers.c
index d0696cf..0a08203 100644
--- a/drivers/dma-buf/heaps/heap-helpers.c
+++ b/drivers/dma-buf/heaps/heap-helpers.c
@@ -24,6 +24,7 @@
 	INIT_LIST_HEAD(&buffer->attachments);
 	buffer->free = free;
 }
+EXPORT_SYMBOL_GPL(init_heap_helper_buffer);
 
 struct dma_buf *heap_helper_export_dmabuf(struct heap_helper_buffer *buffer,
 					  int fd_flags)
@@ -37,6 +38,7 @@
 
 	return dma_buf_export(&exp_info);
 }
+EXPORT_SYMBOL_GPL(heap_helper_export_dmabuf);
 
 static void *dma_heap_map_kernel(struct heap_helper_buffer *buffer)
 {
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 0bf688e..f8f0ab0 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -32,10 +32,10 @@
 	kfree(buffer);
 }
 
-static int system_heap_allocate(struct dma_heap *heap,
-				unsigned long len,
-				unsigned long fd_flags,
-				unsigned long heap_flags)
+static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
+					    unsigned long len,
+					    unsigned long fd_flags,
+					    unsigned long heap_flags)
 {
 	struct heap_helper_buffer *helper_buffer;
 	struct dma_buf *dmabuf;
@@ -44,7 +44,7 @@
 
 	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
 	if (!helper_buffer)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	init_heap_helper_buffer(helper_buffer, system_heap_free);
 	helper_buffer->heap = heap;
@@ -81,14 +81,7 @@
 
 	helper_buffer->dmabuf = dmabuf;
 
-	ret = dma_buf_fd(dmabuf, fd_flags);
-	if (ret < 0) {
-		dma_buf_put(dmabuf);
-		/* just return, as put will call release and that will free */
-		return ret;
-	}
-
-	return ret;
+	return dmabuf;
 
 err1:
 	while (pg > 0)
@@ -97,7 +90,7 @@
 err0:
 	kfree(helper_buffer);
 
-	return ret;
+	return ERR_PTR(ret);
 }
 
 static const struct dma_heap_ops system_heap_ops = {
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index 3315e3c..d6d40f0 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -235,7 +235,7 @@
 	  Say Y here if you want Intel RSU support.
 
 config QCOM_SCM
-	bool
+	tristate "Qcom SCM driver"
 	depends on ARM || ARM64
 	select RESET_CONTROLLER
 
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 5e013b6..523173c 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -17,7 +17,8 @@
 obj-$(CONFIG_FIRMWARE_MEMMAP)	+= memmap.o
 obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
 obj-$(CONFIG_FW_CFG_SYSFS)	+= qemu_fw_cfg.o
-obj-$(CONFIG_QCOM_SCM)		+= qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
+obj-$(CONFIG_QCOM_SCM)		+= qcom-scm.o
+qcom-scm-objs += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
 obj-$(CONFIG_TI_SCI_PROTOCOL)	+= ti_sci.o
 obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
 obj-$(CONFIG_TURRIS_MOX_RWTM)	+= turris-mox-rwtm.o
diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c
index 00af99b..e042704 100644
--- a/drivers/firmware/psci/psci.c
+++ b/drivers/firmware/psci/psci.c
@@ -259,7 +259,8 @@
 	return 0;
 }
 
-static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
+static int psci_sys_reset(struct notifier_block *nb, unsigned long action,
+			  void *data)
 {
 	if ((reboot_mode == REBOOT_WARM || reboot_mode == REBOOT_SOFT) &&
 	    psci_system_reset2_supported) {
@@ -272,8 +273,15 @@
 	} else {
 		invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
 	}
+
+	return NOTIFY_DONE;
 }
 
+static struct notifier_block psci_sys_reset_nb = {
+	.notifier_call = psci_sys_reset,
+	.priority = 129,
+};
+
 static void psci_sys_poweroff(void)
 {
 	invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
@@ -440,7 +448,7 @@
 
 	psci_ops.migrate_info_type = psci_migrate_info_type;
 
-	arm_pm_restart = psci_sys_reset;
+	register_restart_handler(&psci_sys_reset_nb);
 
 	pm_power_off = psci_sys_poweroff;
 }
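Moving from the arm_pm_restart hook to register_restart_handler() puts PSCI on the shared restart notifier chain; the default handler priority is 128, so 129 makes PSCI preferred over generic handlers while still letting higher-priority board handlers run first (higher priorities are called earlier). Sketch of such a handler, with hypothetical names:

	static int board_restart(struct notifier_block *nb, unsigned long action,
				 void *data)
	{
		/* board-specific reset sequence */
		return NOTIFY_DONE;
	}

	static struct notifier_block board_restart_nb = {
		.notifier_call	= board_restart,
		.priority	= 192,	/* runs before PSCI's 129 */
	};

	register_restart_handler(&board_restart_nb);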
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 7be48c1..6f431b7 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -1280,6 +1280,7 @@
 	{ .compatible = "qcom,scm" },
 	{}
 };
+MODULE_DEVICE_TABLE(of, qcom_scm_dt_match);
 
 static struct platform_driver qcom_scm_driver = {
 	.driver = {
@@ -1295,3 +1296,6 @@
 	return platform_driver_register(&qcom_scm_driver);
 }
 subsys_initcall(qcom_scm_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. SCM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gnss/Kconfig b/drivers/gnss/Kconfig
index bd12e3d..b94e210 100644
--- a/drivers/gnss/Kconfig
+++ b/drivers/gnss/Kconfig
@@ -54,4 +54,19 @@
 
 	  If unsure, say N.
 
+config GNSS_CMDLINE_SERIAL
+	tristate "Command line test driver for GNSS"
+	depends on SERIAL_DEV_BUS
+	select GNSS_SERIAL
+	help
+	  Say Y here if you want to test the GNSS subsystem but do not have a
+	  way to communicate a binding through firmware such as DT or ACPI.
+	  The correct serdev device and protocol type must be specified on
+	  the module command line.
+
+	  To compile this driver as a module, choose M here: the module will
+	  be called gnss-cmdline.
+
+	  If unsure, say N.
+
 endif # GNSS
diff --git a/drivers/gnss/Makefile b/drivers/gnss/Makefile
index 451f114..1d27659 100644
--- a/drivers/gnss/Makefile
+++ b/drivers/gnss/Makefile
@@ -17,3 +17,6 @@
 
 obj-$(CONFIG_GNSS_UBX_SERIAL)		+= gnss-ubx.o
 gnss-ubx-y := ubx.o
+
+obj-$(CONFIG_GNSS_CMDLINE_SERIAL)	+= gnss-cmdline.o
+gnss-cmdline-y := cmdline.o
diff --git a/drivers/gnss/cmdline.c b/drivers/gnss/cmdline.c
new file mode 100644
index 0000000..3e1d2463
--- /dev/null
+++ b/drivers/gnss/cmdline.c
@@ -0,0 +1,139 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test driver for GNSS. This driver requires the serdev binding and protocol
+ * type to be specified on the module command line.
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#include <linux/device.h>
+#include <linux/gnss.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/serdev.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+
+#include "serial.h"
+
+#define GNSS_CMDLINE_MODULE_NAME "gnss-cmdline"
+
+#define gnss_cmdline_err(...) \
+	pr_err(GNSS_CMDLINE_MODULE_NAME ": " __VA_ARGS__)
+
+static char *serdev;
+module_param(serdev, charp, 0644);
+MODULE_PARM_DESC(serdev, "serial device to wrap");
+
+static int type;
+module_param(type, int, 0644);
+MODULE_PARM_DESC(serdev, "GNSS protocol type (see 'enum gnss_type')");
+
+static struct serdev_device *serdev_device;
+
+static int name_match(struct device *dev, void *data)
+{
+	return strstr(dev_name(dev), data) != NULL;
+}
+
+static int __init gnss_cmdline_init(void)
+{
+	struct device *serial_dev, *port_dev, *serdev_dev;
+	char *driver_name, *port_name, *serdev_name;
+	char *serdev_dup, *serdev_dup_sep;
+	struct gnss_serial *gserial;
+	int err = -ENODEV;
+
+	/* User did not set the serdev module parameter */
+	if (!serdev)
+		return 0;
+
+	if (type < 0 || type >= GNSS_TYPE_COUNT) {
+		gnss_cmdline_err("invalid gnss type '%d'\n", type);
+		return -EINVAL;
+	}
+
+	serdev_dup = serdev_dup_sep = kstrdup(serdev, GFP_KERNEL);
+	if (!serdev_dup)
+		return -ENOMEM;
+
+	driver_name = strsep(&serdev_dup_sep, "/");
+	if (!driver_name) {
+		gnss_cmdline_err("driver name missing\n");
+		goto err_free_serdev_dup;
+	}
+
+	port_name = strsep(&serdev_dup_sep, "/");
+	if (!port_name) {
+		gnss_cmdline_err("port name missing\n");
+		goto err_free_serdev_dup;
+	}
+
+	serdev_name = strsep(&serdev_dup_sep, "/");
+	if (!serdev_name) {
+		gnss_cmdline_err("serdev name missing\n");
+		goto err_free_serdev_dup;
+	}
+
+	/* Find the driver device instance (e.g. serial8250) */
+	serial_dev = bus_find_device_by_name(&platform_bus_type,
+					     NULL, driver_name);
+	if (!serial_dev) {
+		gnss_cmdline_err("no device '%s'\n", driver_name);
+		goto err_free_serdev_dup;
+	}
+
+	/* Find the port device instance (e.g. serial0) */
+	port_dev = device_find_child(serial_dev, port_name, name_match);
+	if (!port_dev) {
+		gnss_cmdline_err("no port '%s'\n", port_name);
+		goto err_free_serdev_dup;
+	}
+
+	/* Find the serdev device instance (e.g. serial0-0) */
+	serdev_dev = device_find_child(port_dev, serdev_name, name_match);
+	if (!serdev_dev) {
+		gnss_cmdline_err("no serdev '%s'\n", serdev_name);
+		goto err_free_serdev_dup;
+	}
+
+	gserial = gnss_serial_allocate(to_serdev_device(serdev_dev), 0);
+	if (IS_ERR(gserial)) {
+		err = PTR_ERR(gserial);
+		goto err_free_serdev_dup;
+	}
+
+	gserial->gdev->type = type;
+
+	err = gnss_serial_register(gserial);
+	if (err) {
+		gnss_serial_free(gserial);
+		goto err_free_serdev_dup;
+	}
+
+	serdev_device = to_serdev_device(serdev_dev);
+	err = 0;
+err_free_serdev_dup:
+	kfree(serdev_dup);
+	return err;
+}
+
+static void __exit gnss_cmdline_exit(void)
+{
+	struct gnss_serial *gserial;
+
+	if (!serdev_device)
+		return;
+
+	gserial = serdev_device_get_drvdata(serdev_device);
+
+	gnss_serial_deregister(gserial);
+	gnss_serial_free(gserial);
+}
+
+module_init(gnss_cmdline_init);
+module_exit(gnss_cmdline_exit);
+
+MODULE_AUTHOR("Alistair Delva <adelva@google.com>");
+MODULE_DESCRIPTION("GNSS command line driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 789ee65..9a2c765 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -677,9 +677,9 @@
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, 0),
-	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER),
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 5dd475e..c73aaab 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -355,6 +355,7 @@
 
 	if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
 		msg->flags |= MIPI_DSI_MSG_USE_LPM;
+	msg->flags |= MIPI_DSI_MSG_LASTCOMMAND;
 
 	return ops->transfer(dsi->host, msg);
 }
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 501b4fe..ea2cb1a 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1939,6 +1939,7 @@
 	strncpy(out->name, in->name, DRM_DISPLAY_MODE_LEN);
 	out->name[DRM_DISPLAY_MODE_LEN-1] = 0;
 }
+EXPORT_SYMBOL_GPL(drm_mode_convert_to_umode);
 
 /**
  * drm_crtc_convert_umode - convert a modeinfo into a drm_display_mode
@@ -2015,6 +2016,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(drm_mode_convert_umode);
 
 /**
  * drm_mode_is_420_only - if a given videomode can be only supported in YCBCR420
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 9f955f2..ca6d13eb 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -780,6 +780,28 @@
 }
 EXPORT_SYMBOL(drm_gem_dmabuf_mmap);
 
+/**
+ * drm_gem_dmabuf_get_uuid - dma_buf get_uuid implementation for GEM
+ * @dma_buf: buffer to query
+ * @uuid: uuid outparam
+ *
+ * Queries the buffer's virtio UUID. This can be used as the
+ * &dma_buf_ops.get_uuid callback. Calls into &drm_driver.gem_prime_get_uuid.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int drm_gem_dmabuf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid)
+{
+	struct drm_gem_object *obj = dma_buf->priv;
+	struct drm_device *dev = obj->dev;
+
+	if (!dev->driver->gem_prime_get_uuid)
+		return -ENODEV;
+
+	return dev->driver->gem_prime_get_uuid(obj, uuid);
+}
+EXPORT_SYMBOL(drm_gem_dmabuf_get_uuid);
+
 static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
 	.cache_sgt_mapping = true,
 	.attach = drm_gem_map_attach,
@@ -790,6 +812,7 @@
 	.mmap = drm_gem_dmabuf_mmap,
 	.vmap = drm_gem_dmabuf_vmap,
 	.vunmap = drm_gem_dmabuf_vunmap,
+	.get_uuid = drm_gem_dmabuf_get_uuid,
 };
 
 /**
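drm_gem_dmabuf_get_uuid() dispatches to the new drm_driver.gem_prime_get_uuid callback added by this series; a driver that can resolve buffer UUIDs wires it up roughly like this (function name hypothetical):

	static int example_gem_prime_get_uuid(struct drm_gem_object *obj,
					      uuid_t *uuid)
	{
		/* Fill *uuid from driver-private state for obj. */
		return 0;
	}

	static struct drm_driver example_driver = {
		/* ... other drm_driver fields ... */
		.gem_prime_get_uuid = example_gem_prime_get_uuid,
	};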
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index f84b7e6..3f7d579 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -296,10 +296,6 @@
 	struct virtio_gpu_framebuffer *virtio_gpu_fb;
 	int ret;
 
-	if (mode_cmd->pixel_format != DRM_FORMAT_HOST_XRGB8888 &&
-	    mode_cmd->pixel_format != DRM_FORMAT_HOST_ARGB8888)
-		return ERR_PTR(-ENOENT);
-
 	/* lookup object associated with res handle */
 	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
 	if (!obj)
@@ -333,7 +329,6 @@
 	if (ret)
 		return ret;
 
-	vgdev->ddev->mode_config.quirk_addfb_prefer_host_byte_order = true;
 	vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;
 
 	/* modes will be validated against the framebuffer size */
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 6a311cd..bc15db7 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -31,7 +31,14 @@
 #include "virtgpu_drv.h"
 
 static const uint32_t virtio_gpu_formats[] = {
-	DRM_FORMAT_HOST_XRGB8888,
+	DRM_FORMAT_XRGB8888,
+	DRM_FORMAT_ARGB8888,
+	DRM_FORMAT_BGRX8888,
+	DRM_FORMAT_BGRA8888,
+	DRM_FORMAT_RGBX8888,
+	DRM_FORMAT_RGBA8888,
+	DRM_FORMAT_XBGR8888,
+	DRM_FORMAT_ABGR8888,
 };
 
 static const uint32_t virtio_gpu_cursor_formats[] = {
@@ -43,6 +50,32 @@
 	uint32_t format;
 
 	switch (drm_fourcc) {
+#ifdef __BIG_ENDIAN
+	case DRM_FORMAT_XRGB8888:
+		format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
+		break;
+	case DRM_FORMAT_ARGB8888:
+		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
+		break;
+	case DRM_FORMAT_BGRX8888:
+		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
+		break;
+	case DRM_FORMAT_BGRA8888:
+		format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
+		break;
+	case DRM_FORMAT_RGBX8888:
+		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
+		break;
+	case DRM_FORMAT_RGBA8888:
+		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
+		break;
+	case DRM_FORMAT_XBGR8888:
+		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
+		break;
+	case DRM_FORMAT_ABGR8888:
+		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
+		break;
+#else
 	case DRM_FORMAT_XRGB8888:
 		format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
 		break;
@@ -55,6 +88,19 @@
 	case DRM_FORMAT_BGRA8888:
 		format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
 		break;
+	case DRM_FORMAT_RGBX8888:
+		format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
+		break;
+	case DRM_FORMAT_RGBA8888:
+		format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
+		break;
+	case DRM_FORMAT_XBGR8888:
+		format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
+		break;
+	case DRM_FORMAT_ABGR8888:
+		format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
+		break;
+#endif
 	default:
 		/*
 		 * This should not happen, we handle everything listed
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 6126296..520c2c6 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -719,6 +719,17 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called hid-multitouch.
 
+config HID_NINTENDO
+	tristate "Nintendo Joy-Con and Pro Controller support"
+	depends on HID
+	help
+	  Adds support for the Nintendo Switch Joy-Cons and Pro Controller.
+	  All controllers support Bluetooth, and the Pro Controller also
+	  supports its USB mode.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called hid-nintendo.
+
 config HID_NTI
 	tristate "NTI keyboard adapters"
 	help
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 4acb583c9..3a3a7b3c 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -77,6 +77,7 @@
 obj-$(CONFIG_HID_MICROSOFT)	+= hid-microsoft.o
 obj-$(CONFIG_HID_MONTEREY)	+= hid-monterey.o
 obj-$(CONFIG_HID_MULTITOUCH)	+= hid-multitouch.o
+obj-$(CONFIG_HID_NINTENDO)	+= hid-nintendo.o
 obj-$(CONFIG_HID_NTI)			+= hid-nti.o
 obj-$(CONFIG_HID_NTRIG)		+= hid-ntrig.o
 obj-$(CONFIG_HID_ORTEK)		+= hid-ortek.o
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index f170fea..145e789 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -894,6 +894,9 @@
 #define USB_VENDOR_ID_NINTENDO		0x057e
 #define USB_DEVICE_ID_NINTENDO_WIIMOTE	0x0306
 #define USB_DEVICE_ID_NINTENDO_WIIMOTE2	0x0330
+#define USB_DEVICE_ID_NINTENDO_JOYCONL	0x2006
+#define USB_DEVICE_ID_NINTENDO_JOYCONR	0x2007
+#define USB_DEVICE_ID_NINTENDO_PROCON	0x2009
 
 #define USB_VENDOR_ID_NOVATEK		0x0603
 #define USB_DEVICE_ID_NOVATEK_PCT	0x0600
diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c
new file mode 100644
index 0000000..3695b96
--- /dev/null
+++ b/drivers/hid/hid-nintendo.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * HID driver for Nintendo Switch Joy-Cons and Pro Controllers
+ *
+ * Copyright (c) 2019 Daniel J. Ogorchock <djogorchock@gmail.com>
+ *
+ * The following resources/projects were referenced for this driver:
+ *   https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering
+ *   https://gitlab.com/pjranki/joycon-linux-kernel (Peter Rankin)
+ *   https://github.com/FrotBot/SwitchProConLinuxUSB
+ *   https://github.com/MTCKC/ProconXInput
+ *   hid-wiimote kernel hid driver
+ *   hid-logitech-hidpp driver
+ *
+ * This driver supports the Nintendo Switch Joy-Cons and Pro Controllers. The
+ * Pro Controllers can either be used over USB or Bluetooth.
+ *
+ * The driver will retrieve the factory calibration info from the controllers,
+ * so little to no user calibration should be required.
+ *
+ */
+
+#include "hid-ids.h"
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+
+/*
+ * Reference the url below for the following HID report defines:
+ * https://github.com/dekuNukem/Nintendo_Switch_Reverse_Engineering
+ */
+
+/* Output Reports */
+static const u8 JC_OUTPUT_RUMBLE_AND_SUBCMD	= 0x01;
+static const u8 JC_OUTPUT_FW_UPDATE_PKT		= 0x03;
+static const u8 JC_OUTPUT_RUMBLE_ONLY		= 0x10;
+static const u8 JC_OUTPUT_MCU_DATA		= 0x11;
+static const u8 JC_OUTPUT_USB_CMD		= 0x80;
+
+/* Subcommand IDs */
+static const u8 JC_SUBCMD_STATE			/*= 0x00*/;
+static const u8 JC_SUBCMD_MANUAL_BT_PAIRING	= 0x01;
+static const u8 JC_SUBCMD_REQ_DEV_INFO		= 0x02;
+static const u8 JC_SUBCMD_SET_REPORT_MODE	= 0x03;
+static const u8 JC_SUBCMD_TRIGGERS_ELAPSED	= 0x04;
+static const u8 JC_SUBCMD_GET_PAGE_LIST_STATE	= 0x05;
+static const u8 JC_SUBCMD_SET_HCI_STATE		= 0x06;
+static const u8 JC_SUBCMD_RESET_PAIRING_INFO	= 0x07;
+static const u8 JC_SUBCMD_LOW_POWER_MODE	= 0x08;
+static const u8 JC_SUBCMD_SPI_FLASH_READ	= 0x10;
+static const u8 JC_SUBCMD_SPI_FLASH_WRITE	= 0x11;
+static const u8 JC_SUBCMD_RESET_MCU		= 0x20;
+static const u8 JC_SUBCMD_SET_MCU_CONFIG	= 0x21;
+static const u8 JC_SUBCMD_SET_MCU_STATE		= 0x22;
+static const u8 JC_SUBCMD_SET_PLAYER_LIGHTS	= 0x30;
+static const u8 JC_SUBCMD_GET_PLAYER_LIGHTS	= 0x31;
+static const u8 JC_SUBCMD_SET_HOME_LIGHT	= 0x38;
+static const u8 JC_SUBCMD_ENABLE_IMU		= 0x40;
+static const u8 JC_SUBCMD_SET_IMU_SENSITIVITY	= 0x41;
+static const u8 JC_SUBCMD_WRITE_IMU_REG		= 0x42;
+static const u8 JC_SUBCMD_READ_IMU_REG		= 0x43;
+static const u8 JC_SUBCMD_ENABLE_VIBRATION	= 0x48;
+static const u8 JC_SUBCMD_GET_REGULATED_VOLTAGE	= 0x50;
+
+/* Input Reports */
+static const u8 JC_INPUT_BUTTON_EVENT		= 0x3F;
+static const u8 JC_INPUT_SUBCMD_REPLY		= 0x21;
+static const u8 JC_INPUT_IMU_DATA		= 0x30;
+static const u8 JC_INPUT_MCU_DATA		= 0x31;
+static const u8 JC_INPUT_USB_RESPONSE		= 0x81;
+
+/* Feature Reports */
+static const u8 JC_FEATURE_LAST_SUBCMD		= 0x02;
+static const u8 JC_FEATURE_OTA_FW_UPGRADE	= 0x70;
+static const u8 JC_FEATURE_SETUP_MEM_READ	= 0x71;
+static const u8 JC_FEATURE_MEM_READ		= 0x72;
+static const u8 JC_FEATURE_ERASE_MEM_SECTOR	= 0x73;
+static const u8 JC_FEATURE_MEM_WRITE		= 0x74;
+static const u8 JC_FEATURE_LAUNCH		= 0x75;
+
+/* USB Commands */
+static const u8 JC_USB_CMD_CONN_STATUS		= 0x01;
+static const u8 JC_USB_CMD_HANDSHAKE		= 0x02;
+static const u8 JC_USB_CMD_BAUDRATE_3M		= 0x03;
+static const u8 JC_USB_CMD_NO_TIMEOUT		= 0x04;
+static const u8 JC_USB_CMD_EN_TIMEOUT		= 0x05;
+static const u8 JC_USB_RESET			= 0x06;
+static const u8 JC_USB_PRE_HANDSHAKE		= 0x91;
+static const u8 JC_USB_SEND_UART		= 0x92;
+
+/* SPI storage addresses of factory calibration data */
+static const u16 JC_CAL_DATA_START		= 0x603d;
+static const u16 JC_CAL_DATA_END		= 0x604e;
+#define JC_CAL_DATA_SIZE	(JC_CAL_DATA_END - JC_CAL_DATA_START + 1)
+
+
+/* The raw analog joystick values will be mapped in terms of this magnitude */
+static const u16 JC_MAX_STICK_MAG		= 32767;
+static const u16 JC_STICK_FUZZ			= 250;
+static const u16 JC_STICK_FLAT			= 500;
+
+/* States for controller state machine */
+enum joycon_ctlr_state {
+	JOYCON_CTLR_STATE_INIT,
+	JOYCON_CTLR_STATE_READ,
+};
+
+struct joycon_stick_cal {
+	s32 max;
+	s32 min;
+	s32 center;
+};
+
+/*
+ * All the controller's button values are stored in a u32.
+ * They can be accessed with bitwise ANDs.
+ */
+static const u32 JC_BTN_Y	= BIT(0);
+static const u32 JC_BTN_X	= BIT(1);
+static const u32 JC_BTN_B	= BIT(2);
+static const u32 JC_BTN_A	= BIT(3);
+static const u32 JC_BTN_SR_R	= BIT(4);
+static const u32 JC_BTN_SL_R	= BIT(5);
+static const u32 JC_BTN_R	= BIT(6);
+static const u32 JC_BTN_ZR	= BIT(7);
+static const u32 JC_BTN_MINUS	= BIT(8);
+static const u32 JC_BTN_PLUS	= BIT(9);
+static const u32 JC_BTN_RSTICK	= BIT(10);
+static const u32 JC_BTN_LSTICK	= BIT(11);
+static const u32 JC_BTN_HOME	= BIT(12);
+static const u32 JC_BTN_CAP	= BIT(13); /* capture button */
+static const u32 JC_BTN_DOWN	= BIT(16);
+static const u32 JC_BTN_UP	= BIT(17);
+static const u32 JC_BTN_RIGHT	= BIT(18);
+static const u32 JC_BTN_LEFT	= BIT(19);
+static const u32 JC_BTN_SR_L	= BIT(20);
+static const u32 JC_BTN_SL_L	= BIT(21);
+static const u32 JC_BTN_L	= BIT(22);
+static const u32 JC_BTN_ZL	= BIT(23);
+
+enum joycon_msg_type {
+	JOYCON_MSG_TYPE_NONE,
+	JOYCON_MSG_TYPE_USB,
+	JOYCON_MSG_TYPE_SUBCMD,
+};
+
+struct joycon_subcmd_request {
+	u8 output_id; /* must be 0x01 for subcommand, 0x10 for rumble only */
+	u8 packet_num; /* incremented every send */
+	u8 rumble_data[8];
+	u8 subcmd_id;
+	u8 data[0]; /* length depends on the subcommand */
+} __packed;
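+
+/*
+ * On the wire a subcommand packet is therefore an 11-byte header
+ * (output_id, packet_num, 8 rumble bytes, subcmd_id) followed by the
+ * subcommand payload; joycon_send_subcmd() sends sizeof(*subcmd) +
+ * data_len bytes accordingly.
+ */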
+
+struct joycon_subcmd_reply {
+	u8 ack; /* MSB 1 for ACK, 0 for NACK */
+	u8 id; /* id of requested subcmd */
+	u8 data[0]; /* will be at most 35 bytes */
+} __packed;
+
+struct joycon_input_report {
+	u8 id;
+	u8 timer;
+	u8 bat_con; /* battery and connection info */
+	u8 button_status[3];
+	u8 left_stick[3];
+	u8 right_stick[3];
+	u8 vibrator_report;
+
+	/*
+	 * If support for firmware updates, gyroscope data, and/or NFC/IR
+	 * is added in the future, this can be swapped for a union.
+	 */
+	struct joycon_subcmd_reply reply;
+} __packed;
+
+#define JC_MAX_RESP_SIZE	(sizeof(struct joycon_input_report) + 35)
+
+/* Each physical controller is associated with a joycon_ctlr struct */
+struct joycon_ctlr {
+	struct hid_device *hdev;
+	struct input_dev *input;
+	enum joycon_ctlr_state ctlr_state;
+
+	/* The following members are used for synchronous sends/receives */
+	enum joycon_msg_type msg_type;
+	u8 subcmd_num;
+	struct mutex output_mutex;
+	u8 input_buf[JC_MAX_RESP_SIZE];
+	wait_queue_head_t wait;
+	bool received_resp;
+	u8 usb_ack_match;
+	u8 subcmd_ack_match;
+
+	/* factory calibration data */
+	struct joycon_stick_cal left_stick_cal_x;
+	struct joycon_stick_cal left_stick_cal_y;
+	struct joycon_stick_cal right_stick_cal_x;
+	struct joycon_stick_cal right_stick_cal_y;
+};
+
+static int __joycon_hid_send(struct hid_device *hdev, u8 *data, size_t len)
+{
+	u8 *buf;
+	int ret;
+
+	buf = kmemdup(data, len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	ret = hid_hw_output_report(hdev, buf, len);
+	kfree(buf);
+	if (ret < 0)
+		hid_dbg(hdev, "Failed to send output report ret=%d\n", ret);
+	return ret;
+}
+
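+/*
+ * Synchronous send/receive: the caller holds output_mutex, transmits the
+ * report, then sleeps on ctlr->wait until the raw event handler matches
+ * the expected reply (usb_ack_match / subcmd_ack_match), copies it into
+ * input_buf and sets received_resp.
+ */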
+static int joycon_hid_send_sync(struct joycon_ctlr *ctlr, u8 *data, size_t len)
+{
+	int ret;
+
+	ret = __joycon_hid_send(ctlr->hdev, data, len);
+	if (ret < 0) {
+		memset(ctlr->input_buf, 0, JC_MAX_RESP_SIZE);
+		return ret;
+	}
+
+	if (!wait_event_timeout(ctlr->wait, ctlr->received_resp, HZ)) {
+		hid_dbg(ctlr->hdev, "synchronous send/receive timed out\n");
+		memset(ctlr->input_buf, 0, JC_MAX_RESP_SIZE);
+		return -ETIMEDOUT;
+	}
+
+	ctlr->received_resp = false;
+	return 0;
+}
+
+static int joycon_send_usb(struct joycon_ctlr *ctlr, u8 cmd)
+{
+	int ret;
+	u8 buf[2] = {JC_OUTPUT_USB_CMD};
+
+	buf[1] = cmd;
+	ctlr->usb_ack_match = cmd;
+	ctlr->msg_type = JOYCON_MSG_TYPE_USB;
+	ret = joycon_hid_send_sync(ctlr, buf, sizeof(buf));
+	if (ret)
+		hid_dbg(ctlr->hdev, "send usb command failed; ret=%d\n", ret);
+	return ret;
+}
+
+static int joycon_send_subcmd(struct joycon_ctlr *ctlr,
+			      struct joycon_subcmd_request *subcmd,
+			      size_t data_len)
+{
+	int ret;
+
+	subcmd->output_id = JC_OUTPUT_RUMBLE_AND_SUBCMD;
+	subcmd->packet_num = ctlr->subcmd_num;
+	if (++ctlr->subcmd_num > 0xF)
+		ctlr->subcmd_num = 0;
+	ctlr->subcmd_ack_match = subcmd->subcmd_id;
+	ctlr->msg_type = JOYCON_MSG_TYPE_SUBCMD;
+
+	ret = joycon_hid_send_sync(ctlr, (u8 *)subcmd,
+				   sizeof(*subcmd) + data_len);
+	if (ret < 0)
+		hid_dbg(ctlr->hdev, "send subcommand failed; ret=%d\n", ret);
+	else
+		ret = 0;
+	return ret;
+}
+
+/* Supply one nibble each for flash and on. Set bits mark active LEDs. */
+static int joycon_set_player_leds(struct joycon_ctlr *ctlr, u8 flash, u8 on)
+{
+	struct joycon_subcmd_request *req;
+	u8 buffer[sizeof(*req) + 1] = { 0 };
+
+	req = (struct joycon_subcmd_request *)buffer;
+	req->subcmd_id = JC_SUBCMD_SET_PLAYER_LIGHTS;
+	req->data[0] = (flash << 4) | on;
+
+	hid_dbg(ctlr->hdev, "setting player leds\n");
+	return joycon_send_subcmd(ctlr, req, 1);
+}
+
+static const u16 DFLT_STICK_CAL_CEN = 2000;
+static const u16 DFLT_STICK_CAL_MAX = 3500;
+static const u16 DFLT_STICK_CAL_MIN = 500;
+static int joycon_request_calibration(struct joycon_ctlr *ctlr)
+{
+	struct joycon_subcmd_request *req;
+	u8 buffer[sizeof(*req) + 5] = { 0 };
+	struct joycon_input_report *report;
+	struct joycon_stick_cal *cal_x;
+	struct joycon_stick_cal *cal_y;
+	s32 x_max_above;
+	s32 x_min_below;
+	s32 y_max_above;
+	s32 y_min_below;
+	u8 *data;
+	u8 *raw_cal;
+	int ret;
+
+	req = (struct joycon_subcmd_request *)buffer;
+	req->subcmd_id = JC_SUBCMD_SPI_FLASH_READ;
+	data = req->data;
+	data[0] = 0xFF & JC_CAL_DATA_START;
+	data[1] = 0xFF & (JC_CAL_DATA_START >> 8);
+	data[2] = 0xFF & (JC_CAL_DATA_START >> 16);
+	data[3] = 0xFF & (JC_CAL_DATA_START >> 24);
+	data[4] = JC_CAL_DATA_SIZE;
+
+	hid_dbg(ctlr->hdev, "requesting cal data\n");
+	ret = joycon_send_subcmd(ctlr, req, 5);
+	if (ret) {
+		hid_warn(ctlr->hdev,
+			 "Failed to read stick cal, using defaults; ret=%d\n",
+			 ret);
+
+		ctlr->left_stick_cal_x.center = DFLT_STICK_CAL_CEN;
+		ctlr->left_stick_cal_x.max = DFLT_STICK_CAL_MAX;
+		ctlr->left_stick_cal_x.min = DFLT_STICK_CAL_MIN;
+
+		ctlr->left_stick_cal_y.center = DFLT_STICK_CAL_CEN;
+		ctlr->left_stick_cal_y.max = DFLT_STICK_CAL_MAX;
+		ctlr->left_stick_cal_y.min = DFLT_STICK_CAL_MIN;
+
+		ctlr->right_stick_cal_x.center = DFLT_STICK_CAL_CEN;
+		ctlr->right_stick_cal_x.max = DFLT_STICK_CAL_MAX;
+		ctlr->right_stick_cal_x.min = DFLT_STICK_CAL_MIN;
+
+		ctlr->right_stick_cal_y.center = DFLT_STICK_CAL_CEN;
+		ctlr->right_stick_cal_y.max = DFLT_STICK_CAL_MAX;
+		ctlr->right_stick_cal_y.min = DFLT_STICK_CAL_MIN;
+
+		return ret;
+	}
+
+	report = (struct joycon_input_report *)ctlr->input_buf;
+	raw_cal = &report->reply.data[5];
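+	/*
+	 * The first 5 reply bytes echo the requested address and length;
+	 * the raw calibration data starts at offset 5. Each stick then
+	 * occupies 9 bytes: three pairs of 12-bit values packed into the
+	 * nibbles of adjacent bytes.
+	 */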
+
+	/* left stick calibration parsing */
+	cal_x = &ctlr->left_stick_cal_x;
+	cal_y = &ctlr->left_stick_cal_y;
+
+	x_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 0), 0, 12);
+	y_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 1), 4, 12);
+	cal_x->center = hid_field_extract(ctlr->hdev, (raw_cal + 3), 0, 12);
+	cal_y->center = hid_field_extract(ctlr->hdev, (raw_cal + 4), 4, 12);
+	x_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 6), 0, 12);
+	y_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 7), 4, 12);
+	cal_x->max = cal_x->center + x_max_above;
+	cal_x->min = cal_x->center - x_min_below;
+	cal_y->max = cal_y->center + y_max_above;
+	cal_y->min = cal_y->center - y_min_below;
+
+	/* right stick calibration parsing */
+	raw_cal += 9;
+	cal_x = &ctlr->right_stick_cal_x;
+	cal_y = &ctlr->right_stick_cal_y;
+
+	cal_x->center = hid_field_extract(ctlr->hdev, (raw_cal + 0), 0, 12);
+	cal_y->center = hid_field_extract(ctlr->hdev, (raw_cal + 1), 4, 12);
+	x_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 3), 0, 12);
+	y_min_below = hid_field_extract(ctlr->hdev, (raw_cal + 4), 4, 12);
+	x_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 6), 0, 12);
+	y_max_above = hid_field_extract(ctlr->hdev, (raw_cal + 7), 4, 12);
+	cal_x->max = cal_x->center + x_max_above;
+	cal_x->min = cal_x->center - x_min_below;
+	cal_y->max = cal_y->center + y_max_above;
+	cal_y->min = cal_y->center - y_min_below;
+
+	hid_dbg(ctlr->hdev, "calibration:\n"
+			    "l_x_c=%d l_x_max=%d l_x_min=%d\n"
+			    "l_y_c=%d l_y_max=%d l_y_min=%d\n"
+			    "r_x_c=%d r_x_max=%d r_x_min=%d\n"
+			    "r_y_c=%d r_y_max=%d r_y_min=%d\n",
+			    ctlr->left_stick_cal_x.center,
+			    ctlr->left_stick_cal_x.max,
+			    ctlr->left_stick_cal_x.min,
+			    ctlr->left_stick_cal_y.center,
+			    ctlr->left_stick_cal_y.max,
+			    ctlr->left_stick_cal_y.min,
+			    ctlr->right_stick_cal_x.center,
+			    ctlr->right_stick_cal_x.max,
+			    ctlr->right_stick_cal_x.min,
+			    ctlr->right_stick_cal_y.center,
+			    ctlr->right_stick_cal_y.max,
+			    ctlr->right_stick_cal_y.min);
+
+	return 0;
+}
+
+static int joycon_set_report_mode(struct joycon_ctlr *ctlr)
+{
+	struct joycon_subcmd_request *req;
+	u8 buffer[sizeof(*req) + 1] = { 0 };
+
+	req = (struct joycon_subcmd_request *)buffer;
+	req->subcmd_id = JC_SUBCMD_SET_REPORT_MODE;
+	req->data[0] = 0x30; /* standard, full report mode */
+
+	hid_dbg(ctlr->hdev, "setting controller report mode\n");
+	return joycon_send_subcmd(ctlr, req, 1);
+}
+
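+/*
+ * Map a raw stick sample onto [-JC_MAX_STICK_MAG, JC_MAX_STICK_MAG] using
+ * the per-axis calibration. For example, center=2000 and max=3500 map a
+ * raw value of 2750 to (2750 - 2000) * 32767 / 1500 ~= 16383, i.e. about
+ * half deflection.
+ */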
+static s32 joycon_map_stick_val(struct joycon_stick_cal *cal, s32 val)
+{
+	s32 center = cal->center;
+	s32 min = cal->min;
+	s32 max = cal->max;
+	s32 new_val;
+
+	if (val > center) {
+		new_val = (val - center) * JC_MAX_STICK_MAG;
+		new_val /= (max - center);
+	} else {
+		new_val = (center - val) * -JC_MAX_STICK_MAG;
+		new_val /= (center - min);
+	}
+	new_val = clamp(new_val, (s32)-JC_MAX_STICK_MAG, (s32)JC_MAX_STICK_MAG);
+	return new_val;
+}
+
+static void joycon_parse_report(struct joycon_ctlr *ctlr,
+				struct joycon_input_report *rep)
+{
+	struct input_dev *dev = ctlr->input;
+	u32 btns;
+	u32 id = ctlr->hdev->product;
+
+	btns = hid_field_extract(ctlr->hdev, rep->button_status, 0, 24);
+
+	if (id != USB_DEVICE_ID_NINTENDO_JOYCONR) {
+		u16 raw_x;
+		u16 raw_y;
+		s32 x;
+		s32 y;
+
+		/* get raw stick values */
+		raw_x = hid_field_extract(ctlr->hdev, rep->left_stick, 0, 12);
+		raw_y = hid_field_extract(ctlr->hdev,
+					  rep->left_stick + 1, 4, 12);
+		/* map the stick values */
+		x = joycon_map_stick_val(&ctlr->left_stick_cal_x, raw_x);
+		y = -joycon_map_stick_val(&ctlr->left_stick_cal_y, raw_y);
+		/* report sticks */
+		input_report_abs(dev, ABS_X, x);
+		input_report_abs(dev, ABS_Y, y);
+
+		/* report buttons */
+		input_report_key(dev, BTN_TL, btns & JC_BTN_L);
+		input_report_key(dev, BTN_TL2, btns & JC_BTN_ZL);
+		if (id != USB_DEVICE_ID_NINTENDO_PROCON) {
+			/* Report the S buttons as the non-existent triggers */
+			input_report_key(dev, BTN_TR, btns & JC_BTN_SL_L);
+			input_report_key(dev, BTN_TR2, btns & JC_BTN_SR_L);
+		}
+		input_report_key(dev, BTN_SELECT, btns & JC_BTN_MINUS);
+		input_report_key(dev, BTN_THUMBL, btns & JC_BTN_LSTICK);
+		input_report_key(dev, BTN_Z, btns & JC_BTN_CAP);
+		input_report_key(dev, BTN_DPAD_DOWN, btns & JC_BTN_DOWN);
+		input_report_key(dev, BTN_DPAD_UP, btns & JC_BTN_UP);
+		input_report_key(dev, BTN_DPAD_RIGHT, btns & JC_BTN_RIGHT);
+		input_report_key(dev, BTN_DPAD_LEFT, btns & JC_BTN_LEFT);
+	}
+	if (id != USB_DEVICE_ID_NINTENDO_JOYCONL) {
+		u16 raw_x;
+		u16 raw_y;
+		s32 x;
+		s32 y;
+
+		/* get raw stick values */
+		raw_x = hid_field_extract(ctlr->hdev, rep->right_stick, 0, 12);
+		raw_y = hid_field_extract(ctlr->hdev,
+					  rep->right_stick + 1, 4, 12);
+		/* map stick values */
+		x = joycon_map_stick_val(&ctlr->right_stick_cal_x, raw_x);
+		y = -joycon_map_stick_val(&ctlr->right_stick_cal_y, raw_y);
+		/* report sticks */
+		input_report_abs(dev, ABS_RX, x);
+		input_report_abs(dev, ABS_RY, y);
+
+		/* report buttons */
+		input_report_key(dev, BTN_TR, btns & JC_BTN_R);
+		input_report_key(dev, BTN_TR2, btns & JC_BTN_ZR);
+		if (id != USB_DEVICE_ID_NINTENDO_PROCON) {
+			/* Report the S buttons as the non-existent triggers */
+			input_report_key(dev, BTN_TL, btns & JC_BTN_SL_R);
+			input_report_key(dev, BTN_TL2, btns & JC_BTN_SR_R);
+		}
+		input_report_key(dev, BTN_START, btns & JC_BTN_PLUS);
+		input_report_key(dev, BTN_THUMBR, btns & JC_BTN_RSTICK);
+		input_report_key(dev, BTN_MODE, btns & JC_BTN_HOME);
+		input_report_key(dev, BTN_WEST, btns & JC_BTN_Y);
+		input_report_key(dev, BTN_NORTH, btns & JC_BTN_X);
+		input_report_key(dev, BTN_EAST, btns & JC_BTN_A);
+		input_report_key(dev, BTN_SOUTH, btns & JC_BTN_B);
+	}
+
+	input_sync(dev);
+}
+
+static const unsigned int joycon_button_inputs_l[] = {
+	BTN_SELECT, BTN_Z, BTN_THUMBL,
+	BTN_DPAD_UP, BTN_DPAD_DOWN, BTN_DPAD_LEFT, BTN_DPAD_RIGHT,
+	BTN_TL, BTN_TL2,
+	0 /* 0 signals end of array */
+};
+
+static const unsigned int joycon_button_inputs_r[] = {
+	BTN_START, BTN_MODE, BTN_THUMBR,
+	BTN_SOUTH, BTN_EAST, BTN_NORTH, BTN_WEST,
+	BTN_TR, BTN_TR2,
+	0 /* 0 signals end of array */
+};
+
+static DEFINE_MUTEX(joycon_input_num_mutex);
+static int joycon_input_create(struct joycon_ctlr *ctlr)
+{
+	struct hid_device *hdev;
+	static int input_num = 1;
+	const char *name;
+	int ret;
+	int i;
+
+	hdev = ctlr->hdev;
+
+	switch (hdev->product) {
+	case USB_DEVICE_ID_NINTENDO_PROCON:
+		name = "Nintendo Switch Pro Controller";
+		break;
+	case USB_DEVICE_ID_NINTENDO_JOYCONL:
+		name = "Nintendo Switch Left Joy-Con";
+		break;
+	case USB_DEVICE_ID_NINTENDO_JOYCONR:
+		name = "Nintendo Switch Right Joy-Con";
+		break;
+	default: /* Should be impossible */
+		hid_err(hdev, "Invalid hid product\n");
+		return -EINVAL;
+	}
+
+	ctlr->input = devm_input_allocate_device(&hdev->dev);
+	if (!ctlr->input)
+		return -ENOMEM;
+	ctlr->input->id.bustype = hdev->bus;
+	ctlr->input->id.vendor = hdev->vendor;
+	ctlr->input->id.product = hdev->product;
+	ctlr->input->id.version = hdev->version;
+	ctlr->input->name = name;
+	input_set_drvdata(ctlr->input, ctlr);
+
+	/* set up sticks */
+	if (hdev->product != USB_DEVICE_ID_NINTENDO_JOYCONR) {
+		input_set_abs_params(ctlr->input, ABS_X,
+				     -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG,
+				     JC_STICK_FUZZ, JC_STICK_FLAT);
+		input_set_abs_params(ctlr->input, ABS_Y,
+				     -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG,
+				     JC_STICK_FUZZ, JC_STICK_FLAT);
+	}
+	if (hdev->product != USB_DEVICE_ID_NINTENDO_JOYCONL) {
+		input_set_abs_params(ctlr->input, ABS_RX,
+				     -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG,
+				     JC_STICK_FUZZ, JC_STICK_FLAT);
+		input_set_abs_params(ctlr->input, ABS_RY,
+				     -JC_MAX_STICK_MAG, JC_MAX_STICK_MAG,
+				     JC_STICK_FUZZ, JC_STICK_FLAT);
+	}
+
+	/* set up buttons */
+	if (hdev->product != USB_DEVICE_ID_NINTENDO_JOYCONR) {
+		for (i = 0; joycon_button_inputs_l[i] > 0; i++)
+			input_set_capability(ctlr->input, EV_KEY,
+					     joycon_button_inputs_l[i]);
+	}
+	if (hdev->product != USB_DEVICE_ID_NINTENDO_JOYCONL) {
+		for (i = 0; joycon_button_inputs_r[i] > 0; i++)
+			input_set_capability(ctlr->input, EV_KEY,
+					     joycon_button_inputs_r[i]);
+	}
+
+	ret = input_register_device(ctlr->input);
+	if (ret)
+		return ret;
+
+	/* Set the default controller player leds based on controller number */
+	mutex_lock(&joycon_input_num_mutex);
+	mutex_lock(&ctlr->output_mutex);
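+	/*
+	 * 0xF >> (4 - input_num) lights the first input_num player LEDs;
+	 * e.g. input_num == 2 gives 0x3 (LEDs 1 and 2 on, none flashing).
+	 */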
+	ret = joycon_set_player_leds(ctlr, 0, 0xF >> (4 - input_num));
+	if (ret)
+		hid_warn(ctlr->hdev, "Failed to set leds; ret=%d\n", ret);
+	mutex_unlock(&ctlr->output_mutex);
+	if (++input_num > 4)
+		input_num = 1;
+	mutex_unlock(&joycon_input_num_mutex);
+
+	return 0;
+}
+
+/* Common handler for parsing inputs */
+static int joycon_ctlr_read_handler(struct joycon_ctlr *ctlr, u8 *data,
+							      int size)
+{
+	int ret = 0;
+
+	if (data[0] == JC_INPUT_SUBCMD_REPLY || data[0] == JC_INPUT_IMU_DATA ||
+	    data[0] == JC_INPUT_MCU_DATA) {
+		if (size >= 12) /* make sure it contains the input report */
+			joycon_parse_report(ctlr,
+					    (struct joycon_input_report *)data);
+	}
+
+	return ret;
+}
+
+static int joycon_ctlr_handle_event(struct joycon_ctlr *ctlr, u8 *data,
+							      int size)
+{
+	int ret = 0;
+	bool match = false;
+	struct joycon_input_report *report;
+
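+	/*
+	 * If output_mutex is held, a synchronous send may be blocked in
+	 * joycon_hid_send_sync() waiting for this report. Check whether it
+	 * matches the expected USB ack or subcommand reply before treating
+	 * it as a normal input report.
+	 */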
+	if (unlikely(mutex_is_locked(&ctlr->output_mutex)) &&
+	    ctlr->msg_type != JOYCON_MSG_TYPE_NONE) {
+		switch (ctlr->msg_type) {
+		case JOYCON_MSG_TYPE_USB:
+			if (size < 2)
+				break;
+			if (data[0] == JC_INPUT_USB_RESPONSE &&
+			    data[1] == ctlr->usb_ack_match)
+				match = true;
+			break;
+		case JOYCON_MSG_TYPE_SUBCMD:
+			if (size < sizeof(struct joycon_input_report) ||
+			    data[0] != JC_INPUT_SUBCMD_REPLY)
+				break;
+			report = (struct joycon_input_report *)data;
+			if (report->reply.id == ctlr->subcmd_ack_match)
+				match = true;
+			break;
+		default:
+			break;
+		}
+
+		if (match) {
+			memcpy(ctlr->input_buf, data,
+			       min(size, (int)JC_MAX_RESP_SIZE));
+			ctlr->msg_type = JOYCON_MSG_TYPE_NONE;
+			ctlr->received_resp = true;
+			wake_up(&ctlr->wait);
+
+			/* This message has been handled */
+			return 1;
+		}
+	}
+
+	if (ctlr->ctlr_state == JOYCON_CTLR_STATE_READ)
+		ret = joycon_ctlr_read_handler(ctlr, data, size);
+
+	return ret;
+}
+
+static int nintendo_hid_event(struct hid_device *hdev,
+			      struct hid_report *report, u8 *raw_data, int size)
+{
+	struct joycon_ctlr *ctlr = hid_get_drvdata(hdev);
+
+	if (size < 1)
+		return -EINVAL;
+
+	return joycon_ctlr_handle_event(ctlr, raw_data, size);
+}
+
+static int nintendo_hid_probe(struct hid_device *hdev,
+			    const struct hid_device_id *id)
+{
+	int ret;
+	struct joycon_ctlr *ctlr;
+
+	hid_dbg(hdev, "probe - start\n");
+
+	ctlr = devm_kzalloc(&hdev->dev, sizeof(*ctlr), GFP_KERNEL);
+	if (!ctlr) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	ctlr->hdev = hdev;
+	ctlr->ctlr_state = JOYCON_CTLR_STATE_INIT;
+	hid_set_drvdata(hdev, ctlr);
+	mutex_init(&ctlr->output_mutex);
+	init_waitqueue_head(&ctlr->wait);
+
+	ret = hid_parse(hdev);
+	if (ret) {
+		hid_err(hdev, "HID parse failed\n");
+		goto err;
+	}
+
+	ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
+	if (ret) {
+		hid_err(hdev, "HW start failed\n");
+		goto err;
+	}
+
+	ret = hid_hw_open(hdev);
+	if (ret) {
+		hid_err(hdev, "cannot start hardware I/O\n");
+		goto err_stop;
+	}
+
+	hid_device_io_start(hdev);
+
+	/* Initialize the controller */
+	mutex_lock(&ctlr->output_mutex);
+	/* if the handshake command fails, assume a Bluetooth (BLE) Pro Controller */
+	if (hdev->product == USB_DEVICE_ID_NINTENDO_PROCON &&
+	    !joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE)) {
+		hid_dbg(hdev, "detected USB controller\n");
+		/* set baudrate for improved latency */
+		ret = joycon_send_usb(ctlr, JC_USB_CMD_BAUDRATE_3M);
+		if (ret) {
+			hid_err(hdev, "Failed to set baudrate; ret=%d\n", ret);
+			goto err_mutex;
+		}
+		/* handshake */
+		ret = joycon_send_usb(ctlr, JC_USB_CMD_HANDSHAKE);
+		if (ret) {
+			hid_err(hdev, "Failed handshake; ret=%d\n", ret);
+			goto err_mutex;
+		}
+		/*
+		 * Set no timeout (to keep controller in USB mode).
+		 * The controller sends no response to this command, so ignore the timeout.
+		 */
+		joycon_send_usb(ctlr, JC_USB_CMD_NO_TIMEOUT);
+	}
+
+	/* get controller calibration data, and parse it */
+	ret = joycon_request_calibration(ctlr);
+	if (ret) {
+		/*
+		 * We can function with default calibration, but it may be
+		 * inaccurate. Provide a warning, and continue on.
+		 */
+		hid_warn(hdev, "Analog stick positions may be inaccurate\n");
+	}
+
+	/* Set the reporting mode to 0x30, which is the full report mode */
+	ret = joycon_set_report_mode(ctlr);
+	if (ret) {
+		hid_err(hdev, "Failed to set report mode; ret=%d\n", ret);
+		goto err_mutex;
+	}
+
+	mutex_unlock(&ctlr->output_mutex);
+
+	ret = joycon_input_create(ctlr);
+	if (ret) {
+		hid_err(hdev, "Failed to create input device; ret=%d\n", ret);
+		goto err_close;
+	}
+
+	ctlr->ctlr_state = JOYCON_CTLR_STATE_READ;
+
+	hid_dbg(hdev, "probe - success\n");
+	return 0;
+
+err_mutex:
+	mutex_unlock(&ctlr->output_mutex);
+err_close:
+	hid_hw_close(hdev);
+err_stop:
+	hid_hw_stop(hdev);
+err:
+	hid_err(hdev, "probe - fail = %d\n", ret);
+	return ret;
+}
+
+static void nintendo_hid_remove(struct hid_device *hdev)
+{
+	hid_dbg(hdev, "remove\n");
+	hid_hw_close(hdev);
+	hid_hw_stop(hdev);
+}
+
+static const struct hid_device_id nintendo_hid_devices[] = {
+	{ HID_USB_DEVICE(USB_VENDOR_ID_NINTENDO,
+			 USB_DEVICE_ID_NINTENDO_PROCON) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
+			 USB_DEVICE_ID_NINTENDO_PROCON) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
+			 USB_DEVICE_ID_NINTENDO_JOYCONL) },
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
+			 USB_DEVICE_ID_NINTENDO_JOYCONR) },
+	{ }
+};
+MODULE_DEVICE_TABLE(hid, nintendo_hid_devices);
+
+static struct hid_driver nintendo_hid_driver = {
+	.name		= "nintendo",
+	.id_table	= nintendo_hid_devices,
+	.probe		= nintendo_hid_probe,
+	.remove		= nintendo_hid_remove,
+	.raw_event	= nintendo_hid_event,
+};
+module_hid_driver(nintendo_hid_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel J. Ogorchock <djogorchock@gmail.com>");
+MODULE_DESCRIPTION("Driver for Nintendo Switch Controllers");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 04878ca..1e53bf1 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -11,6 +11,36 @@
 config IOMMU_API
 	bool
 
+if IOMMU_IOVA
+
+config IOMMU_LIMIT_IOVA_ALIGNMENT
+	bool "Limit IOVA alignment"
+	help
+	  When the IOVA framework applies IOVA alignment, it aligns all
+	  IOVAs to the smallest PAGE_SIZE order which is greater than or
+	  equal to the requested IOVA size. This works fine for sizes up
+	  to several MiB, but for larger sizes it results in address
+	  space wastage and fragmentation. For example, drivers with a
+	  4 GiB IOVA space might run out of IOVA space when allocating
+	  buffers greater than 64 MiB.
+
+	  Enable this option to impose a limit on the alignment of IOVAs.
+
+	  If unsure, say N.
+
+config IOMMU_IOVA_ALIGNMENT
+	int "Maximum PAGE_SIZE order of alignment for IOVAs"
+	depends on IOMMU_LIMIT_IOVA_ALIGNMENT
+	range 4 9
+	default 9
+	help
+	  With this parameter you can specify the maximum PAGE_SIZE order for
+	  IOVAs. Larger IOVAs will be aligned only to this specified order.
+	  The order is expressed as a power of two multiplied by PAGE_SIZE.
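+
+	  For example, with 4 KiB pages and the default order of 9, IOVAs
+	  larger than 2 MiB are aligned to a 2 MiB boundary rather than to
+	  their own size.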
+
+	  If unsure, leave the default value "9".
+
+endif
+
 menuconfig IOMMU_SUPPORT
 	bool "IOMMU Hardware Support"
 	depends on MMU
@@ -248,6 +278,7 @@
 config ARM_SMMU
 	tristate "ARM Ltd. System MMU (SMMU) Support"
 	depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)
+	depends on QCOM_SCM || !QCOM_SCM # if QCOM_SCM=m this can't be =y
 	select IOMMU_API
 	select IOMMU_IO_PGTABLE_LPAE
 	select ARM_DMA_USE_IOMMU if ARM
@@ -375,6 +406,7 @@
 	# Note: iommu drivers cannot (yet?) be built as modules
 	bool "Qualcomm IOMMU Support"
 	depends on ARCH_QCOM || (COMPILE_TEST && !GENERIC_ATOMIC64)
+	depends on QCOM_SCM=y
 	select IOMMU_API
 	select IOMMU_IO_PGTABLE_LPAE
 	select ARM_DMA_USE_IOMMU
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
index 702fbaa..523b020 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c
@@ -3,6 +3,7 @@
  * Copyright (c) 2019, The Linux Foundation. All rights reserved.
  */
 
+#include <linux/bitfield.h>
 #include <linux/of_device.h>
 #include <linux/qcom_scm.h>
 
@@ -12,6 +13,43 @@
 	struct arm_smmu_device smmu;
 };
 
+static int qcom_sdm845_smmu500_cfg_probe(struct arm_smmu_device *smmu)
+{
+	u32 s2cr;
+	u32 smr;
+	int i;
+
+	for (i = 0; i < smmu->num_mapping_groups; i++) {
+		smr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_SMR(i));
+		s2cr = arm_smmu_gr0_read(smmu, ARM_SMMU_GR0_S2CR(i));
+
+		smmu->smrs[i].mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr);
+		smmu->smrs[i].id = FIELD_GET(ARM_SMMU_SMR_ID, smr);
+		if (smmu->features & ARM_SMMU_FEAT_EXIDS)
+			smmu->smrs[i].valid = FIELD_GET(
+						ARM_SMMU_S2CR_EXIDVALID,
+						s2cr);
+		else
+			smmu->smrs[i].valid = FIELD_GET(
+						ARM_SMMU_SMR_VALID,
+						smr);
+
+		smmu->s2crs[i].group = NULL;
+		smmu->s2crs[i].count = 0;
+		smmu->s2crs[i].type = FIELD_GET(ARM_SMMU_S2CR_TYPE, s2cr);
+		smmu->s2crs[i].privcfg = FIELD_GET(ARM_SMMU_S2CR_PRIVCFG, s2cr);
+		smmu->s2crs[i].cbndx = FIELD_GET(ARM_SMMU_S2CR_CBNDX, s2cr);
+
+		if (!smmu->smrs[i].valid)
+			continue;
+
+		smmu->s2crs[i].pinned = true;
+		bitmap_set(smmu->context_map, smmu->s2crs[i].cbndx, 1);
+	}
+
+	return 0;
+}
+
 static const struct of_device_id qcom_smmu_client_of_match[] __maybe_unused = {
 	{ .compatible = "qcom,adreno" },
 	{ .compatible = "qcom,mdp4" },
@@ -62,6 +100,7 @@
 
 static const struct arm_smmu_impl qcom_smmu_impl = {
 	.def_domain_type = qcom_smmu_def_domain_type,
+	.cfg_probe = qcom_sdm845_smmu500_cfg_probe,
 	.reset = qcom_smmu500_reset,
 };
 
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c
index dad7fa8..f17e59f7 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -1030,12 +1030,19 @@
 
 static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
 {
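+	/*
+	 * Pinned entries (e.g. stream mappings taken over from the
+	 * bootloader and claimed in qcom_sdm845_smmu500_cfg_probe()) must
+	 * survive the last put, so stash cbndx and restore the pinned
+	 * state after the entry is reinitialised below.
+	 */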
+	bool pinned = smmu->s2crs[idx].pinned;
+	u8 cbndx = smmu->s2crs[idx].cbndx;
+
 	if (--smmu->s2crs[idx].count)
 		return false;
 
 	smmu->s2crs[idx] = s2cr_init_val;
-	if (smmu->smrs)
+	if (pinned) {
+		smmu->s2crs[idx].pinned = true;
+		smmu->s2crs[idx].cbndx = cbndx;
+	} else if (smmu->smrs) {
 		smmu->smrs[idx].valid = false;
+	}
 
 	return true;
 }
@@ -1123,6 +1130,10 @@
 		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
 			continue;
 
+	/* Don't bypass pinned streams; leave them as they are */
+		if (type == S2CR_TYPE_BYPASS && s2cr[idx].pinned)
+			continue;
+
 		s2cr[idx].type = type;
 		s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
 		s2cr[idx].cbndx = cbndx;
diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h
index 1a74647..5c90411 100644
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -262,13 +262,13 @@
 	enum arm_smmu_s2cr_type		type;
 	enum arm_smmu_s2cr_privcfg	privcfg;
 	u8				cbndx;
+	bool				pinned;
 };
 
 struct arm_smmu_smr {
 	u16				mask;
 	u16				id;
 	bool				valid;
-	bool				pinned;
 };
 
 struct arm_smmu_device {
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 0cbcd3fc..7f71b666 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -370,6 +370,52 @@
 	return 0;
 }
 
+/*
+ * Reserve a range of IOVAs so the DMA layer will never allocate from it.
+ * Should be called prior to using the DMA APIs.
+ */
+int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
+			   u64 size)
+{
+	struct iommu_domain *domain;
+	struct iommu_dma_cookie *cookie;
+	struct iova_domain *iovad;
+	unsigned long pfn_lo, pfn_hi;
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (!domain || !domain->iova_cookie)
+		return -EINVAL;
+
+	cookie = domain->iova_cookie;
+	iovad = &cookie->iovad;
+
+	/* iova will be freed automatically by put_iova_domain() */
+	pfn_lo = iova_pfn(iovad, base);
+	pfn_hi = iova_pfn(iovad, base + size - 1);
+	if (!reserve_iova(iovad, pfn_lo, pfn_hi))
+		return -EINVAL;
+
+	return 0;
+}
+EXPORT_SYMBOL(iommu_dma_reserve_iova);
+
+/*
+ * Switch the IOVA allocator for this device's domain to best-fit.
+ * Should be called prior to using the DMA APIs.
+ */
+int iommu_dma_enable_best_fit_algo(struct device *dev)
+{
+	struct iommu_domain *domain;
+	struct iova_domain *iovad;
+
+	domain = iommu_get_domain_for_dev(dev);
+	if (!domain || !domain->iova_cookie)
+		return -EINVAL;
+
+	iovad = &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
+	iovad->best_fit = true;
+	return 0;
+}
+EXPORT_SYMBOL(iommu_dma_enable_best_fit_algo);
+
 /**
  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
  *                    page flags.
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 30d969a..2adc788 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -50,6 +50,7 @@
 	iovad->anchor.pfn_lo = iovad->anchor.pfn_hi = IOVA_ANCHOR;
 	rb_link_node(&iovad->anchor.node, NULL, &iovad->rbroot.rb_node);
 	rb_insert_color(&iovad->anchor.node, &iovad->rbroot);
+	iovad->best_fit = false;
 	init_iova_rcaches(iovad);
 }
 EXPORT_SYMBOL_GPL(init_iova_domain);
@@ -177,6 +178,24 @@
 	rb_insert_color(&iova->node, root);
 }
 
+#ifdef CONFIG_IOMMU_LIMIT_IOVA_ALIGNMENT
+static unsigned long limit_align_shift(struct iova_domain *iovad,
+				       unsigned long shift)
+{
+	unsigned long max_align_shift;
+
+	max_align_shift = CONFIG_IOMMU_IOVA_ALIGNMENT + PAGE_SHIFT
+		- iova_shift(iovad);
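+	/*
+	 * e.g. with 4 KiB pages, an IOVA granule equal to PAGE_SIZE and
+	 * CONFIG_IOMMU_IOVA_ALIGNMENT=9: max_align_shift = 9 + 12 - 12 = 9,
+	 * capping size-alignment at 2^9 pages (2 MiB).
+	 */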
+	return min_t(unsigned long, max_align_shift, shift);
+}
+#else
+static unsigned long limit_align_shift(struct iova_domain *iovad,
+				       unsigned long shift)
+{
+	return shift;
+}
+#endif
+
 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
 		unsigned long size, unsigned long limit_pfn,
 			struct iova *new, bool size_aligned)
@@ -184,11 +203,12 @@
 	struct rb_node *curr, *prev;
 	struct iova *curr_iova;
 	unsigned long flags;
-	unsigned long new_pfn;
+	unsigned long new_pfn, low_pfn_new;
 	unsigned long align_mask = ~0UL;
+	unsigned long high_pfn = limit_pfn, low_pfn = iovad->start_pfn;
 
 	if (size_aligned)
-		align_mask <<= fls_long(size - 1);
+		align_mask <<= limit_align_shift(iovad, fls_long(size - 1));
 
 	/* Walk the tree backwards */
 	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
@@ -198,15 +218,25 @@
 
 	curr = __get_cached_rbnode(iovad, limit_pfn);
 	curr_iova = rb_entry(curr, struct iova, node);
+	low_pfn_new = curr_iova->pfn_hi + 1;
+
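+	/*
+	 * First pass searches downwards from the cached node; if that
+	 * fails, retry once across the whole [low_pfn_new, limit_pfn]
+	 * window from the anchor node before declaring the space full.
+	 */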
+retry:
 	do {
-		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
-		new_pfn = (limit_pfn - size) & align_mask;
+		high_pfn = min(high_pfn, curr_iova->pfn_lo);
+		new_pfn = (high_pfn - size) & align_mask;
 		prev = curr;
 		curr = rb_prev(curr);
 		curr_iova = rb_entry(curr, struct iova, node);
-	} while (curr && new_pfn <= curr_iova->pfn_hi);
+	} while (curr && new_pfn <= curr_iova->pfn_hi && new_pfn >= low_pfn);
 
-	if (limit_pfn < size || new_pfn < iovad->start_pfn) {
+	if (high_pfn < size || new_pfn < low_pfn) {
+		if (low_pfn == iovad->start_pfn && low_pfn_new < limit_pfn) {
+			high_pfn = limit_pfn;
+			low_pfn = low_pfn_new;
+			curr = &iovad->anchor.node;
+			curr_iova = rb_entry(curr, struct iova, node);
+			goto retry;
+		}
 		iovad->max32_alloc_size = size;
 		goto iova32_full;
 	}
@@ -227,6 +257,70 @@
 	return -ENOMEM;
 }
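+
+/*
+ * Best-fit variant of the allocator above: walk every gap in the tree,
+ * track the smallest gap that still fits the (aligned) request, and stop
+ * early on an exact fit. Trades a longer walk for less fragmentation
+ * than the first-fit search in __alloc_and_insert_iova_range().
+ */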
 
+static int __alloc_and_insert_iova_best_fit(struct iova_domain *iovad,
+					    unsigned long size,
+					    unsigned long limit_pfn,
+					    struct iova *new, bool size_aligned)
+{
+	struct rb_node *curr, *prev;
+	struct iova *curr_iova, *prev_iova;
+	unsigned long flags;
+	unsigned long align_mask = ~0UL;
+	struct rb_node *candidate_rb_parent;
+	unsigned long new_pfn, candidate_pfn = ~0UL;
+	unsigned long gap, candidate_gap = ~0UL;
+
+	if (size_aligned)
+		align_mask <<= limit_align_shift(iovad, fls_long(size - 1));
+
+	/* Walk the tree backwards */
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
+	curr = &iovad->anchor.node;
+	prev = rb_prev(curr);
+	for (; prev; curr = prev, prev = rb_prev(curr)) {
+		curr_iova = rb_entry(curr, struct iova, node);
+		prev_iova = rb_entry(prev, struct iova, node);
+
+		limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
+		new_pfn = (limit_pfn - size) & align_mask;
+		gap = curr_iova->pfn_lo - prev_iova->pfn_hi - 1;
+		if ((limit_pfn >= size) && (new_pfn > prev_iova->pfn_hi)
+				&& (gap < candidate_gap)) {
+			candidate_gap = gap;
+			candidate_pfn = new_pfn;
+			candidate_rb_parent = curr;
+			if (gap == size)
+				goto insert;
+		}
+	}
+
+	curr_iova = rb_entry(curr, struct iova, node);
+	limit_pfn = min(limit_pfn, curr_iova->pfn_lo);
+	new_pfn = (limit_pfn - size) & align_mask;
+	gap = curr_iova->pfn_lo - iovad->start_pfn;
+	if (limit_pfn >= size && new_pfn >= iovad->start_pfn &&
+			gap < candidate_gap) {
+		candidate_gap = gap;
+		candidate_pfn = new_pfn;
+		candidate_rb_parent = curr;
+	}
+
+insert:
+	if (candidate_pfn == ~0UL) {
+		spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+		return -ENOMEM;
+	}
+
+	/* pfn_lo will point to size aligned address if size_aligned is set */
+	new->pfn_lo = candidate_pfn;
+	new->pfn_hi = new->pfn_lo + size - 1;
+
+	/* If we have 'prev', it's a valid place to start the insertion. */
+	iova_insert_rbtree(&iovad->rbroot, new, candidate_rb_parent);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
+	return 0;
+}
+
 static struct kmem_cache *iova_cache;
 static unsigned int iova_cache_users;
 static DEFINE_MUTEX(iova_cache_mutex);
@@ -302,8 +396,13 @@
 	if (!new_iova)
 		return NULL;
 
-	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
-			new_iova, size_aligned);
+	if (iovad->best_fit) {
+		ret = __alloc_and_insert_iova_best_fit(iovad, size,
+				limit_pfn + 1, new_iova, size_aligned);
+	} else {
+		ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1,
+				new_iova, size_aligned);
+	}
 
 	if (ret) {
 		free_iova_mem(new_iova);
@@ -431,6 +530,7 @@
 		flush_rcache = false;
 		for_each_online_cpu(cpu)
 			free_cpu_cached_iovas(cpu, iovad);
+		free_global_cached_iovas(iovad);
 		goto retry;
 	}
 
@@ -1046,5 +1146,27 @@
 	}
 }
 
+/*
+ * Free all the IOVA ranges held in the global (depot) caches.
+ */
+void free_global_cached_iovas(struct iova_domain *iovad)
+{
+	struct iova_rcache *rcache;
+	unsigned long flags;
+	int i, j;
+
+	for (i = 0; i < IOVA_RANGE_CACHE_MAX_SIZE; ++i) {
+		rcache = &iovad->rcaches[i];
+		spin_lock_irqsave(&rcache->lock, flags);
+		for (j = 0; j < rcache->depot_size; ++j) {
+			iova_magazine_free_pfns(rcache->depot[j], iovad);
+			iova_magazine_free(rcache->depot[j]);
+			rcache->depot[j] = NULL;
+		}
+		rcache->depot_size = 0;
+		spin_unlock_irqrestore(&rcache->lock, flags);
+	}
+}
+
 MODULE_AUTHOR("Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>");
 MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 2aa79c3..194ea43 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -431,8 +431,9 @@
          for Goldfish based virtual platforms.
 
 config QCOM_PDC
-	bool "QCOM PDC"
+	tristate "QCOM PDC"
 	depends on ARCH_QCOM
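+	# if QCOM_SCM=m this can't be =y (the PDC calls into the SCM driver)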
+	depends on QCOM_SCM || !QCOM_SCM
 	select IRQ_DOMAIN_HIERARCHY
 	help
 	  Power Domain Controller driver to manage and configure wakeup
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 16fecc0..a17b6cf 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -18,6 +18,10 @@
 #include <linux/percpu.h>
 #include <linux/refcount.h>
 #include <linux/slab.h>
+#include <linux/syscore_ops.h>
+#include <linux/wakeup_reason.h>
+#include <trace/hooks/gic_v3.h>
 
 #include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic-common.h>
@@ -29,6 +33,8 @@
 #include <asm/smp_plat.h>
 #include <asm/virt.h>
 
+#include <trace/hooks/gic.h>
+
 #include "irq-gic-common.h"
 
 #define GICD_INT_NMI_PRI	(GICD_INT_DEF_PRI & ~0x80)
@@ -670,6 +676,7 @@
 
 	if (handle_domain_irq(gic_data.domain, irqnr, regs)) {
 		WARN_ONCE(true, "Unexpected interrupt received!\n");
+		log_abnormal_wakeup_reason("unexpected HW IRQ %u", irqnr);
 		gic_deactivate_unhandled(irqnr);
 	}
 }
@@ -764,11 +771,15 @@
 	 * enabled.
 	 */
 	affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
-	for (i = 32; i < GIC_LINE_NR; i++)
+	for (i = 32; i < GIC_LINE_NR; i++) {
+		trace_android_vh_gic_v3_affinity_init(i, GICD_IROUTER, &affinity);
 		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
+	}
 
-	for (i = 0; i < GIC_ESPI_NR; i++)
+	for (i = 0; i < GIC_ESPI_NR; i++) {
+		trace_android_vh_gic_v3_affinity_init(i, GICD_IROUTERnE, &affinity);
 		gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
+	}
 }
 
 static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
@@ -1200,6 +1211,7 @@
 	reg = gic_dist_base(d) + offset + (index * 8);
 	val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
 
+	trace_android_vh_gic_v3_set_affinity(d, mask_val, &val);
 	gic_write_irouter(val, reg);
 
 	/*
@@ -1254,6 +1266,26 @@
 static inline void gic_cpu_pm_init(void) { }
 #endif /* CONFIG_CPU_PM */
 
+#ifdef CONFIG_PM
+static void gic_resume(void)
+{
+	trace_android_vh_gic_resume(gic_data.domain, gic_data.dist_base);
+}
+
+static struct syscore_ops gic_syscore_ops = {
+	.resume = gic_resume,
+};
+
+static void gic_syscore_init(void)
+{
+	register_syscore_ops(&gic_syscore_ops);
+}
+
+#else
+static inline void gic_syscore_init(void) { }
+#endif
+
 static struct irq_chip gic_chip = {
 	.name			= "GICv3",
 	.irq_mask		= gic_mask_irq,
@@ -1710,6 +1742,7 @@
 	gic_cpu_init();
 	gic_smp_init();
 	gic_cpu_pm_init();
+	gic_syscore_init();
 
 	if (gic_dist_supports_lpis()) {
 		its_init(handle, &gic_data.rdists, gic_data.domain);
diff --git a/drivers/irqchip/qcom-pdc.c b/drivers/irqchip/qcom-pdc.c
index bd39e9d..abbb795 100644
--- a/drivers/irqchip/qcom-pdc.c
+++ b/drivers/irqchip/qcom-pdc.c
@@ -11,14 +11,18 @@
 #include <linux/irqdomain.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/soc/qcom/irq.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 
+#include <linux/qcom_scm.h>
+
 #define PDC_MAX_IRQS		168
 #define PDC_MAX_GPIO_IRQS	256
 
@@ -36,10 +40,20 @@
 	u32 cnt;
 };
 
+struct spi_cfg_regs {
+	union {
+		u64 start;
+		void __iomem *base;
+	};
+	resource_size_t size;
+	bool scm_io;
+};
+
 static DEFINE_RAW_SPINLOCK(pdc_lock);
 static void __iomem *pdc_base;
 static struct pdc_pin_region *pdc_region;
 static int pdc_region_cnt;
+static struct spi_cfg_regs *spi_cfg;
 
 static void pdc_reg_write(int reg, u32 i, u32 val)
 {
@@ -121,6 +135,57 @@
 	irq_chip_unmask_parent(d);
 }
 
+static u32 __spi_pin_read(unsigned int pin)
+{
+	void __iomem *cfg_reg = spi_cfg->base + pin * 4;
+	u64 scm_cfg_reg = spi_cfg->start + pin * 4;
+
+	if (spi_cfg->scm_io) {
+		unsigned int val;
+
+		qcom_scm_io_readl(scm_cfg_reg, &val);
+		return val;
+	} else {
+		return readl(cfg_reg);
+	}
+}
+
+static void __spi_pin_write(unsigned int pin, unsigned int val)
+{
+	void __iomem *cfg_reg = spi_cfg->base + pin * 4;
+	u64 scm_cfg_reg = spi_cfg->start + pin * 4;
+
+	if (spi_cfg->scm_io)
+		qcom_scm_io_writel(scm_cfg_reg, val);
+	else
+		writel(val, cfg_reg);
+}
+
+static int spi_configure_type(irq_hw_number_t hwirq, unsigned int type)
+{
+	int spi = hwirq - 32;
+	u32 pin = spi / 32;
+	u32 mask = BIT(spi % 32);
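+	/*
+	 * One bit per SPI, 32 SPIs per config register: e.g. hwirq 67 is
+	 * SPI 35, register ("pin") 1, bit 3. A set bit marks the interrupt
+	 * as level-triggered in the firmware's register.
+	 */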
+	u32 val;
+	unsigned long flags;
+
+	if (!spi_cfg)
+		return 0;
+
+	if (pin * 4 > spi_cfg->size)
+		return -EFAULT;
+
+	raw_spin_lock_irqsave(&pdc_lock, flags);
+	val = __spi_pin_read(pin);
+	val &= ~mask;
+	if (type & IRQ_TYPE_LEVEL_MASK)
+		val |= mask;
+	__spi_pin_write(pin, val);
+	raw_spin_unlock_irqrestore(&pdc_lock, flags);
+
+	return 0;
+}
+
 /*
  * GIC does not handle falling edge or active low. To allow falling edge and
  * active low interrupts to be handled at GIC, PDC has an inverter that inverts
@@ -158,7 +223,9 @@
 static int qcom_pdc_gic_set_type(struct irq_data *d, unsigned int type)
 {
 	int pin_out = d->hwirq;
+	int parent_hwirq = d->parent_data->hwirq;
 	enum pdc_irq_config_bits pdc_type;
+	int ret;
 
 	if (pin_out == GPIO_NO_WAKE_IRQ)
 		return 0;
@@ -189,6 +256,11 @@
 
 	pdc_reg_write(IRQ_i_CFG, pin_out, pdc_type);
 
+	/* Additionally, configure (only) the GPIO in the f/w */
+	ret = spi_configure_type(parent_hwirq, type);
+	if (ret)
+		return ret;
+
 	return irq_chip_set_type_parent(d, type);
 }
 
@@ -387,6 +459,7 @@
 static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
 {
 	struct irq_domain *parent_domain, *pdc_domain, *pdc_gpio_domain;
+	struct resource res;
 	int ret;
 
 	pdc_base = of_iomap(node, 0);
@@ -417,6 +490,27 @@
 		goto fail;
 	}
 
+	ret = of_address_to_resource(node, 1, &res);
+	if (!ret) {
+		spi_cfg = kzalloc(sizeof(*spi_cfg), GFP_KERNEL);
+		if (!spi_cfg) {
+			ret = -ENOMEM;
+			goto remove;
+		}
+		spi_cfg->scm_io = of_find_property(node,
+						   "qcom,scm-spi-cfg", NULL);
+		spi_cfg->size = resource_size(&res);
+		if (spi_cfg->scm_io) {
+			spi_cfg->start = res.start;
+		} else {
+			spi_cfg->base = ioremap(res.start, spi_cfg->size);
+			if (!spi_cfg->base) {
+				ret = -ENOMEM;
+				goto remove;
+			}
+		}
+	}
+
 	pdc_gpio_domain = irq_domain_create_hierarchy(parent_domain,
 					IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP,
 					PDC_MAX_GPIO_IRQS,
@@ -434,10 +528,35 @@
 
 remove:
 	irq_domain_remove(pdc_domain);
+	kfree(spi_cfg);
 fail:
 	kfree(pdc_region);
 	iounmap(pdc_base);
 	return ret;
 }
 
-IRQCHIP_DECLARE(qcom_pdc, "qcom,pdc", qcom_pdc_init);
+static int qcom_pdc_probe(struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	struct device_node *parent = of_irq_find_parent(np);
+
+	return qcom_pdc_init(np, parent);
+}
+
+static const struct of_device_id qcom_pdc_match_table[] = {
+	{ .compatible = "qcom,pdc" },
+	{}
+};
+MODULE_DEVICE_TABLE(of, qcom_pdc_match_table);
+
+static struct platform_driver qcom_pdc_driver = {
+	.probe = qcom_pdc_probe,
+	.driver = {
+		.name = "qcom-pdc",
+		.of_match_table = qcom_pdc_match_table,
+		.suppress_bind_attrs = true,
+	},
+};
+module_platform_driver(qcom_pdc_driver);
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Power Domain Controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 30ba357..5c361df 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -286,6 +286,27 @@
 
 	  If unsure, say N.
 
+config DM_DEFAULT_KEY
+	tristate "Default-key target support"
+	depends on BLK_DEV_DM
+	depends on BLK_INLINE_ENCRYPTION
+	# dm-default-key doesn't require -o inlinecrypt, but it does currently
+	# rely on the inline encryption hooks being built into the kernel.
+	depends on FS_ENCRYPTION_INLINE_CRYPT
+	help
+	  This device-mapper target allows you to create a device that
+	  assigns a default encryption key to bios that aren't for the
+	  contents of an encrypted file.
+
+	  This ensures that all blocks on-disk will be encrypted with
+	  some key, without the performance hit of file contents being
+	  encrypted twice when fscrypt (File-Based Encryption) is used.
+
+	  It is only appropriate to use dm-default-key when key
+	  configuration is tightly controlled, like it is in Android,
+	  such that all fscrypt keys are at least as hard to compromise
+	  as the default key.
+
 config DM_SNAPSHOT
        tristate "Snapshot target"
        depends on BLK_DEV_DM
@@ -537,6 +558,17 @@
 
 	  If unsure, say N.
 
+config DM_VERITY_AVB
+	tristate "Support AVB specific verity error behavior"
+	depends on DM_VERITY
+	help
+	  Enables Android Verified Boot platform-specific error
+	  behavior. In particular, it will modify the vbmeta partition
+	  specified on the kernel command-line when a non-transient error
+	  occurs (followed by a panic).
+
+	  If unsure, say N.
+
 config DM_VERITY_FEC
 	bool "Verity forward error correction support"
 	depends on DM_VERITY
@@ -617,4 +649,16 @@
 
 	  If unsure, say N.
 
+config DM_BOW
+	tristate "Backup block device"
+	depends on BLK_DEV_DM
+	select DM_BUFIO
+	help
+	  This device-mapper target takes a device and keeps a log of all
+	  changes, using free blocks identified by issuing a trim command.
+	  The original state can then be restored by running a command-line
+	  utility, or the changes committed by simply replacing the target.
+
+	  If unsure, say N.
+
 endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 6d3e234..29401cc 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -52,6 +52,7 @@
 obj-$(CONFIG_DM_BUFIO)		+= dm-bufio.o
 obj-$(CONFIG_DM_BIO_PRISON)	+= dm-bio-prison.o
 obj-$(CONFIG_DM_CRYPT)		+= dm-crypt.o
+obj-$(CONFIG_DM_DEFAULT_KEY)	+= dm-default-key.o
 obj-$(CONFIG_DM_DELAY)		+= dm-delay.o
 obj-$(CONFIG_DM_DUST)		+= dm-dust.o
 obj-$(CONFIG_DM_FLAKEY)		+= dm-flakey.o
@@ -77,6 +78,7 @@
 obj-$(CONFIG_DM_INTEGRITY)	+= dm-integrity.o
 obj-$(CONFIG_DM_ZONED)		+= dm-zoned.o
 obj-$(CONFIG_DM_WRITECACHE)	+= dm-writecache.o
+obj-$(CONFIG_DM_BOW)		+= dm-bow.o
 
 ifeq ($(CONFIG_DM_INIT),y)
 dm-mod-objs			+= dm-init.o
@@ -86,6 +88,10 @@
 dm-mod-objs			+= dm-uevent.o
 endif
 
+ifeq ($(CONFIG_DM_VERITY_AVB),y)
+dm-verity-objs			+= dm-verity-avb.o
+endif
+
 ifeq ($(CONFIG_DM_VERITY_FEC),y)
 dm-verity-objs			+= dm-verity-fec.o
 endif
diff --git a/drivers/md/dm-bow.c b/drivers/md/dm-bow.c
new file mode 100644
index 0000000..ea6a30f
--- /dev/null
+++ b/drivers/md/dm-bow.c
@@ -0,0 +1,1297 @@
+/*
+ * Copyright (C) 2018 Google Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+#include "dm-core.h"
+
+#include <linux/crc32.h>
+#include <linux/dm-bufio.h>
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX "bow"
+
+struct log_entry {
+	u64 source;
+	u64 dest;
+	u32 size;
+	u32 checksum;
+} __packed;
+
+struct log_sector {
+	u32 magic;
+	u16 header_version;
+	u16 header_size;
+	u32 block_size;
+	u32 count;
+	u32 sequence;
+	sector_t sector0;
+	struct log_entry entries[];
+} __packed;
+
+/*
+ * MAGIC is "BOW" in ASCII; stored little-endian, the on-disk bytes
+ * read 'B', 'O', 'W', 0.
+ */
+#define MAGIC 0x00574f42
+#define HEADER_VERSION 0x0100
+
+/*
+ * A sorted set of ranges representing the state of the data on the device.
+ * Use an rb_tree for fast lookup of a given sector
+ * Consecutive ranges are always of different type - operations on this
+ * set must merge matching consecutive ranges.
+ *
+ * Top range is always of type TOP
+ */
+struct bow_range {
+	struct rb_node		node;
+	sector_t		sector;
+	enum {
+		INVALID,	/* Type not set */
+		SECTOR0,	/* First sector - holds log record */
+		SECTOR0_CURRENT,/* Live contents of sector0 */
+		UNCHANGED,	/* Original contents */
+		TRIMMED,	/* Range has been trimmed */
+		CHANGED,	/* Range has been changed */
+		BACKUP,		/* Range is being used as a backup */
+		TOP,		/* Final range - sector is size of device */
+	} type;
+	struct list_head	trimmed_list; /* list of TRIMMED ranges */
+};
+
+static const char * const readable_type[] = {
+	"Invalid",
+	"Sector0",
+	"Sector0_current",
+	"Unchanged",
+	"Free",
+	"Changed",
+	"Backup",
+	"Top",
+};
+
+enum state {
+	TRIM,
+	CHECKPOINT,
+	COMMITTED,
+};
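+
+/*
+ * The state machine only moves forward one step at a time:
+ * TRIM -> CHECKPOINT -> COMMITTED, driven by writes to the sysfs
+ * "state" attribute (see state_store()).
+ */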
+
+struct bow_context {
+	struct dm_dev *dev;
+	u32 block_size;
+	u32 block_shift;
+	struct workqueue_struct *workqueue;
+	struct dm_bufio_client *bufio;
+	struct mutex ranges_lock; /* Hold to access this struct and/or ranges */
+	struct rb_root ranges;
+	struct dm_kobject_holder kobj_holder;	/* for sysfs attributes */
+	atomic_t state; /* One of the enum state values above */
+	u64 trims_total;
+	struct log_sector *log_sector;
+	struct list_head trimmed_list;
+	bool forward_trims;
+};
+
+sector_t range_top(struct bow_range *br)
+{
+	return container_of(rb_next(&br->node), struct bow_range, node)
+		->sector;
+}
+
+u64 range_size(struct bow_range *br)
+{
+	return (range_top(br) - br->sector) * SECTOR_SIZE;
+}
+
+static sector_t bvec_top(struct bvec_iter *bi_iter)
+{
+	return bi_iter->bi_sector + bi_iter->bi_size / SECTOR_SIZE;
+}
+
+/*
+ * Find the first range that overlaps with bi_iter.
+ * bi_iter is truncated to the size of the overlapping sub-range.
+ */
+static struct bow_range *find_first_overlapping_range(struct rb_root *ranges,
+						      struct bvec_iter *bi_iter)
+{
+	struct rb_node *node = ranges->rb_node;
+	struct bow_range *br;
+
+	while (node) {
+		br = container_of(node, struct bow_range, node);
+
+		if (br->sector <= bi_iter->bi_sector
+		    && bi_iter->bi_sector < range_top(br))
+			break;
+
+		if (bi_iter->bi_sector < br->sector)
+			node = node->rb_left;
+		else
+			node = node->rb_right;
+	}
+
+	WARN_ON(!node);
+	if (!node)
+		return NULL;
+
+	if (range_top(br) - bi_iter->bi_sector
+	    < bi_iter->bi_size >> SECTOR_SHIFT)
+		bi_iter->bi_size = (range_top(br) - bi_iter->bi_sector)
+			<< SECTOR_SHIFT;
+
+	return br;
+}
+
+void add_before(struct rb_root *ranges, struct bow_range *new_br,
+		struct bow_range *existing)
+{
+	struct rb_node *parent = &(existing->node);
+	struct rb_node **link = &(parent->rb_left);
+
+	while (*link) {
+		parent = *link;
+		link = &((*link)->rb_right);
+	}
+
+	rb_link_node(&new_br->node, parent, link);
+	rb_insert_color(&new_br->node, ranges);
+}
+
+/*
+ * Given a range br returned by find_first_overlapping_range, split br into a
+ * leading range, a range matching the bi_iter and a trailing range.
+ * Leading and trailing may end up size 0 and will then be deleted. The
+ * new range matching the bi_iter is then returned and should have its type
+ * and type specific fields populated.
+ * If bi_iter runs off the end of the range, bi_iter is truncated accordingly
+ */
+static int split_range(struct bow_context *bc, struct bow_range **br,
+		       struct bvec_iter *bi_iter)
+{
+	struct bow_range *new_br;
+
+	if (bi_iter->bi_sector < (*br)->sector) {
+		WARN_ON(true);
+		return BLK_STS_IOERR;
+	}
+
+	if (bi_iter->bi_sector > (*br)->sector) {
+		struct bow_range *leading_br =
+			kzalloc(sizeof(*leading_br), GFP_KERNEL);
+
+		if (!leading_br)
+			return BLK_STS_RESOURCE;
+
+		*leading_br = **br;
+		if (leading_br->type == TRIMMED)
+			list_add(&leading_br->trimmed_list, &bc->trimmed_list);
+
+		add_before(&bc->ranges, leading_br, *br);
+		(*br)->sector = bi_iter->bi_sector;
+	}
+
+	if (bvec_top(bi_iter) >= range_top(*br)) {
+		bi_iter->bi_size = (range_top(*br) - (*br)->sector)
+					* SECTOR_SIZE;
+		return BLK_STS_OK;
+	}
+
+	/* new_br will be the beginning, existing br will be the tail */
+	new_br = kzalloc(sizeof(*new_br), GFP_KERNEL);
+	if (!new_br)
+		return BLK_STS_RESOURCE;
+
+	new_br->sector = (*br)->sector;
+	(*br)->sector = bvec_top(bi_iter);
+	add_before(&bc->ranges, new_br, *br);
+	*br = new_br;
+
+	return BLK_STS_OK;
+}
+
+/*
+ * Sets type of a range. May merge range into surrounding ranges
+ * Since br may be invalidated, always sets br to NULL to prevent
+ * usage after this is called
+ */
+static void set_type(struct bow_context *bc, struct bow_range **br, int type)
+{
+	struct bow_range *prev = container_of(rb_prev(&(*br)->node),
+						      struct bow_range, node);
+	struct bow_range *next = container_of(rb_next(&(*br)->node),
+						      struct bow_range, node);
+
+	if ((*br)->type == TRIMMED) {
+		bc->trims_total -= range_size(*br);
+		list_del(&(*br)->trimmed_list);
+	}
+
+	if (type == TRIMMED) {
+		bc->trims_total += range_size(*br);
+		list_add(&(*br)->trimmed_list, &bc->trimmed_list);
+	}
+
+	(*br)->type = type;
+
+	if (next->type == type) {
+		if (type == TRIMMED)
+			list_del(&next->trimmed_list);
+		rb_erase(&next->node, &bc->ranges);
+		kfree(next);
+	}
+
+	if (prev->type == type) {
+		if (type == TRIMMED)
+			list_del(&(*br)->trimmed_list);
+		rb_erase(&(*br)->node, &bc->ranges);
+		kfree(*br);
+	}
+
+	*br = NULL;
+}
+
+static struct bow_range *find_free_range(struct bow_context *bc)
+{
+	if (list_empty(&bc->trimmed_list)) {
+		DMERR("Unable to find free space to back up to");
+		return NULL;
+	}
+
+	return list_first_entry(&bc->trimmed_list, struct bow_range,
+				trimmed_list);
+}
+
+static sector_t sector_to_page(struct bow_context const *bc, sector_t sector)
+{
+	WARN_ON((sector & (((sector_t)1 << (bc->block_shift - SECTOR_SHIFT)) - 1))
+		!= 0);
+	return sector >> (bc->block_shift - SECTOR_SHIFT);
+}
+
+static int copy_data(struct bow_context const *bc,
+		     struct bow_range *source, struct bow_range *dest,
+		     u32 *checksum)
+{
+	int i;
+
+	if (range_size(source) != range_size(dest)) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+
+	if (checksum)
+		*checksum = sector_to_page(bc, source->sector);
+
+	for (i = 0; i < range_size(source) >> bc->block_shift; ++i) {
+		struct dm_buffer *read_buffer, *write_buffer;
+		u8 *read, *write;
+		sector_t page = sector_to_page(bc, source->sector) + i;
+
+		read = dm_bufio_read(bc->bufio, page, &read_buffer);
+		if (IS_ERR(read)) {
+			DMERR("Cannot read page %llu",
+			      (unsigned long long)page);
+			return PTR_ERR(read);
+		}
+
+		if (checksum)
+			*checksum = crc32(*checksum, read, bc->block_size);
+
+		write = dm_bufio_new(bc->bufio,
+				     sector_to_page(bc, dest->sector) + i,
+				     &write_buffer);
+		if (IS_ERR(write)) {
+			DMERR("Cannot write sector");
+			dm_bufio_release(read_buffer);
+			return PTR_ERR(write);
+		}
+
+		memcpy(write, read, bc->block_size);
+
+		dm_bufio_mark_buffer_dirty(write_buffer);
+		dm_bufio_release(write_buffer);
+		dm_bufio_release(read_buffer);
+	}
+
+	dm_bufio_write_dirty_buffers(bc->bufio);
+	return BLK_STS_OK;
+}
+
+/****** logging functions ******/
+
+static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
+			 unsigned int size, u32 checksum);
+
+static int backup_log_sector(struct bow_context *bc)
+{
+	struct bow_range *first_br, *free_br;
+	struct bvec_iter bi_iter;
+	u32 checksum = 0;
+	int ret;
+
+	first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);
+
+	if (first_br->type != SECTOR0) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+
+	if (range_size(first_br) != bc->block_size) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+
+	free_br = find_free_range(bc);
+	/* No space left - return this error to userspace */
+	if (!free_br)
+		return BLK_STS_NOSPC;
+	bi_iter.bi_sector = free_br->sector;
+	bi_iter.bi_size = bc->block_size;
+	ret = split_range(bc, &free_br, &bi_iter);
+	if (ret)
+		return ret;
+	if (bi_iter.bi_size != bc->block_size) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+
+	ret = copy_data(bc, first_br, free_br, &checksum);
+	if (ret)
+		return ret;
+
+	bc->log_sector->count = 0;
+	bc->log_sector->sequence++;
+	ret = add_log_entry(bc, first_br->sector, free_br->sector,
+			    range_size(first_br), checksum);
+	if (ret)
+		return ret;
+
+	set_type(bc, &free_br, BACKUP);
+	return BLK_STS_OK;
+}
+
+static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
+			 unsigned int size, u32 checksum)
+{
+	struct dm_buffer *sector_buffer;
+	u8 *sector;
+
+	if (sizeof(struct log_sector)
+	    + sizeof(struct log_entry) * (bc->log_sector->count + 1)
+		> bc->block_size) {
+		int ret = backup_log_sector(bc);
+
+		if (ret)
+			return ret;
+	}
+
+	sector = dm_bufio_new(bc->bufio, 0, &sector_buffer);
+	if (IS_ERR(sector)) {
+		DMERR("Cannot write boot sector");
+		dm_bufio_release(sector_buffer);
+		return BLK_STS_NOSPC;
+	}
+
+	bc->log_sector->entries[bc->log_sector->count].source = source;
+	bc->log_sector->entries[bc->log_sector->count].dest = dest;
+	bc->log_sector->entries[bc->log_sector->count].size = size;
+	bc->log_sector->entries[bc->log_sector->count].checksum = checksum;
+	bc->log_sector->count++;
+
+	memcpy(sector, bc->log_sector, bc->block_size);
+	dm_bufio_mark_buffer_dirty(sector_buffer);
+	dm_bufio_release(sector_buffer);
+	dm_bufio_write_dirty_buffers(bc->bufio);
+	return BLK_STS_OK;
+}
+
+static int prepare_log(struct bow_context *bc)
+{
+	struct bow_range *free_br, *first_br;
+	struct bvec_iter bi_iter;
+	u32 checksum = 0;
+	int ret;
+
+	/* Carve out first sector as log sector */
+	first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);
+	if (first_br->type != UNCHANGED) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+
+	if (range_size(first_br) < bc->block_size) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+	bi_iter.bi_sector = 0;
+	bi_iter.bi_size = bc->block_size;
+	ret = split_range(bc, &first_br, &bi_iter);
+	if (ret)
+		return ret;
+	first_br->type = SECTOR0;
+	if (range_size(first_br) != bc->block_size) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+
+	/* Find free sector for active sector0 reads/writes */
+	free_br = find_free_range(bc);
+	if (!free_br)
+		return BLK_STS_NOSPC;
+	bi_iter.bi_sector = free_br->sector;
+	bi_iter.bi_size = bc->block_size;
+	ret = split_range(bc, &free_br, &bi_iter);
+	if (ret)
+		return ret;
+	free_br->type = SECTOR0_CURRENT;
+
+	/* Copy data */
+	ret = copy_data(bc, first_br, free_br, NULL);
+	if (ret)
+		return ret;
+
+	bc->log_sector->sector0 = free_br->sector;
+
+	/* Find free sector to back up original sector zero */
+	free_br = find_free_range(bc);
+	if (!free_br)
+		return BLK_STS_NOSPC;
+	bi_iter.bi_sector = free_br->sector;
+	bi_iter.bi_size = bc->block_size;
+	ret = split_range(bc, &free_br, &bi_iter);
+	if (ret)
+		return ret;
+
+	/* Back up */
+	ret = copy_data(bc, first_br, free_br, &checksum);
+	if (ret)
+		return ret;
+
+	/*
+	 * Set up our replacement boot sector - it will get written when we
+	 * add the first log entry, which we do immediately
+	 */
+	bc->log_sector->magic = MAGIC;
+	bc->log_sector->header_version = HEADER_VERSION;
+	bc->log_sector->header_size = sizeof(*bc->log_sector);
+	bc->log_sector->block_size = bc->block_size;
+	bc->log_sector->count = 0;
+	bc->log_sector->sequence = 0;
+
+	/* Add log entry */
+	ret = add_log_entry(bc, first_br->sector, free_br->sector,
+			    range_size(first_br), checksum);
+	if (ret)
+		return ret;
+
+	set_type(bc, &free_br, BACKUP);
+	return BLK_STS_OK;
+}
+
+static struct bow_range *find_sector0_current(struct bow_context *bc)
+{
+	struct bvec_iter bi_iter;
+
+	bi_iter.bi_sector = bc->log_sector->sector0;
+	bi_iter.bi_size = bc->block_size;
+	return find_first_overlapping_range(&bc->ranges, &bi_iter);
+}
+
+/****** sysfs interface functions ******/
+
+static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	struct bow_context *bc = container_of(kobj, struct bow_context,
+					      kobj_holder.kobj);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&bc->state));
+}
+
+static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
+			   const char *buf, size_t count)
+{
+	struct bow_context *bc = container_of(kobj, struct bow_context,
+					      kobj_holder.kobj);
+	enum state state, original_state;
+	int ret;
+
+	state = buf[0] - '0';
+	if (state < TRIM || state > COMMITTED) {
+		DMERR("State value %d out of range", state);
+		return -EINVAL;
+	}
+
+	mutex_lock(&bc->ranges_lock);
+	original_state = atomic_read(&bc->state);
+	if (state != original_state + 1) {
+		DMERR("Invalid state change from %d to %d",
+		      original_state, state);
+		ret = -EINVAL;
+		goto bad;
+	}
+
+	DMINFO("Switching to state %s", state == CHECKPOINT ? "Checkpoint"
+	       : state == COMMITTED ? "Committed" : "Unknown");
+
+	if (state == CHECKPOINT) {
+		ret = prepare_log(bc);
+		if (ret) {
+			DMERR("Failed to switch to checkpoint state");
+			goto bad;
+		}
+	} else if (state == COMMITTED) {
+		struct bow_range *br = find_sector0_current(bc);
+		struct bow_range *sector0_br =
+			container_of(rb_first(&bc->ranges), struct bow_range,
+				     node);
+
+		ret = copy_data(bc, br, sector0_br, NULL);
+		if (ret) {
+			DMERR("Failed to switch to committed state");
+			goto bad;
+		}
+	}
+	atomic_inc(&bc->state);
+	ret = count;
+
+bad:
+	mutex_unlock(&bc->ranges_lock);
+	return ret;
+}
+
+static ssize_t free_show(struct kobject *kobj, struct kobj_attribute *attr,
+			  char *buf)
+{
+	struct bow_context *bc = container_of(kobj, struct bow_context,
+					      kobj_holder.kobj);
+	u64 trims_total;
+
+	mutex_lock(&bc->ranges_lock);
+	trims_total = bc->trims_total;
+	mutex_unlock(&bc->ranges_lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", trims_total);
+}
+
+static struct kobj_attribute attr_state = __ATTR_RW(state);
+static struct kobj_attribute attr_free = __ATTR_RO(free);
+
+static struct attribute *bow_attrs[] = {
+	&attr_state.attr,
+	&attr_free.attr,
+	NULL
+};
+
+static struct kobj_type bow_ktype = {
+	.sysfs_ops = &kobj_sysfs_ops,
+	.default_attrs = bow_attrs,
+	.release = dm_kobject_release
+};
+
+/****** constructor/destructor ******/
+
+static void dm_bow_dtr(struct dm_target *ti)
+{
+	struct bow_context *bc = (struct bow_context *) ti->private;
+	struct kobject *kobj;
+
+	while (rb_first(&bc->ranges)) {
+		struct bow_range *br = container_of(rb_first(&bc->ranges),
+						    struct bow_range, node);
+
+		rb_erase(&br->node, &bc->ranges);
+		kfree(br);
+	}
+	if (bc->workqueue)
+		destroy_workqueue(bc->workqueue);
+	if (bc->bufio)
+		dm_bufio_client_destroy(bc->bufio);
+
+	kobj = &bc->kobj_holder.kobj;
+	if (kobj->state_initialized) {
+		kobject_put(kobj);
+		wait_for_completion(dm_get_completion_from_kobject(kobj));
+	}
+
+	kfree(bc->log_sector);
+	kfree(bc);
+}
+
+static void dm_bow_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+	struct bow_context *bc = ti->private;
+	const unsigned int block_size = bc->block_size;
+
+	limits->logical_block_size =
+		max_t(unsigned int, limits->logical_block_size, block_size);
+	limits->physical_block_size =
+		max_t(unsigned int, limits->physical_block_size, block_size);
+	limits->io_min = max_t(unsigned int, limits->io_min, block_size);
+
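+	/*
+	 * If the underlying device does not support discards, advertise
+	 * them anyway (4 KiB granularity, capped at 1 << 15 sectors) so
+	 * that free space can still be tracked; bc->forward_trims records
+	 * whether discards may actually be passed down to the device.
+	 */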
+	if (limits->max_discard_sectors == 0) {
+		limits->discard_granularity = 1 << 12;
+		limits->max_hw_discard_sectors = 1 << 15;
+		limits->max_discard_sectors = 1 << 15;
+		bc->forward_trims = false;
+	} else {
+		limits->discard_granularity = 1 << 12;
+		bc->forward_trims = true;
+	}
+}
+
+static int dm_bow_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct bow_context *bc = ti->private;
+	struct dm_arg_set as;
+	static const struct dm_arg _args[] = {
+		{0, 1, "Invalid number of feature args"},
+	};
+	unsigned int opt_params;
+	const char *opt_string;
+	int err;
+	char dummy;
+
+	as.argc = argc;
+	as.argv = argv;
+
+	err = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
+	if (err)
+		return err;
+
+	while (opt_params--) {
+		opt_string = dm_shift_arg(&as);
+		if (!opt_string) {
+			ti->error = "Not enough feature arguments";
+			return -EINVAL;
+		}
+
+		if (sscanf(opt_string, "block_size:%u%c",
+					&bc->block_size, &dummy) == 1) {
+			if (bc->block_size < SECTOR_SIZE ||
+			    bc->block_size > 4096 ||
+			    !is_power_of_2(bc->block_size)) {
+				ti->error = "Invalid block_size";
+				return -EINVAL;
+			}
+		} else {
+			ti->error = "Invalid feature arguments";
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
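+/*
+ * Construct a bow mapping.  The expected table line is
+ *
+ *	<start> <len> bow <dev_path> [<#feature args> [block_size:<bytes>]]
+ *
+ * for example (hypothetical device and sizes):
+ *
+ *	0 4194304 bow /dev/block/by-name/userdata 1 block_size:4096
+ */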
+static int dm_bow_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct bow_context *bc;
+	struct bow_range *br;
+	int ret;
+	struct mapped_device *md = dm_table_get_md(ti->table);
+
+	if (argc < 1) {
+		ti->error = "Invalid argument count";
+		return -EINVAL;
+	}
+
+	bc = kzalloc(sizeof(*bc), GFP_KERNEL);
+	if (!bc) {
+		ti->error = "Cannot allocate bow context";
+		return -ENOMEM;
+	}
+
+	ti->num_flush_bios = 1;
+	ti->num_discard_bios = 1;
+	ti->num_write_same_bios = 1;
+	ti->private = bc;
+
+	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+			    &bc->dev);
+	if (ret) {
+		ti->error = "Device lookup failed";
+		goto bad;
+	}
+
+	bc->block_size =
+		bdev_get_queue(bc->dev->bdev)->limits.logical_block_size;
+	if (argc > 1) {
+		ret = dm_bow_ctr_optional(ti, argc - 1, &argv[1]);
+		if (ret)
+			goto bad;
+	}
+
+	bc->block_shift = ilog2(bc->block_size);
+	bc->log_sector = kzalloc(bc->block_size, GFP_KERNEL);
+	if (!bc->log_sector) {
+		ti->error = "Cannot allocate log sector";
+		goto bad;
+	}
+
+	init_completion(&bc->kobj_holder.completion);
+	ret = kobject_init_and_add(&bc->kobj_holder.kobj, &bow_ktype,
+				   &disk_to_dev(dm_disk(md))->kobj, "%s",
+				   "bow");
+	if (ret) {
+		ti->error = "Cannot create sysfs node";
+		goto bad;
+	}
+
+	mutex_init(&bc->ranges_lock);
+	bc->ranges = RB_ROOT;
+	bc->bufio = dm_bufio_client_create(bc->dev->bdev, bc->block_size, 1, 0,
+					   NULL, NULL);
+	if (IS_ERR(bc->bufio)) {
+		ti->error = "Cannot initialize dm-bufio";
+		ret = PTR_ERR(bc->bufio);
+		bc->bufio = NULL;
+		goto bad;
+	}
+
+	bc->workqueue = alloc_workqueue("dm-bow",
+					WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM
+					| WQ_UNBOUND, num_online_cpus());
+	if (!bc->workqueue) {
+		ti->error = "Cannot allocate workqueue";
+		ret = -ENOMEM;
+		goto bad;
+	}
+
+	INIT_LIST_HEAD(&bc->trimmed_list);
+
+	br = kzalloc(sizeof(*br), GFP_KERNEL);
+	if (!br) {
+		ti->error = "Cannot allocate ranges";
+		ret = -ENOMEM;
+		goto bad;
+	}
+
+	br->sector = ti->len;
+	br->type = TOP;
+	rb_link_node(&br->node, NULL, &bc->ranges.rb_node);
+	rb_insert_color(&br->node, &bc->ranges);
+
+	br = kzalloc(sizeof(*br), GFP_KERNEL);
+	if (!br) {
+		ti->error = "Cannot allocate ranges";
+		ret = -ENOMEM;
+		goto bad;
+	}
+
+	br->sector = 0;
+	br->type = UNCHANGED;
+	rb_link_node(&br->node, bc->ranges.rb_node,
+		     &bc->ranges.rb_node->rb_left);
+	rb_insert_color(&br->node, &bc->ranges);
+
+	ti->discards_supported = true;
+	ti->may_passthrough_inline_crypto = true;
+
+	return 0;
+
+bad:
+	dm_bow_dtr(ti);
+	return ret;
+}
+
+/****** Handle writes ******/
+
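+/*
+ * The "backup on write" itself: before a write to a not-yet-backed-up
+ * range is allowed through, the original data is copied to a free range
+ * and recorded in the log; only then is the range marked CHANGED.
+ */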
+static int prepare_unchanged_range(struct bow_context *bc, struct bow_range *br,
+				   struct bvec_iter *bi_iter,
+				   bool record_checksum)
+{
+	struct bow_range *backup_br;
+	struct bvec_iter backup_bi;
+	sector_t log_source, log_dest;
+	unsigned int log_size;
+	u32 checksum = 0;
+	int ret;
+	int original_type;
+	sector_t sector0;
+
+	/* Find a free range */
+	backup_br = find_free_range(bc);
+	if (!backup_br)
+		return BLK_STS_NOSPC;
+
+	/* Carve out a backup range. This may be smaller than the br given */
+	backup_bi.bi_sector = backup_br->sector;
+	backup_bi.bi_size = min(range_size(backup_br), (u64) bi_iter->bi_size);
+	ret = split_range(bc, &backup_br, &backup_bi);
+	if (ret)
+		return ret;
+
+	/*
+	 * Carve out a changed range. This will not be smaller than the
+	 * backup br, since the backup br is no larger than the source
+	 * range and iterator.
+	 */
+	bi_iter->bi_size = backup_bi.bi_size;
+	ret = split_range(bc, &br, bi_iter);
+	if (ret)
+		return ret;
+	if (range_size(br) != range_size(backup_br)) {
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+
+	/* Copy data over */
+	ret = copy_data(bc, br, backup_br, record_checksum ? &checksum : NULL);
+	if (ret)
+		return ret;
+
+	/* Add an entry to the log */
+	log_source = br->sector;
+	log_dest = backup_br->sector;
+	log_size = range_size(br);
+
+	/*
+	 * Set the types. Note that since set_type also amalgamates ranges
+	 * we have to set both sectors to their final type before calling
+	 * set_type on either
+	 */
+	original_type = br->type;
+	sector0 = backup_br->sector;
+	bc->trims_total -= range_size(backup_br);
+	if (backup_br->type == TRIMMED)
+		list_del(&backup_br->trimmed_list);
+	backup_br->type = br->type == SECTOR0_CURRENT ? SECTOR0_CURRENT
+						      : BACKUP;
+	br->type = CHANGED;
+	set_type(bc, &backup_br, backup_br->type);
+
+	/*
+	 * Add the log entry after marking the backup sector, since adding a log
+	 * can cause another backup
+	 */
+	ret = add_log_entry(bc, log_source, log_dest, log_size, checksum);
+	if (ret) {
+		br->type = original_type;
+		return ret;
+	}
+
+	/* Now it is safe to mark this backup successful */
+	if (original_type == SECTOR0_CURRENT)
+		bc->log_sector->sector0 = sector0;
+
+	set_type(bc, &br, br->type);
+	return ret;
+}
+
+static int prepare_free_range(struct bow_context *bc, struct bow_range *br,
+			      struct bvec_iter *bi_iter)
+{
+	int ret;
+
+	ret = split_range(bc, &br, bi_iter);
+	if (ret)
+		return ret;
+	set_type(bc, &br, CHANGED);
+	return BLK_STS_OK;
+}
+
+static int prepare_changed_range(struct bow_context *bc, struct bow_range *br,
+				 struct bvec_iter *bi_iter)
+{
+	/* Nothing to do ... */
+	return BLK_STS_OK;
+}
+
+static int prepare_one_range(struct bow_context *bc,
+			     struct bvec_iter *bi_iter)
+{
+	struct bow_range *br = find_first_overlapping_range(&bc->ranges,
+							    bi_iter);
+	switch (br->type) {
+	case CHANGED:
+		return prepare_changed_range(bc, br, bi_iter);
+
+	case TRIMMED:
+		return prepare_free_range(bc, br, bi_iter);
+
+	case UNCHANGED:
+	case BACKUP:
+		return prepare_unchanged_range(bc, br, bi_iter, true);
+
+	/*
+	 * We cannot track the checksum for the active sector0, since it
+	 * may change at any point.
+	 */
+	case SECTOR0_CURRENT:
+		return prepare_unchanged_range(bc, br, bi_iter, false);
+
+	case SECTOR0:	/* Handled in the dm_bow_map */
+	case TOP:	/* Illegal - top is off the end of the device */
+	default:
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+}
+
+struct write_work {
+	struct work_struct work;
+	struct bow_context *bc;
+	struct bio *bio;
+};
+
+static void bow_write(struct work_struct *work)
+{
+	struct write_work *ww = container_of(work, struct write_work, work);
+	struct bow_context *bc = ww->bc;
+	struct bio *bio = ww->bio;
+	struct bvec_iter bi_iter = bio->bi_iter;
+	int ret = BLK_STS_OK;
+
+	kfree(ww);
+
+	mutex_lock(&bc->ranges_lock);
+	do {
+		ret = prepare_one_range(bc, &bi_iter);
+		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
+		bi_iter.bi_size = bio->bi_iter.bi_size
+			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
+			  * SECTOR_SIZE;
+	} while (!ret && bi_iter.bi_size);
+
+	mutex_unlock(&bc->ranges_lock);
+
+	if (!ret) {
+		bio_set_dev(bio, bc->dev->bdev);
+		submit_bio(bio);
+	} else {
+		DMERR("Write failure with error %d", -ret);
+		bio->bi_status = ret;
+		bio_endio(bio);
+	}
+}
+
+static int queue_write(struct bow_context *bc, struct bio *bio)
+{
+	struct write_work *ww = kmalloc(sizeof(*ww), GFP_NOIO | __GFP_NORETRY
+					| __GFP_NOMEMALLOC | __GFP_NOWARN);
+	if (!ww) {
+		DMERR("Failed to allocate write_work");
+		return -ENOMEM;
+	}
+
+	INIT_WORK(&ww->work, bow_write);
+	ww->bc = bc;
+	ww->bio = bio;
+	queue_work(bc->workqueue, &ww->work);
+	return DM_MAPIO_SUBMITTED;
+}
+
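+/*
+ * The first block of the device now holds the log sector, so reads and
+ * writes of sector 0 are redirected to the SECTOR0_CURRENT block.  A bio
+ * extending past the first block is split and the remainder handled
+ * normally.
+ */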
+static int handle_sector0(struct bow_context *bc, struct bio *bio)
+{
+	int ret = DM_MAPIO_REMAPPED;
+
+	if (bio->bi_iter.bi_size > bc->block_size) {
+		struct bio * split = bio_split(bio,
+					       bc->block_size >> SECTOR_SHIFT,
+					       GFP_NOIO,
+					       &fs_bio_set);
+		if (!split) {
+			DMERR("Failed to split bio");
+			bio->bi_status = BLK_STS_RESOURCE;
+			bio_endio(bio);
+			return DM_MAPIO_SUBMITTED;
+		}
+
+		bio_chain(split, bio);
+		split->bi_iter.bi_sector = bc->log_sector->sector0;
+		bio_set_dev(split, bc->dev->bdev);
+		submit_bio(split);
+
+		if (bio_data_dir(bio) == WRITE)
+			ret = queue_write(bc, bio);
+	} else {
+		bio->bi_iter.bi_sector = bc->log_sector->sector0;
+	}
+
+	return ret;
+}
+
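+/*
+ * In the TRIM state, discards mark UNCHANGED ranges as TRIMMED (free to
+ * be used later as backup space), while writes pull overlapping TRIMMED
+ * ranges back to UNCHANGED.
+ */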
+static int add_trim(struct bow_context *bc, struct bio *bio)
+{
+	struct bow_range *br;
+	struct bvec_iter bi_iter = bio->bi_iter;
+
+	DMDEBUG("add_trim: %llu, %u",
+		(unsigned long long)bio->bi_iter.bi_sector,
+		bio->bi_iter.bi_size);
+
+	do {
+		br = find_first_overlapping_range(&bc->ranges, &bi_iter);
+
+		switch (br->type) {
+		case UNCHANGED:
+			if (!split_range(bc, &br, &bi_iter))
+				set_type(bc, &br, TRIMMED);
+			break;
+
+		case TRIMMED:
+			/* Nothing to do */
+			break;
+
+		default:
+			/* No other case is legal in TRIM state */
+			WARN_ON(true);
+			break;
+		}
+
+		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
+		bi_iter.bi_size = bio->bi_iter.bi_size
+			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
+			  * SECTOR_SIZE;
+
+	} while (bi_iter.bi_size);
+
+	bio_endio(bio);
+	return DM_MAPIO_SUBMITTED;
+}
+
+static int remove_trim(struct bow_context *bc, struct bio *bio)
+{
+	struct bow_range *br;
+	struct bvec_iter bi_iter = bio->bi_iter;
+
+	DMDEBUG("remove_trim: %llu, %u",
+		(unsigned long long)bio->bi_iter.bi_sector,
+		bio->bi_iter.bi_size);
+
+	do {
+		br = find_first_overlapping_range(&bc->ranges, &bi_iter);
+
+		switch (br->type) {
+		case UNCHANGED:
+			/* Nothing to do */
+			break;
+
+		case TRIMMED:
+			if (!split_range(bc, &br, &bi_iter))
+				set_type(bc, &br, UNCHANGED);
+			break;
+
+		default:
+			/* No other case is legal in TRIM state */
+			WARN_ON(true);
+			break;
+		}
+
+		bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
+		bi_iter.bi_size = bio->bi_iter.bi_size
+			- (bi_iter.bi_sector - bio->bi_iter.bi_sector)
+			  * SECTOR_SIZE;
+
+	} while (bi_iter.bi_size);
+
+	return DM_MAPIO_REMAPPED;
+}
+
+static int remap_unless_illegal_trim(struct bow_context *bc, struct bio *bio)
+{
+	if (!bc->forward_trims && bio_op(bio) == REQ_OP_DISCARD) {
+		bio->bi_status = BLK_STS_NOTSUPP;
+		bio_endio(bio);
+		return DM_MAPIO_SUBMITTED;
+	} else {
+		bio_set_dev(bio, bc->dev->bdev);
+		return DM_MAPIO_REMAPPED;
+	}
+}
+
+/****** dm interface ******/
+
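+/*
+ * In the COMMITTED state every bio is passed straight through.  Before
+ * that, reads other than of sector 0 also pass through; discards and
+ * writes are intercepted according to the current state.
+ */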
+static int dm_bow_map(struct dm_target *ti, struct bio *bio)
+{
+	int ret = DM_MAPIO_REMAPPED;
+	struct bow_context *bc = ti->private;
+
+	if (likely(atomic_read(&bc->state) == COMMITTED))
+		return remap_unless_illegal_trim(bc, bio);
+
+	if (bio_data_dir(bio) == READ && bio->bi_iter.bi_sector != 0)
+		return remap_unless_illegal_trim(bc, bio);
+
+	if (atomic_read(&bc->state) != COMMITTED) {
+		enum state state;
+
+		mutex_lock(&bc->ranges_lock);
+		state = atomic_read(&bc->state);
+		if (state == TRIM) {
+			if (bio_op(bio) == REQ_OP_DISCARD)
+				ret = add_trim(bc, bio);
+			else if (bio_data_dir(bio) == WRITE)
+				ret = remove_trim(bc, bio);
+			else
+				/* pass-through */;
+		} else if (state == CHECKPOINT) {
+			if (bio->bi_iter.bi_sector == 0)
+				ret = handle_sector0(bc, bio);
+			else if (bio_data_dir(bio) == WRITE)
+				ret = queue_write(bc, bio);
+			else
+				/* pass-through */;
+		} else {
+			/* pass-through */
+		}
+		mutex_unlock(&bc->ranges_lock);
+	}
+
+	if (ret == DM_MAPIO_REMAPPED)
+		return remap_unless_illegal_trim(bc, bio);
+
+	return ret;
+}
+
+static void dm_bow_tablestatus(struct dm_target *ti, char *result,
+			       unsigned int maxlen)
+{
+	char *end = result + maxlen;
+	struct bow_context *bc = ti->private;
+	struct rb_node *i;
+	int trimmed_list_length = 0;
+	int trimmed_range_count = 0;
+	struct bow_range *br;
+
+	if (maxlen == 0)
+		return;
+	result[0] = 0;
+
+	list_for_each_entry(br, &bc->trimmed_list, trimmed_list)
+		if (br->type == TRIMMED) {
+			++trimmed_list_length;
+		} else {
+			scnprintf(result, end - result,
+				  "ERROR: non-trimmed entry in trimmed_list");
+			return;
+		}
+
+	if (!rb_first(&bc->ranges)) {
+		scnprintf(result, end - result, "ERROR: Empty ranges");
+		return;
+	}
+
+	if (container_of(rb_first(&bc->ranges), struct bow_range, node)
+	    ->sector) {
+		scnprintf(result, end - result,
+			 "ERROR: First range does not start at sector 0");
+		return;
+	}
+
+	for (i = rb_first(&bc->ranges); i; i = rb_next(i)) {
+		struct bow_range *br = container_of(i, struct bow_range, node);
+
+		result += scnprintf(result, end - result, "%s: %llu",
+				    readable_type[br->type],
+				    (unsigned long long)br->sector);
+		if (result >= end)
+			return;
+
+		result += scnprintf(result, end - result, "\n");
+		if (result >= end)
+			return;
+
+		if (br->type == TRIMMED)
+			++trimmed_range_count;
+
+		if (br->type == TOP) {
+			if (br->sector != ti->len) {
+				scnprintf(result, end - result,
+					 "\nERROR: Top sector is incorrect");
+			}
+
+			if (&br->node != rb_last(&bc->ranges)) {
+				scnprintf(result, end - result,
+					  "\nERROR: Top sector is not last");
+			}
+
+			break;
+		}
+
+		if (!rb_next(i)) {
+			scnprintf(result, end - result,
+				  "\nERROR: Last range not of type TOP");
+			return;
+		}
+
+		if (br->sector > range_top(br)) {
+			scnprintf(result, end - result,
+				  "\nERROR: sectors out of order");
+			return;
+		}
+	}
+
+	if (trimmed_range_count != trimmed_list_length)
+		scnprintf(result, end - result,
+			  "\nERROR: not all trimmed ranges in trimmed list");
+}
+
+static void dm_bow_status(struct dm_target *ti, status_type_t type,
+			  unsigned int status_flags, char *result,
+			  unsigned int maxlen)
+{
+	switch (type) {
+	case STATUSTYPE_INFO:
+		if (maxlen)
+			result[0] = 0;
+		break;
+
+	case STATUSTYPE_TABLE:
+		dm_bow_tablestatus(ti, result, maxlen);
+		break;
+	}
+}
+
+static int dm_bow_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
+{
+	struct bow_context *bc = ti->private;
+	struct dm_dev *dev = bc->dev;
+
+	*bdev = dev->bdev;
+	/* Only pass ioctls through if the device sizes match exactly. */
+	return ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+static int dm_bow_iterate_devices(struct dm_target *ti,
+				  iterate_devices_callout_fn fn, void *data)
+{
+	struct bow_context *bc = ti->private;
+
+	return fn(ti, bc->dev, 0, ti->len, data);
+}
+
+static struct target_type bow_target = {
+	.name   = "bow",
+	.version = {1, 2, 0},
+	.module = THIS_MODULE,
+	.ctr    = dm_bow_ctr,
+	.dtr    = dm_bow_dtr,
+	.map    = dm_bow_map,
+	.status = dm_bow_status,
+	.prepare_ioctl  = dm_bow_prepare_ioctl,
+	.iterate_devices = dm_bow_iterate_devices,
+	.io_hints = dm_bow_io_hints,
+};
+
+static int __init dm_bow_init(void)
+{
+	int r = dm_register_target(&bow_target);
+
+	if (r < 0)
+		DMERR("registering bow failed %d", r);
+	return r;
+}
+
+static void __exit dm_bow_exit(void)
+{
+	dm_unregister_target(&bow_target);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(dm_bow_init);
+module_exit(dm_bow_exit);
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index d522093..805f2ea 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -13,6 +13,7 @@
 #include <linux/ktime.h>
 #include <linux/genhd.h>
 #include <linux/blk-mq.h>
+#include <linux/keyslot-manager.h>
 
 #include <trace/events/block.h>
 
@@ -52,6 +53,9 @@
 
 	int numa_node_id;
 	struct request_queue *queue;
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+	struct blk_keyslot_manager ksm;
+#endif
 
 	atomic_t holders;
 	atomic_t open_count;
diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c
new file mode 100644
index 0000000..1c57816
--- /dev/null
+++ b/drivers/md/dm-default-key.c
@@ -0,0 +1,427 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Google, Inc.
+ */
+
+#include <linux/blk-crypto.h>
+#include <linux/device-mapper.h>
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX		"default-key"
+
+#define DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE 128
+
+static const struct dm_default_key_cipher {
+	const char *name;
+	enum blk_crypto_mode_num mode_num;
+	int key_size;
+} dm_default_key_ciphers[] = {
+	{
+		.name = "aes-xts-plain64",
+		.mode_num = BLK_ENCRYPTION_MODE_AES_256_XTS,
+		.key_size = 64,
+	}, {
+		.name = "xchacha12,aes-adiantum-plain64",
+		.mode_num = BLK_ENCRYPTION_MODE_ADIANTUM,
+		.key_size = 32,
+	},
+};
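+
+/*
+ * Note: key_size is the raw key size in bytes.  AES-256-XTS takes two
+ * 256-bit keys (hence 64); Adiantum takes a single 256-bit key.
+ */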
+
+/**
+ * struct default_key_c - private data of a default-key target
+ * @dev: the underlying device
+ * @start: starting sector of the range of @dev which this target actually maps.
+ *	   For this purpose a "sector" is 512 bytes.
+ * @cipher_string: the name of the encryption algorithm being used
+ * @iv_offset: starting offset for IVs.  IVs are generated as if the target were
+ *	       preceded by @iv_offset 512-byte sectors.
+ * @sector_size: crypto sector size in bytes (usually 4096)
+ * @sector_bits: log2(sector_size)
+ * @key: the encryption key to use
+ * @is_hw_wrapped: whether the key is a hardware-wrapped key
+ * @max_dun: the maximum DUN that may be used (computed from other params)
+ */
+struct default_key_c {
+	struct dm_dev *dev;
+	sector_t start;
+	const char *cipher_string;
+	u64 iv_offset;
+	unsigned int sector_size;
+	unsigned int sector_bits;
+	struct blk_crypto_key key;
+	bool is_hw_wrapped;
+	u64 max_dun;
+};
+
+static const struct dm_default_key_cipher *
+lookup_cipher(const char *cipher_string)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(dm_default_key_ciphers); i++) {
+		if (strcmp(cipher_string, dm_default_key_ciphers[i].name) == 0)
+			return &dm_default_key_ciphers[i];
+	}
+	return NULL;
+}
+
+static void default_key_dtr(struct dm_target *ti)
+{
+	struct default_key_c *dkc = ti->private;
+	int err;
+
+	if (dkc->dev) {
+		err = blk_crypto_evict_key(bdev_get_queue(dkc->dev->bdev),
+					   &dkc->key);
+		if (err && err != -ENOKEY)
+			DMWARN("Failed to evict crypto key: %d", err);
+		dm_put_device(ti, dkc->dev);
+	}
+	kfree_sensitive(dkc->cipher_string);
+	kfree_sensitive(dkc);
+}
+
+static int default_key_ctr_optional(struct dm_target *ti,
+				    unsigned int argc, char **argv)
+{
+	struct default_key_c *dkc = ti->private;
+	struct dm_arg_set as;
+	static const struct dm_arg _args[] = {
+		{0, 4, "Invalid number of feature args"},
+	};
+	unsigned int opt_params;
+	const char *opt_string;
+	bool iv_large_sectors = false;
+	char dummy;
+	int err;
+
+	as.argc = argc;
+	as.argv = argv;
+
+	err = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
+	if (err)
+		return err;
+
+	while (opt_params--) {
+		opt_string = dm_shift_arg(&as);
+		if (!opt_string) {
+			ti->error = "Not enough feature arguments";
+			return -EINVAL;
+		}
+		if (!strcmp(opt_string, "allow_discards")) {
+			ti->num_discard_bios = 1;
+		} else if (sscanf(opt_string, "sector_size:%u%c",
+				  &dkc->sector_size, &dummy) == 1) {
+			if (dkc->sector_size < SECTOR_SIZE ||
+			    dkc->sector_size > 4096 ||
+			    !is_power_of_2(dkc->sector_size)) {
+				ti->error = "Invalid sector_size";
+				return -EINVAL;
+			}
+		} else if (!strcmp(opt_string, "iv_large_sectors")) {
+			iv_large_sectors = true;
+		} else if (!strcmp(opt_string, "wrappedkey_v0")) {
+			dkc->is_hw_wrapped = true;
+		} else {
+			ti->error = "Invalid feature arguments";
+			return -EINVAL;
+		}
+	}
+
+	/* dm-default-key doesn't implement iv_large_sectors=false. */
+	if (dkc->sector_size != SECTOR_SIZE && !iv_large_sectors) {
+		ti->error = "iv_large_sectors must be specified";
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Construct a default-key mapping:
+ * <cipher> <key> <iv_offset> <dev_path> <start>
+ *
+ * This syntax matches dm-crypt's, but lots of unneeded functionality has been
+ * removed.  Also, dm-default-key requires that the "iv_large_sectors" option be
+ * given whenever a non-default sector size is used.
+ */
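+/*
+ * For example (hypothetical device and sizes, 128-hex-digit key elided),
+ * a table line might look like:
+ *
+ *	0 417792 default-key aes-xts-plain64 <key> 0 /dev/sda1 0 \
+ *		3 allow_discards sector_size:4096 iv_large_sectors
+ */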
+static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+	struct default_key_c *dkc;
+	const struct dm_default_key_cipher *cipher;
+	u8 raw_key[DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE];
+	unsigned int raw_key_size;
+	unsigned int dun_bytes;
+	unsigned long long tmpll;
+	char dummy;
+	int err;
+
+	if (argc < 5) {
+		ti->error = "Not enough arguments";
+		return -EINVAL;
+	}
+
+	dkc = kzalloc(sizeof(*dkc), GFP_KERNEL);
+	if (!dkc) {
+		ti->error = "Out of memory";
+		return -ENOMEM;
+	}
+	ti->private = dkc;
+
+	/* <cipher> */
+	dkc->cipher_string = kstrdup(argv[0], GFP_KERNEL);
+	if (!dkc->cipher_string) {
+		ti->error = "Out of memory";
+		err = -ENOMEM;
+		goto bad;
+	}
+	cipher = lookup_cipher(dkc->cipher_string);
+	if (!cipher) {
+		ti->error = "Unsupported cipher";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	/* <key> */
+	raw_key_size = strlen(argv[1]);
+	if (raw_key_size > 2 * DM_DEFAULT_KEY_MAX_WRAPPED_KEY_SIZE ||
+	    raw_key_size % 2) {
+		ti->error = "Invalid keysize";
+		err = -EINVAL;
+		goto bad;
+	}
+	raw_key_size /= 2;
+	if (hex2bin(raw_key, argv[1], raw_key_size) != 0) {
+		ti->error = "Malformed key string";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	/* <iv_offset> */
+	if (sscanf(argv[2], "%llu%c", &dkc->iv_offset, &dummy) != 1) {
+		ti->error = "Invalid iv_offset sector";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	/* <dev_path> */
+	err = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
+			    &dkc->dev);
+	if (err) {
+		ti->error = "Device lookup failed";
+		goto bad;
+	}
+
+	/* <start> */
+	if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 ||
+	    tmpll != (sector_t)tmpll) {
+		ti->error = "Invalid start sector";
+		err = -EINVAL;
+		goto bad;
+	}
+	dkc->start = tmpll;
+
+	/* optional arguments */
+	dkc->sector_size = SECTOR_SIZE;
+	if (argc > 5) {
+		err = default_key_ctr_optional(ti, argc - 5, &argv[5]);
+		if (err)
+			goto bad;
+	}
+	dkc->sector_bits = ilog2(dkc->sector_size);
+	if (ti->len & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) {
+		ti->error = "Device size is not a multiple of sector_size";
+		err = -EINVAL;
+		goto bad;
+	}
+
+	dkc->max_dun = (dkc->iv_offset + ti->len - 1) >>
+		       (dkc->sector_bits - SECTOR_SHIFT);
+	dun_bytes = DIV_ROUND_UP(fls64(dkc->max_dun), 8);
+
+	err = blk_crypto_init_key(&dkc->key, raw_key, raw_key_size,
+				  dkc->is_hw_wrapped, cipher->mode_num,
+				  dun_bytes, dkc->sector_size);
+	if (err) {
+		ti->error = "Error initializing blk-crypto key";
+		goto bad;
+	}
+
+	err = blk_crypto_start_using_key(&dkc->key,
+					 bdev_get_queue(dkc->dev->bdev));
+	if (err) {
+		ti->error = "Error starting to use blk-crypto";
+		goto bad;
+	}
+
+	ti->num_flush_bios = 1;
+
+	ti->may_passthrough_inline_crypto = true;
+
+	err = 0;
+	goto out;
+
+bad:
+	default_key_dtr(ti);
+out:
+	memzero_explicit(raw_key, sizeof(raw_key));
+	return err;
+}
+
+static int default_key_map(struct dm_target *ti, struct bio *bio)
+{
+	const struct default_key_c *dkc = ti->private;
+	sector_t sector_in_target;
+	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { 0 };
+
+	bio_set_dev(bio, dkc->dev->bdev);
+
+	/*
+	 * If the bio is a device-level request which doesn't target a specific
+	 * sector, there's nothing more to do.
+	 */
+	if (bio_sectors(bio) == 0)
+		return DM_MAPIO_REMAPPED;
+
+	/* Map the bio's sector to the underlying device. (512-byte sectors) */
+	sector_in_target = dm_target_offset(ti, bio->bi_iter.bi_sector);
+	bio->bi_iter.bi_sector = dkc->start + sector_in_target;
+
+	/*
+	 * If the bio should skip dm-default-key (i.e. if it's for an encrypted
+	 * file's contents), or if it doesn't have any data (e.g. if it's a
+	 * DISCARD request), there's nothing more to do.
+	 */
+	if (bio_should_skip_dm_default_key(bio) || !bio_has_data(bio))
+		return DM_MAPIO_REMAPPED;
+
+	/*
+	 * Else, dm-default-key needs to set this bio's encryption context.
+	 * It must not already have one.
+	 */
+	if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
+		return DM_MAPIO_KILL;
+
+	/* Calculate the DUN and enforce data-unit (crypto sector) alignment. */
+	dun[0] = dkc->iv_offset + sector_in_target; /* 512-byte sectors */
+	if (dun[0] & ((dkc->sector_size >> SECTOR_SHIFT) - 1))
+		return DM_MAPIO_KILL;
+	dun[0] >>= dkc->sector_bits - SECTOR_SHIFT; /* crypto sectors */
+
+	/*
+	 * This check isn't necessary as we should have calculated max_dun
+	 * correctly, but be safe.
+	 */
+	if (WARN_ON_ONCE(dun[0] > dkc->max_dun))
+		return DM_MAPIO_KILL;
+
+	bio_crypt_set_ctx(bio, &dkc->key, dun, GFP_NOIO);
+
+	return DM_MAPIO_REMAPPED;
+}
+
+static void default_key_status(struct dm_target *ti, status_type_t type,
+			       unsigned int status_flags, char *result,
+			       unsigned int maxlen)
+{
+	const struct default_key_c *dkc = ti->private;
+	unsigned int sz = 0;
+	int num_feature_args = 0;
+
+	switch (type) {
+	case STATUSTYPE_INFO:
+		result[0] = '\0';
+		break;
+
+	case STATUSTYPE_TABLE:
+		/* Omit the key for now. */
+		DMEMIT("%s - %llu %s %llu", dkc->cipher_string, dkc->iv_offset,
+		       dkc->dev->name, (unsigned long long)dkc->start);
+
+		num_feature_args += !!ti->num_discard_bios;
+		if (dkc->sector_size != SECTOR_SIZE)
+			num_feature_args += 2;
+		if (dkc->is_hw_wrapped)
+			num_feature_args += 1;
+		if (num_feature_args != 0) {
+			DMEMIT(" %d", num_feature_args);
+			if (ti->num_discard_bios)
+				DMEMIT(" allow_discards");
+			if (dkc->sector_size != SECTOR_SIZE) {
+				DMEMIT(" sector_size:%u", dkc->sector_size);
+				DMEMIT(" iv_large_sectors");
+			}
+			if (dkc->is_hw_wrapped)
+				DMEMIT(" wrappedkey_v0");
+		}
+		break;
+	}
+}
+
+static int default_key_prepare_ioctl(struct dm_target *ti,
+				     struct block_device **bdev)
+{
+	const struct default_key_c *dkc = ti->private;
+	const struct dm_dev *dev = dkc->dev;
+
+	*bdev = dev->bdev;
+
+	/* Only pass ioctls through if the device sizes match exactly. */
+	if (dkc->start != 0 ||
+	    ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+		return 1;
+	return 0;
+}
+
+static int default_key_iterate_devices(struct dm_target *ti,
+				       iterate_devices_callout_fn fn,
+				       void *data)
+{
+	const struct default_key_c *dkc = ti->private;
+
+	return fn(ti, dkc->dev, dkc->start, ti->len, data);
+}
+
+static void default_key_io_hints(struct dm_target *ti,
+				 struct queue_limits *limits)
+{
+	const struct default_key_c *dkc = ti->private;
+	const unsigned int sector_size = dkc->sector_size;
+
+	limits->logical_block_size =
+		max_t(unsigned int, limits->logical_block_size, sector_size);
+	limits->physical_block_size =
+		max_t(unsigned int, limits->physical_block_size, sector_size);
+	limits->io_min = max_t(unsigned int, limits->io_min, sector_size);
+}
+
+static struct target_type default_key_target = {
+	.name			= "default-key",
+	.version		= {2, 1, 0},
+	.module			= THIS_MODULE,
+	.ctr			= default_key_ctr,
+	.dtr			= default_key_dtr,
+	.map			= default_key_map,
+	.status			= default_key_status,
+	.prepare_ioctl		= default_key_prepare_ioctl,
+	.iterate_devices	= default_key_iterate_devices,
+	.io_hints		= default_key_io_hints,
+};
+
+static int __init dm_default_key_init(void)
+{
+	return dm_register_target(&default_key_target);
+}
+
+static void __exit dm_default_key_exit(void)
+{
+	dm_unregister_target(&default_key_target);
+}
+
+module_init(dm_default_key_init);
+module_exit(dm_default_key_exit);
+
+MODULE_AUTHOR("Paul Lawrence <paullawrence@google.com>");
+MODULE_AUTHOR("Paul Crowley <paulcrowley@google.com>");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_DESCRIPTION(DM_NAME " target for encrypting filesystem metadata");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 00774b5..345e22b 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -62,6 +62,7 @@
 	ti->num_secure_erase_bios = 1;
 	ti->num_write_same_bios = 1;
 	ti->num_write_zeroes_bios = 1;
+	ti->may_passthrough_inline_crypto = true;
 	ti->private = lc;
 	return 0;
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ce543b7..ae85a40 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -22,6 +22,8 @@
 #include <linux/blk-mq.h>
 #include <linux/mount.h>
 #include <linux/dax.h>
+#include <linux/bio.h>
+#include <linux/keyslot-manager.h>
 
 #define DM_MSG_PREFIX "table"
 
@@ -1536,6 +1538,54 @@
 	}
 }
 
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+static int device_intersect_crypto_modes(struct dm_target *ti,
+					 struct dm_dev *dev, sector_t start,
+					 sector_t len, void *data)
+{
+	struct blk_keyslot_manager *parent = data;
+	struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;
+
+	blk_ksm_intersect_modes(parent, child);
+	return 0;
+}
+
+/*
+ * Update the inline crypto modes supported by 'q->ksm' to be the intersection
+ * of the modes supported by all targets in the table.
+ *
+ * For any mode to be supported at all, all targets must have explicitly
+ * declared that they can pass through inline crypto support.  For a particular
+ * mode to be supported, all underlying devices must also support it.
+ *
+ * Assume that 'q->ksm' initially declares all modes to be supported.
+ */
+static void dm_calculate_supported_crypto_modes(struct dm_table *t,
+						struct request_queue *q)
+{
+	struct dm_target *ti;
+	unsigned int i;
+
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+
+		if (!ti->may_passthrough_inline_crypto) {
+			blk_ksm_intersect_modes(q->ksm, NULL);
+			return;
+		}
+		if (!ti->type->iterate_devices)
+			continue;
+		ti->type->iterate_devices(ti, device_intersect_crypto_modes,
+					  q->ksm);
+	}
+}
+#else /* CONFIG_BLK_INLINE_ENCRYPTION */
+static inline void dm_calculate_supported_crypto_modes(struct dm_table *t,
+						       struct request_queue *q)
+{
+}
+#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
+
 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
 				sector_t start, sector_t len, void *data)
 {
@@ -1870,6 +1920,8 @@
 
 	dm_table_verify_integrity(t);
 
+	dm_calculate_supported_crypto_modes(t, q);
+
 	/*
 	 * Some devices don't use blk_integrity but still want stable pages
 	 * because they do their own checksumming.
diff --git a/drivers/md/dm-verity-avb.c b/drivers/md/dm-verity-avb.c
new file mode 100644
index 0000000..a9f102a
--- /dev/null
+++ b/drivers/md/dm-verity-avb.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (C) 2017 Google.
+ *
+ * This file is released under the GPLv2.
+ *
+ * Based on drivers/md/dm-verity-chromeos.c
+ */
+
+#include <linux/device-mapper.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+
+#define DM_MSG_PREFIX "verity-avb"
+
+/* Set via module parameters. */
+static char avb_vbmeta_device[64];
+static char avb_invalidate_on_error[4];
+
+static void invalidate_vbmeta_endio(struct bio *bio)
+{
+	if (bio->bi_status)
+		DMERR("invalidate_vbmeta_endio: error %d", bio->bi_status);
+	complete(bio->bi_private);
+}
+
+static int invalidate_vbmeta_submit(struct bio *bio,
+				    struct block_device *bdev,
+				    int op, int access_last_sector,
+				    struct page *page)
+{
+	DECLARE_COMPLETION_ONSTACK(wait);
+
+	bio->bi_private = &wait;
+	bio->bi_end_io = invalidate_vbmeta_endio;
+	bio_set_dev(bio, bdev);
+	bio_set_op_attrs(bio, op, REQ_SYNC);
+
+	bio->bi_iter.bi_sector = 0;
+	if (access_last_sector) {
+		sector_t last_sector;
+
+		last_sector = (i_size_read(bdev->bd_inode)>>SECTOR_SHIFT) - 1;
+		bio->bi_iter.bi_sector = last_sector;
+	}
+	if (!bio_add_page(bio, page, PAGE_SIZE, 0)) {
+		DMERR("invalidate_vbmeta_submit: bio_add_page error");
+		return -EIO;
+	}
+
+	submit_bio(bio);
+	/* Wait up to 2 seconds for completion or fail. */
+	if (!wait_for_completion_timeout(&wait, msecs_to_jiffies(2000)))
+		return -EIO;
+	return 0;
+}
+
+static int invalidate_vbmeta(dev_t vbmeta_devt)
+{
+	int ret = 0;
+	struct block_device *bdev;
+	struct bio *bio;
+	struct page *page;
+	fmode_t dev_mode;
+	/* Ensure we do synchronous unblocked I/O. We may also need
+	 * sync_bdev() on completion, but it really shouldn't be
+	 * necessary.
+	 */
+	int access_last_sector = 0;
+
+	DMINFO("invalidate_vbmeta: acting on device %d:%d",
+	       MAJOR(vbmeta_devt), MINOR(vbmeta_devt));
+
+	/* First we open the device for reading. */
+	dev_mode = FMODE_READ | FMODE_EXCL;
+	bdev = blkdev_get_by_dev(vbmeta_devt, dev_mode,
+				 invalidate_vbmeta);
+	if (IS_ERR(bdev)) {
+		DMERR("invalidate_kernel: could not open device for reading");
+		dev_mode = 0;
+		ret = -ENOENT;
+		goto failed_to_read;
+	}
+
+	bio = bio_alloc(GFP_NOIO, 1);
+	if (!bio) {
+		ret = -ENOMEM;
+		goto failed_bio_alloc;
+	}
+
+	page = alloc_page(GFP_NOIO);
+	if (!page) {
+		ret = -ENOMEM;
+		goto failed_to_alloc_page;
+	}
+
+	access_last_sector = 0;
+	ret = invalidate_vbmeta_submit(bio, bdev, REQ_OP_READ,
+				       access_last_sector, page);
+	if (ret) {
+		DMERR("invalidate_vbmeta: error reading");
+		goto failed_to_submit_read;
+	}
+
+	/* We have a page. Let's make sure it looks right. */
+	if (memcmp("AVB0", page_address(page), 4) == 0) {
+		/* Stamp it. */
+		memcpy(page_address(page), "AVE0", 4);
+		DMINFO("invalidate_vbmeta: found vbmeta partition");
+	} else {
+		/* This could be an AVB footer instead; check. Since the
+		 * AVB footer sits in the last 64 bytes, adjust for the
+		 * fact that we're dealing with 512-byte sectors.
+		 */
+		size_t offset = (1<<SECTOR_SHIFT) - 64;
+
+		access_last_sector = 1;
+		ret = invalidate_vbmeta_submit(bio, bdev, REQ_OP_READ,
+					       access_last_sector, page);
+		if (ret) {
+			DMERR("invalidate_vbmeta: error reading");
+			goto failed_to_submit_read;
+		}
+		if (memcmp("AVBf", page_address(page) + offset, 4) != 0) {
+			DMERR("invalidate_vbmeta on non-vbmeta partition");
+			ret = -EINVAL;
+			goto invalid_header;
+		}
+		/* Stamp it. */
+		memcpy(page_address(page) + offset, "AVE0", 4);
+		DMINFO("invalidate_vbmeta: found vbmeta footer partition");
+	}
+
+	/* Now rewrite the changed page. The block dev was opened
+	 * read-only, so reopen it here for writing.
+	 */
+	blkdev_put(bdev, dev_mode);
+	dev_mode = FMODE_WRITE | FMODE_EXCL;
+	bdev = blkdev_get_by_dev(vbmeta_devt, dev_mode,
+				 invalidate_vbmeta);
+	if (IS_ERR(bdev)) {
+		DMERR("invalidate_vbmeta: could not open device for writing");
+		dev_mode = 0;
+		ret = -ENOENT;
+		goto failed_to_write;
+	}
+
+	/* We reuse the same bio for the write after the read; it must be
+	 * reset to reinitialize bio->bi_remaining.
+	 */
+	bio_reset(bio);
+
+	ret = invalidate_vbmeta_submit(bio, bdev, REQ_OP_WRITE,
+				       access_last_sector, page);
+	if (ret) {
+		DMERR("invalidate_vbmeta: error writing");
+		goto failed_to_submit_write;
+	}
+
+	DMERR("invalidate_vbmeta: completed.");
+	ret = 0;
+failed_to_submit_write:
+failed_to_write:
+invalid_header:
+	__free_page(page);
+failed_to_submit_read:
+	/* Technically, we'll leak a page with the pending bio, but
+	 * we're about to reboot anyway.
+	 */
+failed_to_alloc_page:
+	bio_put(bio);
+failed_bio_alloc:
+	if (dev_mode)
+		blkdev_put(bdev, dev_mode);
+failed_to_read:
+	return ret;
+}
+
+void dm_verity_avb_error_handler(void)
+{
+	dev_t dev;
+
+	DMINFO("AVB error handler called for %s", avb_vbmeta_device);
+
+	if (strcmp(avb_invalidate_on_error, "yes") != 0) {
+		DMINFO("Not configured to invalidate");
+		return;
+	}
+
+	if (avb_vbmeta_device[0] == '\0') {
+		DMERR("avb_vbmeta_device parameter not set");
+		goto fail_no_dev;
+	}
+
+	dev = name_to_dev_t(avb_vbmeta_device);
+	if (!dev) {
+		DMERR("No matching partition for device: %s",
+		      avb_vbmeta_device);
+		goto fail_no_dev;
+	}
+
+	invalidate_vbmeta(dev);
+
+fail_no_dev:
+	;
+}
+
+static int __init dm_verity_avb_init(void)
+{
+	DMINFO("AVB error handler initialized with vbmeta device: %s",
+	       avb_vbmeta_device);
+	return 0;
+}
+
+static void __exit dm_verity_avb_exit(void)
+{
+}
+
+module_init(dm_verity_avb_init);
+module_exit(dm_verity_avb_exit);
+
+MODULE_AUTHOR("David Zeuthen <zeuthen@google.com>");
+MODULE_DESCRIPTION("AVB-specific error handler for dm-verity");
+MODULE_LICENSE("GPL");
+
+/* Declare parameter with no module prefix */
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX	"androidboot.vbmeta."
+module_param_string(device, avb_vbmeta_device, sizeof(avb_vbmeta_device), 0);
+module_param_string(invalidate_on_error, avb_invalidate_on_error,
+		    sizeof(avb_invalidate_on_error), 0);
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index f74982d..e514697 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -252,8 +252,12 @@
 	if (v->mode == DM_VERITY_MODE_LOGGING)
 		return 0;
 
-	if (v->mode == DM_VERITY_MODE_RESTART)
+	if (v->mode == DM_VERITY_MODE_RESTART) {
+#ifdef CONFIG_DM_VERITY_AVB
+		dm_verity_avb_error_handler();
+#endif
 		kernel_restart("dm-verity device corrupted");
+	}
 
 	if (v->mode == DM_VERITY_MODE_PANIC)
 		panic("dm-verity device corrupted");
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 4e769d13..f64b840 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -129,4 +129,6 @@
 extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
 				 sector_t block, u8 *digest, bool *is_zero);
 
+extern void dm_verity_avb_error_handler(void);
+
 #endif /* DM_VERITY_H */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index c18fc254..6558b82 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -28,6 +28,7 @@
 #include <linux/refcount.h>
 #include <linux/part_stat.h>
 #include <linux/blk-crypto.h>
+#include <linux/keyslot-manager.h>
 
 #define DM_MSG_PREFIX "core"
 
@@ -1721,6 +1722,8 @@
 
 static void dm_wq_work(struct work_struct *work);
 
+static void dm_destroy_inline_encryption(struct request_queue *q);
+
 static void cleanup_mapped_device(struct mapped_device *md)
 {
 	if (md->wq)
@@ -1742,8 +1745,10 @@
 		put_disk(md->disk);
 	}
 
-	if (md->queue)
+	if (md->queue) {
+		dm_destroy_inline_encryption(md->queue);
 		blk_cleanup_queue(md->queue);
+	}
 
 	cleanup_srcu_struct(&md->io_barrier);
 
@@ -2092,6 +2097,161 @@
 }
 EXPORT_SYMBOL_GPL(dm_get_queue_limits);
 
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+struct dm_keyslot_evict_args {
+	const struct blk_crypto_key *key;
+	int err;
+};
+
+static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
+				     sector_t start, sector_t len, void *data)
+{
+	struct dm_keyslot_evict_args *args = data;
+	int err;
+
+	err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key);
+	if (!args->err)
+		args->err = err;
+	/* Always try to evict the key from all devices. */
+	return 0;
+}
+
+/*
+ * When an inline encryption key is evicted from a device-mapper device, evict
+ * it from all the underlying devices.
+ */
+static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
+			    const struct blk_crypto_key *key, unsigned int slot)
+{
+	struct mapped_device *md = container_of(ksm, struct mapped_device, ksm);
+	struct dm_keyslot_evict_args args = { key };
+	struct dm_table *t;
+	int srcu_idx;
+	int i;
+	struct dm_target *ti;
+
+	t = dm_get_live_table(md, &srcu_idx);
+	if (!t)
+		return 0;
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+		if (!ti->type->iterate_devices)
+			continue;
+		ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args);
+	}
+	dm_put_live_table(md, srcu_idx);
+	return args.err;
+}
+
+struct dm_derive_raw_secret_args {
+	const u8 *wrapped_key;
+	unsigned int wrapped_key_size;
+	u8 *secret;
+	unsigned int secret_size;
+	int err;
+};
+
+static int dm_derive_raw_secret_callback(struct dm_target *ti,
+					 struct dm_dev *dev, sector_t start,
+					 sector_t len, void *data)
+{
+	struct dm_derive_raw_secret_args *args = data;
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	if (!args->err)
+		return 0;
+
+	if (!q->ksm) {
+		args->err = -EOPNOTSUPP;
+		return 0;
+	}
+
+	args->err = blk_ksm_derive_raw_secret(q->ksm, args->wrapped_key,
+					      args->wrapped_key_size,
+					      args->secret,
+					      args->secret_size);
+	/* Try another device in case this fails. */
+	return 0;
+}
+
+/*
+ * Retrieve the raw_secret from the underlying device. Given that only
+ * one raw_secret can exist for a particular wrapped key, retrieve it
+ * only from the first device that supports derive_raw_secret().
+ */
+static int dm_derive_raw_secret(struct blk_keyslot_manager *ksm,
+				const u8 *wrapped_key,
+				unsigned int wrapped_key_size,
+				u8 *secret, unsigned int secret_size)
+{
+	struct mapped_device *md = container_of(ksm, struct mapped_device, ksm);
+	struct dm_derive_raw_secret_args args = {
+		.wrapped_key = wrapped_key,
+		.wrapped_key_size = wrapped_key_size,
+		.secret = secret,
+		.secret_size = secret_size,
+		.err = -EOPNOTSUPP,
+	};
+	struct dm_table *t;
+	int srcu_idx;
+	int i;
+	struct dm_target *ti;
+
+	t = dm_get_live_table(md, &srcu_idx);
+	if (!t)
+		return -EOPNOTSUPP;
+	for (i = 0; i < dm_table_get_num_targets(t); i++) {
+		ti = dm_table_get_target(t, i);
+		if (!ti->type->iterate_devices)
+			continue;
+		ti->type->iterate_devices(ti, dm_derive_raw_secret_callback,
+					  &args);
+		if (!args.err)
+			break;
+	}
+	dm_put_live_table(md, srcu_idx);
+	return args.err;
+}
+
+static struct blk_ksm_ll_ops dm_ksm_ll_ops = {
+	.keyslot_evict = dm_keyslot_evict,
+	.derive_raw_secret = dm_derive_raw_secret,
+};
+
+static void dm_init_inline_encryption(struct mapped_device *md)
+{
+	blk_ksm_init_passthrough(&md->ksm);
+	md->ksm.ksm_ll_ops = dm_ksm_ll_ops;
+
+	/*
+	 * Initially declare support for all crypto settings. Anything
+	 * unsupported by a child device will be removed later when calculating
+	 * the device restrictions.
+	 */
+	md->ksm.max_dun_bytes_supported = UINT_MAX;
+	md->ksm.features = BLK_CRYPTO_FEATURE_STANDARD_KEYS |
+			   BLK_CRYPTO_FEATURE_WRAPPED_KEYS;
+	memset(md->ksm.crypto_modes_supported, 0xFF,
+	       sizeof(md->ksm.crypto_modes_supported));
+
+	blk_ksm_register(&md->ksm, md->queue);
+}
+
+static void dm_destroy_inline_encryption(struct request_queue *q)
+{
+	blk_ksm_destroy(q->ksm);
+	blk_ksm_unregister(q);
+}
+#else /* CONFIG_BLK_INLINE_ENCRYPTION */
+static inline void dm_init_inline_encryption(struct mapped_device *md)
+{
+}
+
+static inline void dm_destroy_inline_encryption(struct request_queue *q)
+{
+}
+#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
+
 /*
  * Setup the DM device's queue based on md's type
  */
@@ -2123,6 +2283,9 @@
 		DMERR("Cannot calculate initial queue limits");
 		return r;
 	}
+
+	dm_init_inline_encryption(md);
+
 	dm_table_set_restrictions(t, md->queue, &limits);
 	blk_register_queue(md->disk);
 
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index a6d073f..5d93e57 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -66,8 +66,7 @@
 # Multimedia support - automatically enable V4L2 and DVB core
 #
 config MEDIA_CAMERA_SUPPORT
-	bool
-	prompt "Cameras and video grabbers" if MEDIA_SUPPORT_FILTER
+	bool "Cameras and video grabbers"
 	default y if !MEDIA_SUPPORT_FILTER
 	help
 	  Enable support for webcams and video grabbers.
@@ -75,8 +74,7 @@
 	  Say Y when you have a webcam or a video capture grabber board.
 
 config MEDIA_ANALOG_TV_SUPPORT
-	bool
-	prompt "Analog TV" if MEDIA_SUPPORT_FILTER
+	bool "Analog TV"
 	default y if !MEDIA_SUPPORT_FILTER
 	help
 	  Enable analog TV support.
@@ -89,8 +87,7 @@
 		will disable support for them.
 
 config MEDIA_DIGITAL_TV_SUPPORT
-	bool
-	prompt "Digital TV" if MEDIA_SUPPORT_FILTER
+	bool "Digital TV"
 	default y if !MEDIA_SUPPORT_FILTER
 	help
 	  Enable digital TV support.
@@ -99,8 +96,7 @@
 	  hybrid digital TV and analog TV.
 
 config MEDIA_RADIO_SUPPORT
-	bool
-	prompt "AM/FM radio receivers/transmitters" if MEDIA_SUPPORT_FILTER
+	bool "AM/FM radio receivers/transmitters"
 	default y if !MEDIA_SUPPORT_FILTER
 	help
 	  Enable AM/FM radio support.
@@ -115,8 +111,7 @@
 		disable support for them.
 
 config MEDIA_SDR_SUPPORT
-	bool
-	prompt "Software defined radio" if MEDIA_SUPPORT_FILTER
+	bool "Software defined radio"
 	default y if !MEDIA_SUPPORT_FILTER
 	help
 	  Enable software defined radio support.
@@ -124,8 +119,7 @@
 	  Say Y when you have a software defined radio device.
 
 config MEDIA_PLATFORM_SUPPORT
-	bool
-	prompt "Platform-specific devices" if MEDIA_SUPPORT_FILTER
+	bool "Platform-specific devices"
 	default y if !MEDIA_SUPPORT_FILTER
 	help
 	  Enable support for complex cameras, codecs, and other hardware
@@ -138,8 +132,7 @@
 	  Say Y when you want to be able so see such devices.
 
 config MEDIA_TEST_SUPPORT
-	bool
-	prompt "Test drivers" if MEDIA_SUPPORT_FILTER
+	bool "Test drivers"
 	default y if !MEDIA_SUPPORT_FILTER
 	help
 	  Those drivers should not be used on production Kernels, but
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index a99e82e..cd8927e 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -465,6 +465,11 @@
 		__s32		fd;
 	} m;
 	__u32			data_offset;
+	/*
+	 * A few userspace clients and drivers use the reserved fields,
+	 * and it is up to them how these fields are used. V4L2 simply
+	 * copies the reserved fields between them.
+	 */
 	__u32			reserved[11];
 };
 
@@ -529,7 +534,9 @@
 
 	if (copy_in_user(p64, p32, 2 * sizeof(__u32)) ||
 	    copy_in_user(&p64->data_offset, &p32->data_offset,
-			 sizeof(p64->data_offset)))
+			 sizeof(p64->data_offset)) ||
+	    copy_in_user(p64->reserved, p32->reserved,
+			 sizeof(p64->reserved)))
 		return -EFAULT;
 
 	switch (memory) {
@@ -561,7 +568,9 @@
 
 	if (copy_in_user(p32, p64, 2 * sizeof(__u32)) ||
 	    copy_in_user(&p32->data_offset, &p64->data_offset,
-			 sizeof(p64->data_offset)))
+			 sizeof(p64->data_offset)) ||
+	    copy_in_user(p32->reserved, p64->reserved,
+			 sizeof(p32->reserved)))
 		return -EFAULT;
 
 	switch (memory) {
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index fafa8b0..2c8929a 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -448,6 +448,21 @@
 	tristate
 	default MISC_RTSX_PCI || MISC_RTSX_USB
 
+config UID_SYS_STATS
+	bool "Per-UID statistics"
+	depends on PROFILING && TASK_XACCT && TASK_IO_ACCOUNTING
+	help
+	  Per-UID CPU time statistics exported to /proc/uid_cputime,
+	  per-UID I/O statistics exported to /proc/uid_io, and
+	  per-UID state control exported via /proc/uid_procstat.
+
+config UID_SYS_STATS_DEBUG
+	bool "Per-TASK statistics"
+	depends on UID_SYS_STATS
+	default n
+	help
+	  Per-task I/O statistics exported to /proc/uid_io.
+
 config PVPANIC
 	tristate "pvpanic device support"
 	depends on HAS_IOMEM && (ACPI || OF)
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index d23231e..aecaaed 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -57,3 +57,4 @@
 obj-$(CONFIG_UACCE)		+= uacce/
 obj-$(CONFIG_XILINX_SDFEC)	+= xilinx_sdfec.o
 obj-$(CONFIG_HISI_HIKEY_USB)	+= hisi_hikey_usb.o
+obj-$(CONFIG_UID_SYS_STATS)	+= uid_sys_stats.o
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
new file mode 100644
index 0000000..4733670
--- /dev/null
+++ b/drivers/misc/uid_sys_stats.c
@@ -0,0 +1,706 @@
+/* drivers/misc/uid_sys_stats.c
+ *
+ * Copyright (C) 2014 - 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/profile.h>
+#include <linux/rtmutex.h>
+#include <linux/sched/cputime.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+
+#define UID_HASH_BITS	10
+DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
+
+static DEFINE_RT_MUTEX(uid_lock);
+static struct proc_dir_entry *cpu_parent;
+static struct proc_dir_entry *io_parent;
+static struct proc_dir_entry *proc_parent;
+
+struct io_stats {
+	u64 read_bytes;
+	u64 write_bytes;
+	u64 rchar;
+	u64 wchar;
+	u64 fsync;
+};
+
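+/*
+ * Layout of the io[] arrays below: slots 0 and 1 accumulate totals for
+ * the foreground and background states, slot 2 holds the current
+ * snapshot, slot 3 the snapshot taken at the previous update, and
+ * slot 4 collects I/O from tasks that exited since the last update.
+ */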
+#define UID_STATE_FOREGROUND	0
+#define UID_STATE_BACKGROUND	1
+#define UID_STATE_BUCKET_SIZE	2
+
+#define UID_STATE_TOTAL_CURR	2
+#define UID_STATE_TOTAL_LAST	3
+#define UID_STATE_DEAD_TASKS	4
+#define UID_STATE_SIZE		5
+
+#define MAX_TASK_COMM_LEN 256
+
+struct task_entry {
+	char comm[MAX_TASK_COMM_LEN];
+	pid_t pid;
+	struct io_stats io[UID_STATE_SIZE];
+	struct hlist_node hash;
+};
+
+struct uid_entry {
+	uid_t uid;
+	u64 utime;
+	u64 stime;
+	u64 active_utime;
+	u64 active_stime;
+	int state;
+	struct io_stats io[UID_STATE_SIZE];
+	struct hlist_node hash;
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+	DECLARE_HASHTABLE(task_entries, UID_HASH_BITS);
+#endif
+};
+
+static u64 compute_write_bytes(struct task_struct *task)
+{
+	if (task->ioac.write_bytes <= task->ioac.cancelled_write_bytes)
+		return 0;
+
+	return task->ioac.write_bytes - task->ioac.cancelled_write_bytes;
+}
+
+static void compute_io_bucket_stats(struct io_stats *io_bucket,
+					struct io_stats *io_curr,
+					struct io_stats *io_last,
+					struct io_stats *io_dead)
+{
+	/* A task could switch to another uid group, but its io_last in
+	 * the previous uid group could still be positive. Therefore,
+	 * check for a negative delta before each update.
+	 */
+	int64_t delta;
+
+	delta = io_curr->read_bytes + io_dead->read_bytes -
+		io_last->read_bytes;
+	io_bucket->read_bytes += delta > 0 ? delta : 0;
+	delta = io_curr->write_bytes + io_dead->write_bytes -
+		io_last->write_bytes;
+	io_bucket->write_bytes += delta > 0 ? delta : 0;
+	delta = io_curr->rchar + io_dead->rchar - io_last->rchar;
+	io_bucket->rchar += delta > 0 ? delta : 0;
+	delta = io_curr->wchar + io_dead->wchar - io_last->wchar;
+	io_bucket->wchar += delta > 0 ? delta : 0;
+	delta = io_curr->fsync + io_dead->fsync - io_last->fsync;
+	io_bucket->fsync += delta > 0 ? delta : 0;
+
+	io_last->read_bytes = io_curr->read_bytes;
+	io_last->write_bytes = io_curr->write_bytes;
+	io_last->rchar = io_curr->rchar;
+	io_last->wchar = io_curr->wchar;
+	io_last->fsync = io_curr->fsync;
+
+	memset(io_dead, 0, sizeof(struct io_stats));
+}
+
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+static void get_full_task_comm(struct task_entry *task_entry,
+		struct task_struct *task)
+{
+	int i = 0, offset = 0, len = 0;
+	/* save one byte for terminating null character */
+	int unused_len = MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1;
+	char buf[MAX_TASK_COMM_LEN - TASK_COMM_LEN - 1];
+	struct mm_struct *mm = task->mm;
+
+	/* fill the first TASK_COMM_LEN bytes with thread name */
+	__get_task_comm(task_entry->comm, TASK_COMM_LEN, task);
+	i = strlen(task_entry->comm);
+	while (i < TASK_COMM_LEN)
+		task_entry->comm[i++] = ' ';
+
+	/* next the executable file name */
+	if (mm) {
+		mmap_write_lock(mm);
+		if (mm->exe_file) {
+			char *pathname = d_path(&mm->exe_file->f_path, buf,
+					unused_len);
+
+			if (!IS_ERR(pathname)) {
+				len = strlcpy(task_entry->comm + i, pathname,
+						unused_len);
+				i += len;
+				task_entry->comm[i++] = ' ';
+				unused_len--;
+			}
+		}
+		mmap_write_unlock(mm);
+	}
+	unused_len -= len;
+
+	/* Fill the rest with the command line arguments, replacing each
+	 * null or newline character between args in argv with a space.
+	 */
+	len = get_cmdline(task, buf, unused_len);
+	while (offset < len) {
+		if (buf[offset] != '\0' && buf[offset] != '\n')
+			task_entry->comm[i++] = buf[offset];
+		else
+			task_entry->comm[i++] = ' ';
+		offset++;
+	}
+
+	/* Get rid of trailing whitespace in case the args were memset to
+	 * zero before being rewritten in userspace.
+	 */
+	while (task_entry->comm[i-1] == ' ')
+		i--;
+	task_entry->comm[i] = '\0';
+}
+
+static struct task_entry *find_task_entry(struct uid_entry *uid_entry,
+		struct task_struct *task)
+{
+	struct task_entry *task_entry;
+
+	hash_for_each_possible(uid_entry->task_entries, task_entry, hash,
+			task->pid) {
+		if (task->pid == task_entry->pid) {
+			/* if thread name changed, update the entire command */
+			int len = strnchr(task_entry->comm, ' ', TASK_COMM_LEN)
+				- task_entry->comm;
+
+			if (strncmp(task_entry->comm, task->comm, len))
+				get_full_task_comm(task_entry, task);
+			return task_entry;
+		}
+	}
+	return NULL;
+}
+
+static struct task_entry *find_or_register_task(struct uid_entry *uid_entry,
+		struct task_struct *task)
+{
+	struct task_entry *task_entry;
+	pid_t pid = task->pid;
+
+	task_entry = find_task_entry(uid_entry, task);
+	if (task_entry)
+		return task_entry;
+
+	task_entry = kzalloc(sizeof(struct task_entry), GFP_ATOMIC);
+	if (!task_entry)
+		return NULL;
+
+	get_full_task_comm(task_entry, task);
+
+	task_entry->pid = pid;
+	hash_add(uid_entry->task_entries, &task_entry->hash, (unsigned int)pid);
+
+	return task_entry;
+}
+
+static void remove_uid_tasks(struct uid_entry *uid_entry)
+{
+	struct task_entry *task_entry;
+	unsigned long bkt_task;
+	struct hlist_node *tmp_task;
+
+	hash_for_each_safe(uid_entry->task_entries, bkt_task,
+			tmp_task, task_entry, hash) {
+		hash_del(&task_entry->hash);
+		kfree(task_entry);
+	}
+}
+
+static void set_io_uid_tasks_zero(struct uid_entry *uid_entry)
+{
+	struct task_entry *task_entry;
+	unsigned long bkt_task;
+
+	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
+		memset(&task_entry->io[UID_STATE_TOTAL_CURR], 0,
+			sizeof(struct io_stats));
+	}
+}
+
+static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
+		struct task_struct *task, int slot)
+{
+	struct task_entry *task_entry = find_or_register_task(uid_entry, task);
+	struct io_stats *task_io_slot = &task_entry->io[slot];
+
+	task_io_slot->read_bytes += task->ioac.read_bytes;
+	task_io_slot->write_bytes += compute_write_bytes(task);
+	task_io_slot->rchar += task->ioac.rchar;
+	task_io_slot->wchar += task->ioac.wchar;
+	task_io_slot->fsync += task->ioac.syscfs;
+}
+
+static void compute_io_uid_tasks(struct uid_entry *uid_entry)
+{
+	struct task_entry *task_entry;
+	unsigned long bkt_task;
+
+	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
+		compute_io_bucket_stats(&task_entry->io[uid_entry->state],
+					&task_entry->io[UID_STATE_TOTAL_CURR],
+					&task_entry->io[UID_STATE_TOTAL_LAST],
+					&task_entry->io[UID_STATE_DEAD_TASKS]);
+	}
+}
+
+static void show_io_uid_tasks(struct seq_file *m, struct uid_entry *uid_entry)
+{
+	struct task_entry *task_entry;
+	unsigned long bkt_task;
+
+	hash_for_each(uid_entry->task_entries, bkt_task, task_entry, hash) {
+		/* fields are comma-separated because task comm may contain spaces */
+		seq_printf(m, "task,%s,%lu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n",
+				task_entry->comm,
+				(unsigned long)task_entry->pid,
+				task_entry->io[UID_STATE_FOREGROUND].rchar,
+				task_entry->io[UID_STATE_FOREGROUND].wchar,
+				task_entry->io[UID_STATE_FOREGROUND].read_bytes,
+				task_entry->io[UID_STATE_FOREGROUND].write_bytes,
+				task_entry->io[UID_STATE_BACKGROUND].rchar,
+				task_entry->io[UID_STATE_BACKGROUND].wchar,
+				task_entry->io[UID_STATE_BACKGROUND].read_bytes,
+				task_entry->io[UID_STATE_BACKGROUND].write_bytes,
+				task_entry->io[UID_STATE_FOREGROUND].fsync,
+				task_entry->io[UID_STATE_BACKGROUND].fsync);
+	}
+}
+#else
+static void remove_uid_tasks(struct uid_entry *uid_entry) {}
+static void set_io_uid_tasks_zero(struct uid_entry *uid_entry) {}
+static void add_uid_tasks_io_stats(struct uid_entry *uid_entry,
+		struct task_struct *task, int slot) {}
+static void compute_io_uid_tasks(struct uid_entry *uid_entry) {}
+static void show_io_uid_tasks(struct seq_file *m,
+		struct uid_entry *uid_entry) {}
+#endif
+
+static struct uid_entry *find_uid_entry(uid_t uid)
+{
+	struct uid_entry *uid_entry;
+
+	hash_for_each_possible(hash_table, uid_entry, hash, uid) {
+		if (uid_entry->uid == uid)
+			return uid_entry;
+	}
+	return NULL;
+}
+
+static struct uid_entry *find_or_register_uid(uid_t uid)
+{
+	struct uid_entry *uid_entry;
+
+	uid_entry = find_uid_entry(uid);
+	if (uid_entry)
+		return uid_entry;
+
+	uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
+	if (!uid_entry)
+		return NULL;
+
+	uid_entry->uid = uid;
+#ifdef CONFIG_UID_SYS_STATS_DEBUG
+	hash_init(uid_entry->task_entries);
+#endif
+	hash_add(hash_table, &uid_entry->hash, uid);
+
+	return uid_entry;
+}
+
+static int uid_cputime_show(struct seq_file *m, void *v)
+{
+	struct uid_entry *uid_entry = NULL;
+	struct task_struct *task, *temp;
+	struct user_namespace *user_ns = current_user_ns();
+	u64 utime;
+	u64 stime;
+	unsigned long bkt;
+	uid_t uid;
+
+	rt_mutex_lock(&uid_lock);
+
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
+		uid_entry->active_stime = 0;
+		uid_entry->active_utime = 0;
+	}
+
+	rcu_read_lock();
+	do_each_thread(temp, task) {
+		uid = from_kuid_munged(user_ns, task_uid(task));
+		if (!uid_entry || uid_entry->uid != uid)
+			uid_entry = find_or_register_uid(uid);
+		if (!uid_entry) {
+			rcu_read_unlock();
+			rt_mutex_unlock(&uid_lock);
+			pr_err("%s: failed to find the uid_entry for uid %d\n",
+				__func__, uid);
+			return -ENOMEM;
+		}
+		/* avoid double accounting of dying threads */
+		if (!(task->flags & PF_EXITING)) {
+			task_cputime_adjusted(task, &utime, &stime);
+			uid_entry->active_utime += utime;
+			uid_entry->active_stime += stime;
+		}
+	} while_each_thread(temp, task);
+	rcu_read_unlock();
+
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
+		u64 total_utime = uid_entry->utime +
+							uid_entry->active_utime;
+		u64 total_stime = uid_entry->stime +
+							uid_entry->active_stime;
+		seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
+			ktime_to_us(total_utime), ktime_to_us(total_stime));
+	}
+
+	rt_mutex_unlock(&uid_lock);
+	return 0;
+}
+
+static int uid_cputime_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, uid_cputime_show, PDE_DATA(inode));
+}
+
+static const struct proc_ops uid_cputime_fops = {
+	.proc_open	= uid_cputime_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+};
+
+static int uid_remove_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, NULL, NULL);
+}
+
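+/*
+ * Handles writes to /proc/uid_cputime/remove_uid_range; the expected
+ * input is a "<first>-<last>" uid range, e.g. "10000-10005".
+ */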
+static ssize_t uid_remove_write(struct file *file,
+			const char __user *buffer, size_t count, loff_t *ppos)
+{
+	struct uid_entry *uid_entry;
+	struct hlist_node *tmp;
+	char uids[128];
+	char *start_uid, *end_uid = NULL;
+	long uid_start = 0, uid_end = 0;
+
+	if (count >= sizeof(uids))
+		count = sizeof(uids) - 1;
+
+	if (copy_from_user(uids, buffer, count))
+		return -EFAULT;
+
+	uids[count] = '\0';
+	end_uid = uids;
+	start_uid = strsep(&end_uid, "-");
+
+	if (!start_uid || !end_uid)
+		return -EINVAL;
+
+	if (kstrtol(start_uid, 10, &uid_start) != 0 ||
+		kstrtol(end_uid, 10, &uid_end) != 0) {
+		return -EINVAL;
+	}
+
+	rt_mutex_lock(&uid_lock);
+
+	for (; uid_start <= uid_end; uid_start++) {
+		hash_for_each_possible_safe(hash_table, uid_entry, tmp,
+							hash, (uid_t)uid_start) {
+			if (uid_start == uid_entry->uid) {
+				remove_uid_tasks(uid_entry);
+				hash_del(&uid_entry->hash);
+				kfree(uid_entry);
+			}
+		}
+	}
+
+	rt_mutex_unlock(&uid_lock);
+	return count;
+}
+
+static const struct proc_ops uid_remove_fops = {
+	.proc_open	= uid_remove_open,
+	.proc_release	= single_release,
+	.proc_write	= uid_remove_write,
+};
+
+
+static void add_uid_io_stats(struct uid_entry *uid_entry,
+			struct task_struct *task, int slot)
+{
+	struct io_stats *io_slot = &uid_entry->io[slot];
+
+	/* avoid double accounting of dying threads */
+	if (slot != UID_STATE_DEAD_TASKS && (task->flags & PF_EXITING))
+		return;
+
+	io_slot->read_bytes += task->ioac.read_bytes;
+	io_slot->write_bytes += compute_write_bytes(task);
+	io_slot->rchar += task->ioac.rchar;
+	io_slot->wchar += task->ioac.wchar;
+	io_slot->fsync += task->ioac.syscfs;
+
+	add_uid_tasks_io_stats(uid_entry, task, slot);
+}
+
+static void update_io_stats_all_locked(void)
+{
+	struct uid_entry *uid_entry = NULL;
+	struct task_struct *task, *temp;
+	struct user_namespace *user_ns = current_user_ns();
+	unsigned long bkt;
+	uid_t uid;
+
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
+		memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
+			sizeof(struct io_stats));
+		set_io_uid_tasks_zero(uid_entry);
+	}
+
+	rcu_read_lock();
+	do_each_thread(temp, task) {
+		uid = from_kuid_munged(user_ns, task_uid(task));
+		if (!uid_entry || uid_entry->uid != uid)
+			uid_entry = find_or_register_uid(uid);
+		if (!uid_entry)
+			continue;
+		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
+	} while_each_thread(temp, task);
+	rcu_read_unlock();
+
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
+		compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
+					&uid_entry->io[UID_STATE_TOTAL_CURR],
+					&uid_entry->io[UID_STATE_TOTAL_LAST],
+					&uid_entry->io[UID_STATE_DEAD_TASKS]);
+		compute_io_uid_tasks(uid_entry);
+	}
+}
+
+static void update_io_stats_uid_locked(struct uid_entry *uid_entry)
+{
+	struct task_struct *task, *temp;
+	struct user_namespace *user_ns = current_user_ns();
+
+	memset(&uid_entry->io[UID_STATE_TOTAL_CURR], 0,
+		sizeof(struct io_stats));
+	set_io_uid_tasks_zero(uid_entry);
+
+	rcu_read_lock();
+	do_each_thread(temp, task) {
+		if (from_kuid_munged(user_ns, task_uid(task)) != uid_entry->uid)
+			continue;
+		add_uid_io_stats(uid_entry, task, UID_STATE_TOTAL_CURR);
+	} while_each_thread(temp, task);
+	rcu_read_unlock();
+
+	compute_io_bucket_stats(&uid_entry->io[uid_entry->state],
+				&uid_entry->io[UID_STATE_TOTAL_CURR],
+				&uid_entry->io[UID_STATE_TOTAL_LAST],
+				&uid_entry->io[UID_STATE_DEAD_TASKS]);
+	compute_io_uid_tasks(uid_entry);
+}
+
+
+static int uid_io_show(struct seq_file *m, void *v)
+{
+	struct uid_entry *uid_entry;
+	unsigned long bkt;
+
+	rt_mutex_lock(&uid_lock);
+
+	update_io_stats_all_locked();
+
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
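+		/* columns: uid, foreground rchar/wchar/read_bytes/write_bytes,
+		 * background rchar/wchar/read_bytes/write_bytes,
+		 * foreground fsync, background fsync
+		 */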
+		seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+				uid_entry->uid,
+				uid_entry->io[UID_STATE_FOREGROUND].rchar,
+				uid_entry->io[UID_STATE_FOREGROUND].wchar,
+				uid_entry->io[UID_STATE_FOREGROUND].read_bytes,
+				uid_entry->io[UID_STATE_FOREGROUND].write_bytes,
+				uid_entry->io[UID_STATE_BACKGROUND].rchar,
+				uid_entry->io[UID_STATE_BACKGROUND].wchar,
+				uid_entry->io[UID_STATE_BACKGROUND].read_bytes,
+				uid_entry->io[UID_STATE_BACKGROUND].write_bytes,
+				uid_entry->io[UID_STATE_FOREGROUND].fsync,
+				uid_entry->io[UID_STATE_BACKGROUND].fsync);
+
+		show_io_uid_tasks(m, uid_entry);
+	}
+
+	rt_mutex_unlock(&uid_lock);
+	return 0;
+}
+
+static int uid_io_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, uid_io_show, PDE_DATA(inode));
+}
+
+static const struct proc_ops uid_io_fops = {
+	.proc_open	= uid_io_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+};
+
+static int uid_procstat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, NULL, NULL);
+}
+
+static ssize_t uid_procstat_write(struct file *file,
+			const char __user *buffer, size_t count, loff_t *ppos)
+{
+	struct uid_entry *uid_entry;
+	uid_t uid;
+	int argc, state;
+	char input[128];
+
+	if (count >= sizeof(input))
+		return -EINVAL;
+
+	if (copy_from_user(input, buffer, count))
+		return -EFAULT;
+
+	input[count] = '\0';
+
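+	/* expected input: "<uid> <state>", where state is
+	 * UID_STATE_FOREGROUND or UID_STATE_BACKGROUND
+	 */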
+	argc = sscanf(input, "%u %d", &uid, &state);
+	if (argc != 2)
+		return -EINVAL;
+
+	if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND)
+		return -EINVAL;
+
+	rt_mutex_lock(&uid_lock);
+
+	uid_entry = find_or_register_uid(uid);
+	if (!uid_entry) {
+		rt_mutex_unlock(&uid_lock);
+		return -EINVAL;
+	}
+
+	if (uid_entry->state == state) {
+		rt_mutex_unlock(&uid_lock);
+		return count;
+	}
+
+	update_io_stats_uid_locked(uid_entry);
+
+	uid_entry->state = state;
+
+	rt_mutex_unlock(&uid_lock);
+
+	return count;
+}
+
+static const struct proc_ops uid_procstat_fops = {
+	.proc_open	= uid_procstat_open,
+	.proc_release	= single_release,
+	.proc_write	= uid_procstat_write,
+};
+
+static int process_notifier(struct notifier_block *self,
+			unsigned long cmd, void *v)
+{
+	struct task_struct *task = v;
+	struct uid_entry *uid_entry;
+	u64 utime, stime;
+	uid_t uid;
+
+	if (!task)
+		return NOTIFY_OK;
+
+	rt_mutex_lock(&uid_lock);
+	uid = from_kuid_munged(current_user_ns(), task_uid(task));
+	uid_entry = find_or_register_uid(uid);
+	if (!uid_entry) {
+		pr_err("%s: failed to find uid %d\n", __func__, uid);
+		goto exit;
+	}
+
+	task_cputime_adjusted(task, &utime, &stime);
+	uid_entry->utime += utime;
+	uid_entry->stime += stime;
+
+	add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);
+
+exit:
+	rt_mutex_unlock(&uid_lock);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block process_notifier_block = {
+	.notifier_call	= process_notifier,
+};
+
+static int __init proc_uid_sys_stats_init(void)
+{
+	hash_init(hash_table);
+
+	cpu_parent = proc_mkdir("uid_cputime", NULL);
+	if (!cpu_parent) {
+		pr_err("%s: failed to create uid_cputime proc entry\n",
+			__func__);
+		goto err;
+	}
+
+	proc_create_data("remove_uid_range", 0222, cpu_parent,
+		&uid_remove_fops, NULL);
+	proc_create_data("show_uid_stat", 0444, cpu_parent,
+		&uid_cputime_fops, NULL);
+
+	io_parent = proc_mkdir("uid_io", NULL);
+	if (!io_parent) {
+		pr_err("%s: failed to create uid_io proc entry\n",
+			__func__);
+		goto err;
+	}
+
+	proc_create_data("stats", 0444, io_parent,
+		&uid_io_fops, NULL);
+
+	proc_parent = proc_mkdir("uid_procstat", NULL);
+	if (!proc_parent) {
+		pr_err("%s: failed to create uid_procstat proc entry\n",
+			__func__);
+		goto err;
+	}
+
+	proc_create_data("set", 0222, proc_parent,
+		&uid_procstat_fops, NULL);
+
+	profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);
+
+	return 0;
+
+err:
+	remove_proc_subtree("uid_cputime", NULL);
+	remove_proc_subtree("uid_io", NULL);
+	remove_proc_subtree("uid_procstat", NULL);
+	return -ENOMEM;
+}
+
+early_initcall(proc_uid_sys_stats_init);
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 96b2ca1..3e112178 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -493,7 +493,8 @@
 #endif
 
 	mmc_start_host(host);
-	mmc_register_pm_notifier(host);
+	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+		mmc_register_pm_notifier(host);
 
 	return 0;
 }
@@ -510,7 +511,8 @@
  */
 void mmc_remove_host(struct mmc_host *host)
 {
-	mmc_unregister_pm_notifier(host);
+	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+		mmc_unregister_pm_notifier(host);
 	mmc_stop_host(host);
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 21b7114..4092e99 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -2529,6 +2529,9 @@
 	u64 offloads;
 	int err;
 
+	if (!vi->has_cvq)
+		return 0;
+
 	if ((dev->features ^ features) & NETIF_F_LRO) {
 		if (vi->xdp_queue_pairs)
 			return -EBUSY;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 3b3fc7c..c966069 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -65,6 +65,10 @@
 module_param(support_p2p_device, bool, 0444);
 MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type");
 
+static ushort mac_prefix;
+module_param(mac_prefix, ushort, 0444);
+MODULE_PARM_DESC(mac_prefix, "Second and third most significant octets in MAC");
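+/*
+ * e.g. mac_prefix=0x1234 gives radios permanent addresses of the
+ * form 02:12:34:<idx_hi>:<idx_lo>:00 (see the perm_addr setup below).
+ */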
+
 /**
  * enum hwsim_regtest - the type of regulatory tests we offer
  *
@@ -2998,6 +3002,8 @@
 	if (!param->perm_addr) {
 		eth_zero_addr(addr);
 		addr[0] = 0x02;
+		addr[1] = (mac_prefix >> 8) & 0xFF;
+		addr[2] = mac_prefix & 0xFF;
 		addr[3] = idx >> 8;
 		addr[4] = idx;
 		memcpy(data->addresses[0].addr, addr, ETH_ALEN);
diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c
index c878097..69d136a 100644
--- a/drivers/net/wireless/virt_wifi.c
+++ b/drivers/net/wireless/virt_wifi.c
@@ -13,6 +13,7 @@
 #include <net/rtnetlink.h>
 #include <linux/etherdevice.h>
 #include <linux/module.h>
+#include <net/virt_wifi.h>
 
 static struct wiphy *common_wiphy;
 
@@ -20,6 +21,7 @@
 	struct delayed_work scan_result;
 	struct cfg80211_scan_request *scan_request;
 	bool being_deleted;
+	struct virt_wifi_network_simulation *network_simulation;
 };
 
 static struct ieee80211_channel channel_2ghz = {
@@ -148,6 +150,9 @@
 
 	priv->scan_request = request;
 	schedule_delayed_work(&priv->scan_result, HZ * 2);
+	if (priv->network_simulation &&
+	    priv->network_simulation->notify_scan_trigger)
+		priv->network_simulation->notify_scan_trigger(wiphy, request);
 
 	return 0;
 }
@@ -178,6 +183,12 @@
 					   DBM_TO_MBM(-50), GFP_KERNEL);
 	cfg80211_put_bss(wiphy, informed_bss);
 
+	if (priv->network_simulation &&
+	    priv->network_simulation->generate_virt_scan_result) {
+		if (priv->network_simulation->generate_virt_scan_result(wiphy))
+			wiphy_err(wiphy, "Failed to generate the simulated scan result.\n");
+	}
+
 	/* Schedules work which acquires and releases the rtnl lock. */
 	cfg80211_scan_done(priv->scan_request, &scan_info);
 	priv->scan_request = NULL;
@@ -365,6 +376,8 @@
 	priv = wiphy_priv(wiphy);
 	priv->being_deleted = false;
 	priv->scan_request = NULL;
+	priv->network_simulation = NULL;
+
 	INIT_DELAYED_WORK(&priv->scan_result, virt_wifi_scan_result);
 
 	err = wiphy_register(wiphy);
@@ -380,7 +393,6 @@
 static void virt_wifi_destroy_wiphy(struct wiphy *wiphy)
 {
 	struct virt_wifi_wiphy_priv *priv;
-
 	WARN(!wiphy, "%s called with null wiphy", __func__);
 	if (!wiphy)
 		return;
@@ -414,8 +426,13 @@
 static int virt_wifi_net_device_open(struct net_device *dev)
 {
 	struct virt_wifi_netdev_priv *priv = netdev_priv(dev);
-
+	struct virt_wifi_wiphy_priv *w_priv;
+
 	priv->is_up = true;
+	w_priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
+	if (w_priv->network_simulation &&
+	    w_priv->network_simulation->notify_device_open)
+		w_priv->network_simulation->notify_device_open(dev);
+
 	return 0;
 }
 
@@ -423,16 +440,22 @@
 static int virt_wifi_net_device_stop(struct net_device *dev)
 {
 	struct virt_wifi_netdev_priv *n_priv = netdev_priv(dev);
+	struct virt_wifi_wiphy_priv *w_priv;
 
 	n_priv->is_up = false;
 
 	if (!dev->ieee80211_ptr)
 		return 0;
+	w_priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
 
 	virt_wifi_cancel_scan(dev->ieee80211_ptr->wiphy);
 	virt_wifi_cancel_connect(dev);
 	netif_carrier_off(dev);
 
+	if (w_priv->network_simulation &&
+	    w_priv->network_simulation->notify_device_stop)
+		w_priv->network_simulation->notify_device_stop(dev);
+
 	return 0;
 }
 
@@ -675,6 +698,27 @@
 	unregister_netdevice_notifier(&virt_wifi_notifier);
 }
 
+int virt_wifi_register_network_simulation(
+			struct virt_wifi_network_simulation *ops)
+{
+	struct virt_wifi_wiphy_priv *priv = wiphy_priv(common_wiphy);
+
+	if (priv->network_simulation)
+		return -EEXIST;
+	priv->network_simulation = ops;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(virt_wifi_register_network_simulation);
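+
+/*
+ * A simulation module hooks in roughly like this (a sketch; the callback
+ * names are placeholders, the ops struct is defined in net/virt_wifi.h):
+ *
+ *	static struct virt_wifi_network_simulation sim_ops = {
+ *		.notify_device_open	= sim_notify_open,
+ *		.notify_device_stop	= sim_notify_stop,
+ *	};
+ *
+ *	err = virt_wifi_register_network_simulation(&sim_ops);
+ */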
+
+int virt_wifi_unregister_network_simulation(void)
+{
+	struct virt_wifi_wiphy_priv *priv = wiphy_priv(common_wiphy);
+
+	if (!priv->network_simulation)
+		return -ENODATA;
+	priv->network_simulation = NULL;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(virt_wifi_unregister_network_simulation);
+
 module_init(virt_wifi_init_module);
 module_exit(virt_wifi_cleanup_module);
 
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 4602e46..5d9c710 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -1036,43 +1036,67 @@
 	return 0;
 }
 
+/*
+ * Convert configs to something easy to use in C code
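+ *
+ * CMDLINE_FORCE: use CONFIG_CMDLINE only, ignoring DT bootargs
+ * CMDLINE_EXTEND: append DT bootargs to the existing command line
+ *	(or to CONFIG_CMDLINE when the incoming one is empty)
+ * CMDLINE_FROM_BOOTLOADER: DT bootargs, when present, replace the
+ *	command line; CONFIG_CMDLINE is only a fallback when it is empty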
+ */
+#if defined(CONFIG_CMDLINE_FORCE)
+static const int overwrite_incoming_cmdline = 1;
+static const int read_dt_cmdline;
+static const int concat_cmdline;
+#elif defined(CONFIG_CMDLINE_EXTEND)
+static const int overwrite_incoming_cmdline;
+static const int read_dt_cmdline = 1;
+static const int concat_cmdline = 1;
+#else /* CMDLINE_FROM_BOOTLOADER */
+static const int overwrite_incoming_cmdline;
+static const int read_dt_cmdline = 1;
+static const int concat_cmdline;
+#endif
+
+#ifdef CONFIG_CMDLINE
+static const char *config_cmdline = CONFIG_CMDLINE;
+#else
+static const char *config_cmdline = "";
+#endif
+
 int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 				     int depth, void *data)
 {
-	int l;
-	const char *p;
+	int l = 0;
+	const char *p = NULL;
 	const void *rng_seed;
+	char *cmdline = data;
 
 	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
 
-	if (depth != 1 || !data ||
+	if (depth != 1 || !cmdline ||
 	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
 		return 0;
 
 	early_init_dt_check_for_initrd(node);
 
-	/* Retrieve command line */
-	p = of_get_flat_dt_prop(node, "bootargs", &l);
-	if (p != NULL && l > 0)
-		strlcpy(data, p, min(l, COMMAND_LINE_SIZE));
+	/* Use CONFIG_CMDLINE if forcing, or if the incoming cmdline is empty */
+	if (overwrite_incoming_cmdline || !cmdline[0])
+		strlcpy(cmdline, config_cmdline, COMMAND_LINE_SIZE);
 
-	/*
-	 * CONFIG_CMDLINE is meant to be a default in case nothing else
-	 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
-	 * is set in which case we override whatever was found earlier.
-	 */
-#ifdef CONFIG_CMDLINE
-#if defined(CONFIG_CMDLINE_EXTEND)
-	strlcat(data, " ", COMMAND_LINE_SIZE);
-	strlcat(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#elif defined(CONFIG_CMDLINE_FORCE)
-	strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#else
-	/* No arguments from boot loader, use kernel's  cmdl*/
-	if (!((char *)data)[0])
-		strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif
-#endif /* CONFIG_CMDLINE */
+	/* Retrieve command line unless forcing */
+	if (read_dt_cmdline)
+		p = of_get_flat_dt_prop(node, "bootargs", &l);
+
+	if (p != NULL && l > 0) {
+		if (concat_cmdline) {
+			int cmdline_len;
+			int copy_len;
+
+			strlcat(cmdline, " ", COMMAND_LINE_SIZE);
+			cmdline_len = strlen(cmdline);
+			copy_len = COMMAND_LINE_SIZE - cmdline_len - 1;
+			copy_len = min((int)l, copy_len);
+			strncpy(cmdline + cmdline_len, p, copy_len);
+			cmdline[cmdline_len + copy_len] = '\0';
+		} else {
+			strlcpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
+		}
+	}
 
 	pr_debug("Command line is: %s\n", (char *)data);
 
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 44c2a65..72a7637 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -84,6 +84,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(dw_handle_msi_irq);
 
 /* Chained MSI interrupt service routine */
 static void dw_chained_msi_isr(struct irq_desc *desc)
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 5fe7b8a..416bd51 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -2,7 +2,7 @@
 if (ARCH_QCOM || COMPILE_TEST)
 
 config PINCTRL_MSM
-	bool
+	tristate
 	select PINMUX
 	select PINCONF
 	select GENERIC_PINCONF
@@ -13,6 +13,7 @@
 config PINCTRL_APQ8064
 	tristate "Qualcomm APQ8064 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
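+	# "QCOM_SCM || !QCOM_SCM" prevents this driver from being built-in
+	# when QCOM_SCM=m (the same pattern repeats for the drivers below)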
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -21,6 +22,7 @@
 config PINCTRL_APQ8084
 	tristate "Qualcomm APQ8084 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -29,6 +31,7 @@
 config PINCTRL_IPQ4019
 	tristate "Qualcomm IPQ4019 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -37,6 +40,7 @@
 config PINCTRL_IPQ8064
 	tristate "Qualcomm IPQ8064 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -45,6 +49,7 @@
 config PINCTRL_IPQ8074
 	tristate "Qualcomm Technologies, Inc. IPQ8074 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for
@@ -55,6 +60,7 @@
 config PINCTRL_IPQ6018
 	tristate "Qualcomm Technologies, Inc. IPQ6018 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for
@@ -74,6 +80,7 @@
 config PINCTRL_MSM8660
 	tristate "Qualcomm 8660 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -82,6 +89,7 @@
 config PINCTRL_MSM8960
 	tristate "Qualcomm 8960 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -90,6 +98,7 @@
 config PINCTRL_MDM9615
 	tristate "Qualcomm 9615 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -98,6 +107,7 @@
 config PINCTRL_MSM8X74
 	tristate "Qualcomm 8x74 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -106,6 +116,7 @@
 config PINCTRL_MSM8916
 	tristate "Qualcomm 8916 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -114,6 +125,7 @@
 config PINCTRL_MSM8976
 	tristate "Qualcomm 8976 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -124,6 +136,7 @@
 config PINCTRL_MSM8994
 	tristate "Qualcomm 8994 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -133,6 +146,7 @@
 config PINCTRL_MSM8996
 	tristate "Qualcomm MSM8996 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -141,6 +155,7 @@
 config PINCTRL_MSM8998
 	tristate "Qualcomm MSM8998 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -149,6 +164,7 @@
 config PINCTRL_QCS404
 	tristate "Qualcomm QCS404 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -157,6 +173,7 @@
 config PINCTRL_QDF2XXX
 	tristate "Qualcomm Technologies QDF2xxx pin controller driver"
 	depends on GPIOLIB && ACPI
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the GPIO driver for the TLMM block found on the
@@ -194,6 +211,7 @@
 config PINCTRL_SC7180
 	tristate "Qualcomm Technologies Inc SC7180 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -203,6 +221,7 @@
 config PINCTRL_SDM660
 	tristate "Qualcomm Technologies Inc SDM660 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	 This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -212,6 +231,7 @@
 config PINCTRL_SDM845
 	tristate "Qualcomm Technologies Inc SDM845 pin controller driver"
 	depends on GPIOLIB && (OF || ACPI)
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	 This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -221,6 +241,7 @@
 config PINCTRL_SM8150
 	tristate "Qualcomm Technologies Inc SM8150 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	 This is the pinctrl, pinmux, pinconf and gpiolib driver for the
@@ -230,6 +251,7 @@
 config PINCTRL_SM8250
 	tristate "Qualcomm Technologies Inc SM8250 pin controller driver"
 	depends on GPIOLIB && OF
+	depends on QCOM_SCM || !QCOM_SCM
 	select PINCTRL_MSM
 	help
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 77a25bd..ced4d74 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1449,3 +1449,6 @@
 }
 EXPORT_SYMBOL(msm_pinctrl_remove);
 
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. pinctrl-msm driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c
index 31be311..8111ed1f 100644
--- a/drivers/platform/chrome/cros_ec_typec.c
+++ b/drivers/platform/chrome/cros_ec_typec.c
@@ -438,8 +438,7 @@
 	if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_LINK_UNIDIR)
 		data.cable_mode |= TBT_CABLE_LINK_TRAINING;
 
-	if (pd_ctrl->cable_gen)
-		data.cable_mode |= TBT_CABLE_ROUNDED;
+	data.cable_mode |= TBT_SET_CABLE_ROUNDED(pd_ctrl->cable_gen);
 
 	/* Enter Mode VDO */
 	data.enter_vdo = TBT_SET_CABLE_SPEED(pd_ctrl->cable_speed);
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 38e3aa64..668369b 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -32,6 +32,13 @@
 
 static struct device_type power_supply_dev_type;
 
+struct match_device_node_array_param {
+	struct device_node *parent_of_node;
+	struct power_supply **psy;
+	ssize_t psy_size;
+	ssize_t psy_count;
+};
+
 #define POWER_SUPPLY_DEFERRED_REGISTER_TIME	msecs_to_jiffies(10)
 
 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
@@ -522,6 +529,77 @@
 }
 EXPORT_SYMBOL_GPL(power_supply_get_by_phandle);
 
+static int power_supply_match_device_node_array(struct device *dev,
+						void *data)
+{
+	struct match_device_node_array_param *param =
+		(struct match_device_node_array_param *)data;
+	struct power_supply **psy = param->psy;
+	ssize_t size = param->psy_size;
+	ssize_t *count = &param->psy_count;
+
+	if (!dev->parent || dev->parent->of_node != param->parent_of_node)
+		return 0;
+
+	if (*count >= size)
+		return -EOVERFLOW;
+
+	psy[*count] = dev_get_drvdata(dev);
+	atomic_inc(&psy[*count]->use_cnt);
+	(*count)++;
+
+	return 0;
+}
+
+/**
+ * power_supply_get_by_phandle_array() - Similar to
+ * power_supply_get_by_phandle but returns an array of power supply
+ * objects which are associated with the phandle.
+ * @np: Pointer to device node holding phandle property.
+ * @property: Name of property holding a power supply name.
+ * @psy: Array of power_supply pointers provided by the client which is
+ * filled by power_supply_get_by_phandle_array.
+ * @size: size of power_supply pointer array.
+ *
+ * If power supplies were found, the reference count of each internal
+ * power supply device is increased. The user should call
+ * power_supply_put() on every returned supply after usage.
+ *
+ * Return: On success returns the number of power supply objects filled
+ * in the @psy array.
+ * -EOVERFLOW when the size of the @psy array is not sufficient.
+ * -EINVAL when @psy is NULL or @size is 0.
+ * -ENODEV when a matching device_node is not found.
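+ *
+ * Example (a sketch; the property name and array size are illustrative):
+ *
+ *	struct power_supply *psy[4];
+ *	int n;
+ *
+ *	n = power_supply_get_by_phandle_array(dev->of_node, "power-supplies",
+ *					      psy, ARRAY_SIZE(psy));
+ *	if (n > 0)
+ *		while (n--)
+ *			power_supply_put(psy[n]);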
+ */
+int power_supply_get_by_phandle_array(struct device_node *np,
+				      const char *property,
+				      struct power_supply **psy,
+				      ssize_t size)
+{
+	struct device_node *power_supply_np;
+	int ret;
+	struct match_device_node_array_param param;
+
+	if (!psy || !size)
+		return -EINVAL;
+
+	power_supply_np = of_parse_phandle(np, property, 0);
+	if (!power_supply_np)
+		return -ENODEV;
+
+	param.parent_of_node = power_supply_np;
+	param.psy = psy;
+	param.psy_size = size;
+	param.psy_count = 0;
+	ret = class_for_each_device(power_supply_class, NULL, &param,
+				    power_supply_match_device_node_array);
+
+	of_node_put(power_supply_np);
+
+	return param.psy_count;
+}
+EXPORT_SYMBOL_GPL(power_supply_get_by_phandle_array);
+
 static void devm_power_supply_put(struct device *dev, void *res)
 {
 	struct power_supply **psy = res;
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index a616b9d..a5e6563 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -89,6 +89,7 @@
 	[POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE]	= "Adaptive",
 	[POWER_SUPPLY_CHARGE_TYPE_CUSTOM]	= "Custom",
 	[POWER_SUPPLY_CHARGE_TYPE_LONGLIFE]	= "Long Life",
+	[POWER_SUPPLY_CHARGE_TYPE_TAPER]	= "Taper",
 };
 
 static const char * const POWER_SUPPLY_HEALTH_TEXT[] = {
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index 1f16f53..919189d 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -304,6 +304,7 @@
 		pwm->pwm = chip->base + i;
 		pwm->hwpwm = i;
 		pwm->state.polarity = polarity;
+		pwm->state.output_type = PWM_OUTPUT_FIXED;
 
 		radix_tree_insert(&pwm_tree, pwm->pwm, pwm);
 	}
diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c
index 9903c3a..f5f3766 100644
--- a/drivers/pwm/sysfs.c
+++ b/drivers/pwm/sysfs.c
@@ -215,11 +215,35 @@
 	return sprintf(buf, "%u %u\n", result.period, result.duty_cycle);
 }
 
+static ssize_t output_type_show(struct device *child,
+			     struct device_attribute *attr,
+			     char *buf)
+{
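+	/* e.g. "cat /sys/class/pwm/pwmchip0/pwm0/output_type" prints one
+	 * of "fixed", "modulated" or "unknown" (path is illustrative)
+	 */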
+	const struct pwm_device *pwm = child_to_pwm_device(child);
+	const char *output_type = "unknown";
+	struct pwm_state state;
+
+	pwm_get_state(pwm, &state);
+	switch (state.output_type) {
+	case PWM_OUTPUT_FIXED:
+		output_type = "fixed";
+		break;
+	case PWM_OUTPUT_MODULATED:
+		output_type = "modulated";
+		break;
+	default:
+		break;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", output_type);
+}
+
 static DEVICE_ATTR_RW(period);
 static DEVICE_ATTR_RW(duty_cycle);
 static DEVICE_ATTR_RW(enable);
 static DEVICE_ATTR_RW(polarity);
 static DEVICE_ATTR_RO(capture);
+static DEVICE_ATTR_RO(output_type);
 
 static struct attribute *pwm_attrs[] = {
 	&dev_attr_period.attr,
@@ -227,6 +251,7 @@
 	&dev_attr_enable.attr,
 	&dev_attr_polarity.attr,
 	&dev_attr_capture.attr,
+	&dev_attr_output_type.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(pwm);
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index 91de940..e6eb5a1 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -283,6 +283,42 @@
 }
 EXPORT_SYMBOL(rpmsg_trysend_offchannel);
 
+/**
+ * rpmsg_get_signals() - get the signals for this endpoint
+ * @ept:	the rpmsg endpoint
+ *
+ * Returns signal bits on success and an appropriate error value on failure.
+ */
+int rpmsg_get_signals(struct rpmsg_endpoint *ept)
+{
+	if (WARN_ON(!ept))
+		return -EINVAL;
+	if (!ept->ops->get_signals)
+		return -ENXIO;
+
+	return ept->ops->get_signals(ept);
+}
+EXPORT_SYMBOL(rpmsg_get_signals);
+
+/**
+ * rpmsg_set_signals() - set the remote signals for this endpoint
+ * @ept:	the rpmsg endpoint
+ * @set:	set mask for signals
+ * @clear:	clear mask for signals
+ *
+ * Returns 0 on success and an appropriate error value on failure.
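+ *
+ * For example, a client might assert one signal and clear another in a
+ * single call (TIOCM-style masks are an assumption about the backend):
+ *
+ *	ret = rpmsg_set_signals(ept, TIOCM_DTR, TIOCM_RTS);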
+ */
+int rpmsg_set_signals(struct rpmsg_endpoint *ept, u32 set, u32 clear)
+{
+	if (WARN_ON(!ept))
+		return -EINVAL;
+	if (!ept->ops->set_signals)
+		return -ENXIO;
+
+	return ept->ops->set_signals(ept, set, clear);
+}
+EXPORT_SYMBOL(rpmsg_set_signals);
+
 /*
  * match a rpmsg channel with a channel info struct.
  * this is used to make sure we're not creating rpmsg devices for channels
@@ -468,6 +504,10 @@
 
 		rpdev->ept = ept;
 		rpdev->src = ept->addr;
+
+		if (rpdrv->signals)
+			ept->sig_cb = rpdrv->signals;
+
 	}
 
 	err = rpdrv->probe(rpdev);
diff --git a/drivers/rpmsg/rpmsg_internal.h b/drivers/rpmsg/rpmsg_internal.h
index 3fc83cd..03356e8 100644
--- a/drivers/rpmsg/rpmsg_internal.h
+++ b/drivers/rpmsg/rpmsg_internal.h
@@ -2,6 +2,7 @@
 /*
  * remote processor messaging bus internals
  *
+ * Copyright (c) 2020 The Linux Foundation.
  * Copyright (C) 2011 Texas Instruments, Inc.
  * Copyright (C) 2011 Google, Inc.
  *
@@ -47,6 +48,8 @@
  * @trysendto:		see @rpmsg_trysendto(), optional
  * @trysend_offchannel:	see @rpmsg_trysend_offchannel(), optional
  * @poll:		see @rpmsg_poll(), optional
+ * @get_signals:	see @rpmsg_get_signals(), optional
+ * @set_signals:	see @rpmsg_set_signals(), optional
  *
  * Indirection table for the operations that a rpmsg backend should implement.
  * In addition to @destroy_ept, the backend must at least implement @send and
@@ -66,6 +69,8 @@
 			     void *data, int len);
 	__poll_t (*poll)(struct rpmsg_endpoint *ept, struct file *filp,
 			     poll_table *wait);
+	int (*get_signals)(struct rpmsg_endpoint *ept);
+	int (*set_signals)(struct rpmsg_endpoint *ept, u32 set, u32 clear);
 };
 
 int rpmsg_register_device(struct rpmsg_device *rpdev);
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index f9d6ef3..f03feb0 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1206,24 +1206,34 @@
 	int err = 0;
 
 	if (status == PRE_CHANGE) {
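+		/* keep the link in hibern8 while the clocks are being scaled */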
+		err = ufshcd_uic_hibern8_enter(hba);
+		if (err)
+			return err;
 		if (scale_up)
 			err = ufs_qcom_clk_scale_up_pre_change(hba);
 		else
 			err = ufs_qcom_clk_scale_down_pre_change(hba);
+		if (err)
+			ufshcd_uic_hibern8_exit(hba);
+
 	} else {
 		if (scale_up)
 			err = ufs_qcom_clk_scale_up_post_change(hba);
 		else
 			err = ufs_qcom_clk_scale_down_post_change(hba);
 
-		if (err || !dev_req_params)
+		if (err || !dev_req_params) {
+			ufshcd_uic_hibern8_exit(hba);
 			goto out;
+		}
 
 		ufs_qcom_cfg_timers(hba,
 				    dev_req_params->gear_rx,
 				    dev_req_params->pwr_rx,
 				    dev_req_params->hs_rate,
 				    false);
+		ufshcd_uic_hibern8_exit(hba);
 	}
 
 out:
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index bdcd27f..5e4d0a4 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -862,15 +862,23 @@
 	.attrs = ufs_sysfs_lun_attributes,
 };
 
-void ufs_sysfs_add_nodes(struct device *dev)
+void ufs_sysfs_add_nodes(struct ufs_hba *hba)
 {
 	int ret;
 
-	ret = sysfs_create_groups(&dev->kobj, ufs_sysfs_groups);
-	if (ret)
-		dev_err(dev,
+	ret = sysfs_create_groups(&hba->dev->kobj, ufs_sysfs_groups);
+	if (ret) {
+		dev_err(hba->dev,
 			"%s: sysfs groups creation failed (err = %d)\n",
 			__func__, ret);
+		return;
+	}
+
+	ret = ufshcd_vops_update_sysfs(hba);
+	if (ret)
+		dev_err(hba->dev,
+			"%s: vops sysfs groups update failed (err = %d)\n",
+			__func__, ret);
 }
 
 void ufs_sysfs_remove_nodes(struct device *dev)
diff --git a/drivers/scsi/ufs/ufs-sysfs.h b/drivers/scsi/ufs/ufs-sysfs.h
index 0f4e750..ec7b0c8 100644
--- a/drivers/scsi/ufs/ufs-sysfs.h
+++ b/drivers/scsi/ufs/ufs-sysfs.h
@@ -9,7 +9,7 @@
 
 #include "ufshcd.h"
 
-void ufs_sysfs_add_nodes(struct device *dev);
+void ufs_sysfs_add_nodes(struct ufs_hba *hba);
 void ufs_sysfs_remove_nodes(struct device *dev);
 
 extern const struct attribute_group ufs_sysfs_unit_descriptor_group;
diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c
index 07310b1..db1b1d9 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.c
+++ b/drivers/scsi/ufs/ufshcd-crypto.c
@@ -120,7 +120,12 @@
 		return false;
 
 	/* Reset might clear all keys, so reprogram all the keys. */
-	blk_ksm_reprogram_all_keys(&hba->ksm);
+	if (hba->ksm.num_slots)
+		blk_ksm_reprogram_all_keys(&hba->ksm);
+
+	if (hba->quirks & UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE)
+		return false;
+
 	return true;
 }
 
@@ -157,6 +162,9 @@
 	int err = 0;
 	enum blk_crypto_mode_num blk_mode_num;
 
+	if (hba->quirks & UFSHCD_QUIRK_CUSTOM_KEYSLOT_MANAGER)
+		return 0;
+
 	/*
 	 * Don't use crypto if either the hardware doesn't advertise the
 	 * standard crypto capability bit *or* if the vendor specific driver
@@ -187,6 +195,7 @@
 	hba->ksm.ksm_ll_ops = ufshcd_ksm_ops;
 	/* UFS only supports 8 bytes for any DUN */
 	hba->ksm.max_dun_bytes_supported = 8;
+	hba->ksm.features = BLK_CRYPTO_FEATURE_STANDARD_KEYS;
 	hba->ksm.dev = hba->dev;
 
 	/*
@@ -227,9 +236,9 @@
 	if (!(hba->caps & UFSHCD_CAP_CRYPTO))
 		return;
 
-	/* Clear all keyslots - the number of keyslots is (CFGC + 1) */
-	for (slot = 0; slot < hba->crypto_capabilities.config_count + 1; slot++)
-		ufshcd_clear_keyslot(hba, slot);
+	/* Clear all keyslots */
+	for (slot = 0; slot < hba->ksm.num_slots; slot++)
+		hba->ksm.ksm_ll_ops.keyslot_evict(&hba->ksm, NULL, slot);
 }
 
 void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba,
diff --git a/drivers/scsi/ufs/ufshcd-crypto.h b/drivers/scsi/ufs/ufshcd-crypto.h
index d53851b..b75d8ca 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.h
+++ b/drivers/scsi/ufs/ufshcd-crypto.h
@@ -34,6 +34,19 @@
 	}
 }
 
+static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
+					    struct ufshcd_lrb *lrbp)
+{
+	if (!(hba->quirks & UFSHCD_QUIRK_KEYS_IN_PRDT))
+		return;
+
+	if (!lrbp->cmd->request->crypt_ctx)
+		return;
+
+	memzero_explicit(lrbp->ucd_prdt_ptr,
+			 hba->sg_entry_size * scsi_sg_count(lrbp->cmd));
+}
+
 bool ufshcd_crypto_enable(struct ufs_hba *hba);
 
 int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba);
@@ -54,6 +67,9 @@
 ufshcd_prepare_req_desc_hdr_crypto(struct ufshcd_lrb *lrbp, u32 *dword_0,
 				   u32 *dword_1, u32 *dword_3) { }
 
+static inline void ufshcd_crypto_clear_prdt(struct ufs_hba *hba,
+					    struct ufshcd_lrb *lrbp) { }
+
 static inline bool ufshcd_crypto_enable(struct ufs_hba *hba)
 {
 	return false;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 0c148fc..333304d 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -220,11 +220,11 @@
 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
 static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_clear_ua_wluns(struct ufs_hba *hba);
 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
 static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
 				 bool skip_ref_clk);
 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
 static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
@@ -245,6 +245,8 @@
 static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
 static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
 static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
 
 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
 {
@@ -348,7 +350,7 @@
 		unsigned int tag, const char *str)
 {
 	sector_t lba = -1;
-	u8 opcode = 0;
+	u8 opcode = 0, group_id = 0;
 	u32 intr, doorbell;
 	struct ufshcd_lrb *lrbp = &hba->lrb[tag];
 	struct scsi_cmnd *cmd = lrbp->cmd;
@@ -374,13 +376,20 @@
 				lba = cmd->request->bio->bi_iter.bi_sector;
 			transfer_len = be32_to_cpu(
 				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
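+			/* the WRITE(10) CDB carries the GROUP NUMBER in byte 6 */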
+			if (opcode == WRITE_10)
+				group_id = lrbp->cmd->cmnd[6];
+		} else if (opcode == UNMAP) {
+			if (cmd->request) {
+				lba = scsi_get_lba(cmd);
+				transfer_len = blk_rq_bytes(cmd->request);
+			}
 		}
 	}
 
 	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
 	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	trace_ufshcd_command(dev_name(hba->dev), str, tag,
-				doorbell, transfer_len, intr, lba, opcode);
+			doorbell, transfer_len, intr, lba, opcode, group_id);
 }
 
 static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
@@ -476,7 +485,7 @@
 		prdt_length = le16_to_cpu(
 			lrbp->utr_descriptor_ptr->prd_table_length);
 		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
-			prdt_length /= sizeof(struct ufshcd_sg_entry);
+			prdt_length /= hba->sg_entry_size;
 
 		dev_err(hba->dev,
 			"UPIU[%d] - PRDT - %d entries  phys@0x%llx\n",
@@ -485,7 +494,7 @@
 
 		if (pr_prdt)
 			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
-				sizeof(struct ufshcd_sg_entry) * prdt_length);
+				hba->sg_entry_size * prdt_length);
 	}
 }
 
@@ -1555,6 +1564,7 @@
 	}
 
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ufshcd_hba_vreg_set_hpm(hba);
 	ufshcd_setup_clocks(hba, true);
 
 	ufshcd_enable_irq(hba);
@@ -1720,6 +1730,8 @@
 		/* If link is active, device ref_clk can't be switched off */
 		__ufshcd_setup_clocks(hba, false, true);
 
+	/* Put the host controller in low power mode if possible */
+	ufshcd_hba_vreg_set_lpm(hba);
 	/*
 	 * In case you are here to cancel this work the gating state
 	 * would be marked as REQ_CLKS_ON. In this case keep the state
@@ -1751,8 +1763,9 @@
 
 	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
 	    hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
-	    ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks ||
-	    hba->active_uic_cmd || hba->uic_async_done)
+	    hba->outstanding_tasks ||
+	    hba->active_uic_cmd || hba->uic_async_done ||
+	    hba->clk_gating.state == CLKS_OFF)
 		return;
 
 	hba->clk_gating.state = REQ_CLKS_OFF;
@@ -1814,19 +1827,19 @@
 		return -EINVAL;
 
 	value = !!value;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (value == hba->clk_gating.is_enabled)
 		goto out;
 
-	if (value) {
-		ufshcd_release(hba);
-	} else {
-		spin_lock_irqsave(hba->host->host_lock, flags);
+	if (value)
+		__ufshcd_release(hba);
+	else
 		hba->clk_gating.active_reqs++;
-		spin_unlock_irqrestore(hba->host->host_lock, flags);
-	}
 
 	hba->clk_gating.is_enabled = value;
 out:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	return count;
 }
 
@@ -1874,7 +1887,7 @@
 	snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clk_gating_%d",
 		 hba->host->host_no);
 	hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
-							   WQ_MEM_RECLAIM);
+					WQ_MEM_RECLAIM | WQ_HIGHPRI);
 
 	hba->clk_gating.is_enabled = true;
 
@@ -1964,6 +1977,7 @@
 	lrbp->issue_time_stamp = ktime_get();
 	lrbp->compl_time_stamp = ktime_set(0, 0);
 	ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
+	ufshcd_vops_send_command(hba, lrbp);
 	ufshcd_add_command_trace(hba, task_tag, "send");
 	ufshcd_clk_scaling_start_busy(hba);
 	__set_bit(task_tag, &hba->outstanding_reqs);
@@ -2210,7 +2224,7 @@
  */
 static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 {
-	struct ufshcd_sg_entry *prd_table;
+	struct ufshcd_sg_entry *prd;
 	struct scatterlist *sg;
 	struct scsi_cmnd *cmd;
 	int sg_segments;
@@ -2225,28 +2239,28 @@
 
 		if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
 			lrbp->utr_descriptor_ptr->prd_table_length =
-				cpu_to_le16((sg_segments *
-					sizeof(struct ufshcd_sg_entry)));
+				cpu_to_le16(sg_segments * hba->sg_entry_size);
 		else
 			lrbp->utr_descriptor_ptr->prd_table_length =
 				cpu_to_le16((u16) (sg_segments));
 
-		prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
+		prd = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
 
 		scsi_for_each_sg(cmd, sg, sg_segments, i) {
-			prd_table[i].size  =
+			prd->size =
 				cpu_to_le32(((u32) sg_dma_len(sg))-1);
-			prd_table[i].base_addr =
+			prd->base_addr =
 				cpu_to_le32(lower_32_bits(sg->dma_address));
-			prd_table[i].upper_addr =
+			prd->upper_addr =
 				cpu_to_le32(upper_32_bits(sg->dma_address));
-			prd_table[i].reserved = 0;
+			prd->reserved = 0;
+			prd = (void *)prd + hba->sg_entry_size;
 		}
 	} else {
 		lrbp->utr_descriptor_ptr->prd_table_length = 0;
 	}
 
-	return 0;
+	return ufshcd_vops_fill_prdt(hba, lrbp, sg_segments);
 }
 
 /**
@@ -2499,10 +2513,11 @@
 
 static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i)
 {
-	struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr;
+	struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr +
+		i * sizeof_utp_transfer_cmd_desc(hba);
 	struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr;
 	dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr +
-		i * sizeof(struct utp_transfer_cmd_desc);
+		i * sizeof_utp_transfer_cmd_desc(hba);
 	u16 response_offset = offsetof(struct utp_transfer_cmd_desc,
 				       response_upiu);
 	u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table);
@@ -2510,11 +2525,11 @@
 	lrb->utr_descriptor_ptr = utrdlp + i;
 	lrb->utrd_dma_addr = hba->utrdl_dma_addr +
 		i * sizeof(struct utp_transfer_req_desc);
-	lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i);
+	lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp;
 	lrb->ucd_req_dma_addr = cmd_desc_element_addr;
-	lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+	lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu;
 	lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset;
-	lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+	lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table;
 	lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset;
 }
 
@@ -2566,6 +2581,13 @@
 	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
 	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
 
+	err = ufshcd_vops_prepare_command(hba, cmd->request, lrbp);
+	if (err) {
+		lrbp->cmd = NULL;
+		ufshcd_release(hba);
+		goto out;
+	}
+
 	ufshcd_prepare_lrbp_crypto(cmd->request, lrbp);
 
 	lrbp->req_abort_skip = false;
@@ -2849,7 +2871,7 @@
 	(*request)->upiu_req.selector = selector;
 }
 
-static int ufshcd_query_flag_retry(struct ufs_hba *hba,
+int ufshcd_query_flag_retry(struct ufs_hba *hba,
 	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
 {
 	int ret;
@@ -2871,6 +2893,7 @@
 			__func__, opcode, idn, ret, retries);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufshcd_query_flag_retry);
 
 /**
  * ufshcd_query_flag() - API function for sending flag query requests
@@ -2939,6 +2962,7 @@
 	ufshcd_release(hba);
 	return err;
 }
+EXPORT_SYMBOL_GPL(ufshcd_query_flag);
 
 /**
  * ufshcd_query_attr - API function for sending attribute requests
@@ -3003,6 +3027,7 @@
 	ufshcd_release(hba);
 	return err;
 }
+EXPORT_SYMBOL_GPL(ufshcd_query_attr);
 
 /**
  * ufshcd_query_attr_retry() - API function for sending query
@@ -3017,7 +3042,7 @@
  *
  * Returns 0 for success, non-zero in case of failure
 */
-static int ufshcd_query_attr_retry(struct ufs_hba *hba,
+int ufshcd_query_attr_retry(struct ufs_hba *hba,
 	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
 	u32 *attr_val)
 {
@@ -3040,6 +3065,7 @@
 			__func__, idn, ret, QUERY_REQ_RETRIES);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufshcd_query_attr_retry);
 
 static int __ufshcd_query_descriptor(struct ufs_hba *hba,
 			enum query_opcode opcode, enum desc_idn idn, u8 index,
@@ -3137,6 +3163,7 @@
 
 	return err;
 }
+EXPORT_SYMBOL_GPL(ufshcd_query_descriptor_retry);
 
 /**
  * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
@@ -3253,6 +3280,7 @@
 		kfree(desc_buf);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufshcd_read_desc_param);
 
 /**
  * struct uc_string_id - unicode string
@@ -3426,7 +3454,7 @@
 	size_t utmrdl_size, utrdl_size, ucdl_size;
 
 	/* Allocate memory for UTP command descriptors */
-	ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
+	ucdl_size = (sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs);
 	hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
 						  ucdl_size,
 						  &hba->ucdl_dma_addr,
@@ -3520,7 +3548,7 @@
 	prdt_offset =
 		offsetof(struct utp_transfer_cmd_desc, prd_table);
 
-	cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
+	cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba);
 	cmd_desc_dma_addr = hba->ucdl_dma_addr;
 
 	for (i = 0; i < hba->nutrs; i++) {
@@ -3936,7 +3964,7 @@
 }
 EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
 
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 {
 	int ret;
 	struct uic_command uic_cmd = {0};
@@ -3958,6 +3986,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_enter);
 
 int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 {
@@ -4970,10 +4999,12 @@
 		lrbp->compl_time_stamp = ktime_get();
 		cmd = lrbp->cmd;
 		if (cmd) {
+			ufshcd_vops_compl_command(hba, lrbp);
 			ufshcd_add_command_trace(hba, index, "complete");
 			result = ufshcd_transfer_rsp_status(hba, lrbp);
 			scsi_dma_unmap(cmd);
 			cmd->result = result;
+			ufshcd_crypto_clear_prdt(hba, lrbp);
 			/* Mark completed command as NULL in LRB */
 			lrbp->cmd = NULL;
 			/* Do not touch lrbp after scsi done */
@@ -5218,7 +5249,7 @@
  * to know whether auto bkops is enabled or disabled after this function
  * returns control to it.
  */
-static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
+int ufshcd_bkops_ctrl(struct ufs_hba *hba,
 			     enum bkops_status status)
 {
 	int err;
@@ -5243,6 +5274,7 @@
 out:
 	return err;
 }
+EXPORT_SYMBOL_GPL(ufshcd_bkops_ctrl);
 
 /**
  * ufshcd_urgent_bkops - handle urgent bkops exception event
@@ -6841,7 +6873,8 @@
 
 	/* Establish the link again and restore the device */
 	err = ufshcd_probe_hba(hba, false);
-
+	if (!err)
+		ufshcd_clear_ua_wluns(hba);
 out:
 	if (err)
 		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@ -7092,7 +7125,6 @@
 static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
 {
 	int ret = 0;
-	struct scsi_device *sdev_rpmb;
 	struct scsi_device *sdev_boot;
 
 	hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
@@ -7105,14 +7137,14 @@
 	ufshcd_blk_pm_runtime_init(hba->sdev_ufs_device);
 	scsi_device_put(hba->sdev_ufs_device);
 
-	sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+	hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
 		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
-	if (IS_ERR(sdev_rpmb)) {
-		ret = PTR_ERR(sdev_rpmb);
+	if (IS_ERR(hba->sdev_rpmb)) {
+		ret = PTR_ERR(hba->sdev_rpmb);
 		goto remove_sdev_ufs_device;
 	}
-	ufshcd_blk_pm_runtime_init(sdev_rpmb);
-	scsi_device_put(sdev_rpmb);
+	ufshcd_blk_pm_runtime_init(hba->sdev_rpmb);
+	scsi_device_put(hba->sdev_rpmb);
 
 	sdev_boot = __scsi_add_device(hba->host, 0, 0,
 		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
@@ -7636,6 +7668,64 @@
 	return ret;
 }
 
+static int
+ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp);
+
+static int ufshcd_clear_ua_wlun(struct ufs_hba *hba, u8 wlun)
+{
+	struct scsi_device *sdp;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (wlun == UFS_UPIU_UFS_DEVICE_WLUN) {
+		sdp = hba->sdev_ufs_device;
+	} else if (wlun == UFS_UPIU_RPMB_WLUN) {
+		sdp = hba->sdev_rpmb;
+	} else {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		return -EINVAL;
+	}
+
+	if (sdp) {
+		ret = scsi_device_get(sdp);
+		if (!ret && !scsi_device_online(sdp)) {
+			ret = -ENODEV;
+			scsi_device_put(sdp);
+		}
+	} else {
+		ret = -ENODEV;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (ret)
+		goto out_err;
+
+	ret = ufshcd_send_request_sense(hba, sdp);
+	scsi_device_put(sdp);
+out_err:
+	if (ret)
+		dev_err(hba->dev, "%s: UAC clear LU=%x ret = %d\n",
+				__func__, wlun, ret);
+	return ret;
+}
+
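+/*
+ * Clear the UNIT ATTENTION condition on the device and RPMB well-known
+ * LUs by issuing REQUEST SENSE to each of them.
+ */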
+static int ufshcd_clear_ua_wluns(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	if (!hba->wlun_dev_clr_ua)
+		goto out;
+
+	ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_UFS_DEVICE_WLUN);
+	if (!ret)
+		ret = ufshcd_clear_ua_wlun(hba, UFS_UPIU_RPMB_WLUN);
+	if (!ret)
+		hba->wlun_dev_clr_ua = false;
+out:
+	if (ret)
+		dev_err(hba->dev, "%s: Failed to clear UAC WLUNS ret = %d\n",
+				__func__, ret);
+	return ret;
+}
+
 /**
  * ufshcd_probe_hba - probe hba to detect device and initialize
  * @hba: per-adapter instance
@@ -7755,6 +7845,8 @@
 		pm_runtime_put_sync(hba->dev);
 		ufshcd_exit_clk_scaling(hba);
 		ufshcd_hba_exit(hba);
+	} else {
+		ufshcd_clear_ua_wluns(hba);
 	}
 }
 
@@ -8273,13 +8365,7 @@
 	 * handling context.
 	 */
 	hba->host->eh_noresume = 1;
-	if (hba->wlun_dev_clr_ua) {
-		ret = ufshcd_send_request_sense(hba, sdp);
-		if (ret)
-			goto out;
-		/* Unit attention condition is cleared now */
-		hba->wlun_dev_clr_ua = false;
-	}
+	ufshcd_clear_ua_wluns(hba);
 
 	cmd[4] = pwr_mode << 4;
 
@@ -8440,13 +8526,13 @@
 
 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
 {
-	if (ufshcd_is_link_off(hba))
+	if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
 		ufshcd_setup_hba_vreg(hba, false);
 }
 
 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
 {
-	if (ufshcd_is_link_off(hba))
+	if (ufshcd_is_link_off(hba) || ufshcd_can_aggressive_pc(hba))
 		ufshcd_setup_hba_vreg(hba, true);
 }
 
@@ -9008,6 +9094,7 @@
 	hba->dev = dev;
 	*hba_handle = hba;
 	hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL;
+	hba->sg_entry_size = sizeof(struct ufshcd_sg_entry);
 
 	INIT_LIST_HEAD(&hba->clk_list_head);
 
@@ -9223,7 +9310,7 @@
 	ufshcd_set_ufs_dev_active(hba);
 
 	async_schedule(ufshcd_async_scan, hba);
-	ufs_sysfs_add_nodes(hba->dev);
+	ufs_sysfs_add_nodes(hba);
 
 	return 0;
 
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index e0f00a4..792dcca 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -289,6 +289,12 @@
  * @phy_initialization: used to initialize phys
  * @device_reset: called to issue a reset pulse on the UFS device
  * @program_key: program or evict an inline encryption key
+ * @fill_prdt: called after initializing the standard PRDT fields so that any
+ *	       variant-specific PRDT fields can be initialized too
+ * @prepare_command: called when a request is first received
+ * @update_sysfs: adds vendor-specific sysfs entries
+ * @send_command: adds vendor-specific work when sending a command
+ * @compl_command: adds vendor-specific work when completing a command
  */
 struct ufs_hba_variant_ops {
 	const char *name;
@@ -324,6 +330,13 @@
 					void *data);
 	int	(*program_key)(struct ufs_hba *hba,
 			       const union ufs_crypto_cfg_entry *cfg, int slot);
+	int	(*fill_prdt)(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
+			     unsigned int segments);
+	int	(*prepare_command)(struct ufs_hba *hba,
+				struct request *rq, struct ufshcd_lrb *lrbp);
+	int	(*update_sysfs)(struct ufs_hba *hba);
+	void	(*send_command)(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
+	void	(*compl_command)(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
 };
 
 /* clock gating state  */
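As an illustration (hypothetical code, not part of this patch), a vendor variant driver would wire up the new hooks through its ops table:

static int example_prepare_command(struct ufs_hba *hba, struct request *rq,
				   struct ufshcd_lrb *lrbp)
{
	/* per-request vendor-specific setup would go here */
	return 0;
}

static void example_send_command(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
	/* e.g. update vendor statistics before the doorbell is rung */
}

static const struct ufs_hba_variant_ops example_vops = {
	.name		 = "example",
	.prepare_command = example_prepare_command,
	.send_command	 = example_send_command,
};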
@@ -544,6 +557,28 @@
 	 */
 	UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL		= 1 << 12,
 
+	/*
+	 * This quirk needs to be enabled if the host controller supports inline
+	 * encryption, but it needs to initialize the crypto capabilities in a
+	 * nonstandard way and/or it needs to override blk_ksm_ll_ops.  If
+	 * enabled, the standard code won't initialize the blk_keyslot_manager;
+	 * ufs_hba_variant_ops::init() must do it instead.
+	 */
+	UFSHCD_QUIRK_CUSTOM_KEYSLOT_MANAGER		= 1 << 20,
+
+	/*
+	 * This quirk needs to be enabled if the host controller supports inline
+	 * encryption, but the CRYPTO_GENERAL_ENABLE bit is not implemented and
+	 * breaks the HCE sequence if used.
+	 */
+	UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE		= 1 << 21,
+
+	/*
+	 * This quirk needs to be enabled if the host controller requires that
+	 * the PRDT be cleared after each encrypted request because encryption
+	 * keys were stored in it.
+	 */
+	UFSHCD_QUIRK_KEYS_IN_PRDT			= 1 << 22,
 };
 
 enum ufshcd_caps {
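A controller driver that needs the nonstandard crypto behaviour described in the quirk comments above would opt in from its variant init; a hypothetical snippet:

static int example_hba_init(struct ufs_hba *hba)
{
	/*
	 * With the first quirk set, the standard code skips keyslot manager
	 * setup, so this init() must initialize the blk_keyslot_manager.
	 */
	hba->quirks |= UFSHCD_QUIRK_CUSTOM_KEYSLOT_MANAGER |
		       UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE |
		       UFSHCD_QUIRK_KEYS_IN_PRDT;
	return 0;
}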
@@ -594,6 +629,13 @@
 	 * inline crypto engine, if it is present
 	 */
 	UFSHCD_CAP_CRYPTO				= 1 << 8,
+
+	/*
+	 * This capability allows the controller regulators to be put into
+	 * low-power mode aggressively during clock gating, which increases
+	 * the power savings.
+	 */
+	UFSHCD_CAP_AGGR_POWER_COLLAPSE			= 1 << 9,
 };
 
 struct ufs_hba_variant_params {
@@ -624,6 +666,7 @@
  * @ufs_version: UFS Version to which controller complies
  * @vops: pointer to variant specific operations
  * @priv: pointer to variant specific private data
+ * @sg_entry_size: size of struct ufshcd_sg_entry (may include variant fields)
  * @irq: Irq number of the controller
  * @active_uic_cmd: handle of active UIC command
  * @uic_cmd_mutex: mutex for uic command
@@ -683,6 +726,7 @@
 	 * "UFS device" W-LU.
 	 */
 	struct scsi_device *sdev_ufs_device;
+	struct scsi_device *sdev_rpmb;
 
 	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
 	enum uic_link_state uic_link_state;
@@ -709,6 +753,7 @@
 	const struct ufs_hba_variant_ops *vops;
 	struct ufs_hba_variant_params *vps;
 	void *priv;
+	size_t sg_entry_size;
 	unsigned int irq;
 	bool is_irq_enabled;
 	enum ufs_ref_clk_freq dev_ref_clk_freq;
@@ -831,6 +876,12 @@
 #endif
 }
 
+static inline bool ufshcd_can_aggressive_pc(struct ufs_hba *hba)
+{
+	return !!(ufshcd_is_link_hibern8(hba) &&
+		  (hba->caps & UFSHCD_CAP_AGGR_POWER_COLLAPSE));
+}
+
 static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
 {
 	return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
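The capability is opt-in: a platform driver whose regulators can safely be cut while the link is in hibern8 would set it during its init, e.g. (illustrative line, not from this patch):

	hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;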
@@ -1010,8 +1061,14 @@
 			   u8 param_size);
 int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
+int ufshcd_query_attr_retry(struct ufs_hba *hba,
+	enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector,
+	u32 *attr_val);
 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 	enum flag_idn idn, u8 index, bool *flag_res);
+int ufshcd_query_flag_retry(struct ufs_hba *hba,
+	enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res);
+int ufshcd_bkops_ctrl(struct ufs_hba *hba, enum bkops_status status);
 
 void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);
 void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
@@ -1195,6 +1252,45 @@
 		hba->vops->config_scaling_param(hba, profile, data);
 }
 
+static inline int ufshcd_vops_fill_prdt(struct ufs_hba *hba,
+					struct ufshcd_lrb *lrbp,
+					unsigned int segments)
+{
+	if (hba->vops && hba->vops->fill_prdt)
+		return hba->vops->fill_prdt(hba, lrbp, segments);
+
+	return 0;
+}
+
+static inline int ufshcd_vops_prepare_command(struct ufs_hba *hba,
+		struct request *rq, struct ufshcd_lrb *lrbp)
+{
+	if (hba->vops && hba->vops->prepare_command)
+		return hba->vops->prepare_command(hba, rq, lrbp);
+	return 0;
+}
+
+static inline int ufshcd_vops_update_sysfs(struct ufs_hba *hba)
+{
+	if (hba->vops && hba->vops->update_sysfs)
+		return hba->vops->update_sysfs(hba);
+	return 0;
+}
+
+static inline void ufshcd_vops_send_command(struct ufs_hba *hba,
+				struct ufshcd_lrb *lrbp)
+{
+	if (hba->vops && hba->vops->send_command)
+		hba->vops->send_command(hba, lrbp);
+}
+
+static inline void ufshcd_vops_compl_command(struct ufs_hba *hba,
+				struct ufshcd_lrb *lrbp)
+{
+	if (hba->vops && hba->vops->compl_command)
+		hba->vops->compl_command(hba, lrbp);
+}
+
 extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
 
 /*
@@ -1214,5 +1310,6 @@
 
 int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
 		     const char *prefix);
-
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 #endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 6795e1f..f41f74b 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -418,20 +418,28 @@
 	__le32    upper_addr;
 	__le32    reserved;
 	__le32    size;
+	/*
+	 * followed by variant-specific fields if
+	 * hba->sg_entry_size != sizeof(struct ufshcd_sg_entry)
+	 */
 };
 
 /**
  * struct utp_transfer_cmd_desc - UFS Command Descriptor structure
  * @command_upiu: Command UPIU Frame address
  * @response_upiu: Response UPIU Frame address
- * @prd_table: Physical Region Descriptor
+ * @prd_table: Physical Region Descriptor: an array of SG_ALL struct
+ *	ufshcd_sg_entry entries. Variant-specific fields may follow each entry.
  */
 struct utp_transfer_cmd_desc {
 	u8 command_upiu[ALIGNED_UPIU_SIZE];
 	u8 response_upiu[ALIGNED_UPIU_SIZE];
-	struct ufshcd_sg_entry    prd_table[SG_ALL];
+	u8 prd_table[];
 };
 
+#define sizeof_utp_transfer_cmd_desc(hba)	\
+	(sizeof(struct utp_transfer_cmd_desc) + SG_ALL * (hba)->sg_entry_size)
+
 /**
  * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
  * @dword0: Descriptor Header DW0
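With prd_table now a flat byte array, entry i starts at byte offset i * hba->sg_entry_size rather than i * sizeof(struct ufshcd_sg_entry). A hypothetical accessor (not part of the patch) makes the arithmetic explicit:

static inline struct ufshcd_sg_entry *
example_prdt_entry(struct ufs_hba *hba,
		   struct utp_transfer_cmd_desc *cmd_desc, unsigned int i)
{
	/* step in units of the (possibly extended) per-entry size */
	return (struct ufshcd_sg_entry *)(cmd_desc->prd_table +
					  i * hba->sg_entry_size);
}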
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 6a3b69b..79b568f 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -17,7 +17,7 @@
 	  Subsystem (AOSS) using Qualcomm Messaging Protocol (QMP).
 
 config QCOM_COMMAND_DB
-	bool "Qualcomm Command DB"
+	tristate "Qualcomm Command DB"
 	depends on ARCH_QCOM || COMPILE_TEST
 	depends on OF_RESERVED_MEM
 	help
@@ -108,8 +108,9 @@
 	  Say y here if you intend to boot the modem remoteproc.
 
 config QCOM_RPMH
-	bool "Qualcomm RPM-Hardened (RPMH) Communication"
+	tristate "Qualcomm RPM-Hardened (RPMH) Communication"
 	depends on ARCH_QCOM || COMPILE_TEST
+	depends on (QCOM_COMMAND_DB || !QCOM_COMMAND_DB)
 	help
 	  Support for communication with the hardened-RPM blocks in
 	  Qualcomm Technologies Inc (QTI) SoCs. RPMH communication uses an
diff --git a/drivers/soc/qcom/cmd-db.c b/drivers/soc/qcom/cmd-db.c
index fc56106..dd87201 100644
--- a/drivers/soc/qcom/cmd-db.c
+++ b/drivers/soc/qcom/cmd-db.c
@@ -1,8 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved. */
+/* Copyright (c) 2016-2018, 2020, The Linux Foundation. All rights reserved. */
 
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_reserved_mem.h>
@@ -340,12 +341,14 @@
 	{ .compatible = "qcom,cmd-db" },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, cmd_db_match_table);
 
 static struct platform_driver cmd_db_dev_driver = {
 	.probe  = cmd_db_dev_probe,
 	.driver = {
 		   .name = "cmd-db",
 		   .of_match_table = cmd_db_match_table,
+		   .suppress_bind_attrs = true,
 	},
 };
 
@@ -354,3 +357,6 @@
 	return platform_driver_register(&cmd_db_dev_driver);
 }
 arch_initcall(cmd_db_device_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Command DB Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index a297911..44a32cb 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -13,6 +13,7 @@
 #include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/list.h>
+#include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/of_platform.h>
@@ -497,7 +498,7 @@
 		write_tcs_cmd(drv, RSC_DRV_CMD_MSGID, tcs_id, j, msgid);
 		write_tcs_cmd(drv, RSC_DRV_CMD_ADDR, tcs_id, j, cmd->addr);
 		write_tcs_cmd(drv, RSC_DRV_CMD_DATA, tcs_id, j, cmd->data);
-		trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd);
+		/* trace_rpmh_send_msg_rcuidle(drv, tcs_id, j, msgid, cmd); */
 	}
 
 	write_tcs_reg(drv, RSC_DRV_CMD_WAIT_FOR_CMPL, tcs_id, cmd_complete);
@@ -1018,6 +1019,7 @@
 	{ .compatible = "qcom,rpmh-rsc", },
 	{ }
 };
+MODULE_DEVICE_TABLE(of, rpmh_drv_match);
 
 static struct platform_driver rpmh_driver = {
 	.probe = rpmh_rsc_probe,
@@ -1033,3 +1036,6 @@
 	return platform_driver_register(&rpmh_driver);
 }
 arch_initcall(rpmh_driver_init);
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index fc9a597..cea5396 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2026,6 +2026,7 @@
 	/* Store a pointer to the node in the device structure */
 	of_node_get(nc);
 	spi->dev.of_node = nc;
+	spi->dev.fwnode = of_fwnode_handle(nc);
 
 	/* Register the new device */
 	rc = spi_add_device(spi);
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index 989fe84..7b7da97 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -11,17 +11,4 @@
 	  If you're not using Android it's probably safe to
 	  say N here.
 
-config ION_SYSTEM_HEAP
-	bool "Ion system heap"
-	depends on ION
-	help
-	  Choose this option to enable the Ion system heap. The system heap
-	  is backed by pages from the buddy allocator. If in doubt, say Y.
-
-config ION_CMA_HEAP
-	bool "Ion CMA heap support"
-	depends on ION && DMA_CMA
-	help
-	  Choose this option to enable CMA heaps with Ion. This heap is backed
-	  by the Contiguous Memory Allocator (CMA). If your system has these
-	  regions, you should say Y here.
+source "drivers/staging/android/ion/heaps/Kconfig"
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index 5f4487b..7f8fd0f 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,4 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_ION) += ion.o ion_heap.o
-obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o
-obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o
+obj-$(CONFIG_ION)	+= ion.o ion_buffer.o ion_dma_buf.o ion_heap.o
+CFLAGS_ion_buffer.o	= -I$(src)
+obj-y			+= heaps/
diff --git a/drivers/staging/android/ion/heaps/Kconfig b/drivers/staging/android/ion/heaps/Kconfig
new file mode 100644
index 0000000..5034c45
--- /dev/null
+++ b/drivers/staging/android/ion/heaps/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+config ION_SYSTEM_HEAP
+	tristate "Ion system heap"
+	depends on ION
+	help
+	  Choose this option to enable the Ion system heap. The system heap
+	  is backed by pages from the buddy allocator. If in doubt, say Y.
+
+config ION_CMA_HEAP
+	tristate "Ion CMA heap support"
+	depends on ION && DMA_CMA
+	help
+	  Choose this option to enable CMA heaps with Ion. This heap is backed
+	  by the Contiguous Memory Allocator (CMA). If your system has these
+	  regions, you should say Y here.
diff --git a/drivers/staging/android/ion/heaps/Makefile b/drivers/staging/android/ion/heaps/Makefile
new file mode 100644
index 0000000..82e36e8
--- /dev/null
+++ b/drivers/staging/android/ion/heaps/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_sys_heap.o
+ion_sys_heap-y := ion_system_heap.o ion_page_pool.o
+
+obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/heaps/ion_cma_heap.c
similarity index 72%
rename from drivers/staging/android/ion/ion_cma_heap.c
rename to drivers/staging/android/ion/heaps/ion_cma_heap.c
index bf65e67..6ba7fd8 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/heaps/ion_cma_heap.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/ion.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/err.h>
@@ -14,12 +15,10 @@
 #include <linux/scatterlist.h>
 #include <linux/highmem.h>
 
-#include "ion.h"
-
 struct ion_cma_heap {
 	struct ion_heap heap;
 	struct cma *cma;
-};
+} cma_heaps[MAX_CMA_AREAS];
 
 #define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
 
@@ -71,6 +70,9 @@
 
 	buffer->priv_virt = pages;
 	buffer->sg_table = table;
+
+	ion_buffer_prep_noncached(buffer);
+
 	return 0;
 
 free_mem:
@@ -96,43 +98,54 @@
 static struct ion_heap_ops ion_cma_ops = {
 	.allocate = ion_cma_allocate,
 	.free = ion_cma_free,
-	.map_user = ion_heap_map_user,
-	.map_kernel = ion_heap_map_kernel,
-	.unmap_kernel = ion_heap_unmap_kernel,
 };
 
-static struct ion_heap *__ion_cma_heap_create(struct cma *cma)
+static int __ion_add_cma_heap(struct cma *cma, void *data)
 {
+	int *cma_nr = data;
 	struct ion_cma_heap *cma_heap;
+	int ret;
 
-	cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
+	if (*cma_nr >= MAX_CMA_AREAS)
+		return -EINVAL;
 
-	if (!cma_heap)
-		return ERR_PTR(-ENOMEM);
-
+	cma_heap = &cma_heaps[*cma_nr];
 	cma_heap->heap.ops = &ion_cma_ops;
-	cma_heap->cma = cma;
 	cma_heap->heap.type = ION_HEAP_TYPE_DMA;
-	return &cma_heap->heap;
-}
+	cma_heap->heap.name = cma_get_name(cma);
 
-static int __ion_add_cma_heaps(struct cma *cma, void *data)
-{
-	struct ion_heap *heap;
+	ret = ion_device_add_heap(&cma_heap->heap);
+	if (ret)
+		goto out;
 
-	heap = __ion_cma_heap_create(cma);
-	if (IS_ERR(heap))
-		return PTR_ERR(heap);
-
-	heap->name = cma_get_name(cma);
-
-	ion_device_add_heap(heap);
+	cma_heap->cma = cma;
+	*cma_nr += 1;
+out:
 	return 0;
 }
 
-static int ion_add_cma_heaps(void)
+static int __init ion_cma_heap_init(void)
 {
-	cma_for_each_area(__ion_add_cma_heaps, NULL);
-	return 0;
+	int ret;
+	int nr = 0;
+
+	ret = cma_for_each_area(__ion_add_cma_heap, &nr);
+	if (ret) {
+		for (nr = 0; nr < MAX_CMA_AREAS && cma_heaps[nr].cma; nr++)
+			ion_device_remove_heap(&cma_heaps[nr].heap);
+	}
+
+	return ret;
 }
-device_initcall(ion_add_cma_heaps);
+
+static void __exit ion_cma_heap_exit(void)
+{
+	int nr;
+
+	for (nr = 0; nr < MAX_CMA_AREAS && cma_heaps[nr].cma; nr++)
+		ion_device_remove_heap(&cma_heaps[nr].heap);
+}
+
+module_init(ion_cma_heap_init);
+module_exit(ion_cma_heap_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/heaps/ion_page_pool.c
similarity index 87%
rename from drivers/staging/android/ion/ion_page_pool.c
rename to drivers/staging/android/ion/heaps/ion_page_pool.c
index 0198b88..efd6ce4 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/heaps/ion_page_pool.c
@@ -10,7 +10,7 @@
 #include <linux/swap.h>
 #include <linux/sched/signal.h>
 
-#include "ion.h"
+#include "ion_page_pool.h"
 
 static inline struct page *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 {
@@ -79,6 +79,7 @@
 
 	return page;
 }
+EXPORT_SYMBOL_GPL(ion_page_pool_alloc);
 
 void ion_page_pool_free(struct ion_page_pool *pool, struct page *page)
 {
@@ -86,6 +87,7 @@
 
 	ion_page_pool_add(pool, page);
 }
+EXPORT_SYMBOL_GPL(ion_page_pool_free);
 
 static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
 {
@@ -97,6 +99,18 @@
 	return count << pool->order;
 }
 
+int ion_page_pool_nr_pages(struct ion_page_pool *pool)
+{
+	int nr_total_pages;
+
+	mutex_lock(&pool->mutex);
+	nr_total_pages = ion_page_pool_total(pool, true);
+	mutex_unlock(&pool->mutex);
+
+	return nr_total_pages;
+}
+EXPORT_SYMBOL_GPL(ion_page_pool_nr_pages);
+
 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 			 int nr_to_scan)
 {
@@ -130,6 +144,7 @@
 
 	return freed;
 }
+EXPORT_SYMBOL_GPL(ion_page_pool_shrink);
 
 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order)
 {
@@ -148,8 +163,10 @@
 
 	return pool;
 }
+EXPORT_SYMBOL_GPL(ion_page_pool_create);
 
 void ion_page_pool_destroy(struct ion_page_pool *pool)
 {
 	kfree(pool);
 }
+EXPORT_SYMBOL_GPL(ion_page_pool_destroy);
diff --git a/drivers/staging/android/ion/heaps/ion_page_pool.h b/drivers/staging/android/ion/heaps/ion_page_pool.h
new file mode 100644
index 0000000..10c7909
--- /dev/null
+++ b/drivers/staging/android/ion/heaps/ion_page_pool.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ION Page Pool kernel interface header
+ *
+ * Copyright (C) 2011 Google, Inc.
+ */
+
+#ifndef _ION_PAGE_POOL_H
+#define _ION_PAGE_POOL_H
+
+#include <linux/mm_types.h>
+#include <linux/mutex.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
+
+/**
+ * Functions for creating and destroying a heap pool -- allows you
+ * to keep a pool of pre-allocated memory to use from your heap. Keeping
+ * a pool of memory that is ready for DMA, i.e. any cached mappings have been
+ * invalidated from the cache, provides a significant performance benefit on
+ * many systems.
+ */
+
+/**
+ * struct ion_page_pool - pagepool struct
+ * @high_count:		number of highmem items in the pool
+ * @low_count:		number of lowmem items in the pool
+ * @high_items:		list of highmem items
+ * @low_items:		list of lowmem items
+ * @mutex:		lock protecting this struct and especially the count
+ *			item list
+ * @gfp_mask:		gfp_mask to use from alloc
+ * @order:		order of pages in the pool
+ * @list:		plist node for list of pools
+ *
+ * Allows you to keep a pool of pre-allocated pages to use from your heap.
+ * Keeping a pool of pages that is ready for DMA, i.e. any cached mappings
+ * have been invalidated from the cache, provides a significant performance
+ * benefit on many systems.
+ */
+struct ion_page_pool {
+	int high_count;
+	int low_count;
+	struct list_head high_items;
+	struct list_head low_items;
+	struct mutex mutex;
+	gfp_t gfp_mask;
+	unsigned int order;
+	struct plist_node list;
+};
+
+struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
+void ion_page_pool_destroy(struct ion_page_pool *pool);
+struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
+void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
+int ion_page_pool_nr_pages(struct ion_page_pool *pool);
+
+/**
+ * ion_page_pool_shrink - shrinks the size of the memory cached in the pool
+ * @pool:		the pool
+ * @gfp_mask:		the memory type to reclaim
+ * @nr_to_scan:		number of items to shrink in pages
+ *
+ * returns the number of items freed in pages
+ */
+int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
+			 int nr_to_scan);
+#endif /* _ION_PAGE_POOL_H */
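A sketch of the pool lifecycle using only the functions declared above (the order and GFP flags are illustrative):

static int example_pool_cycle(void)
{
	struct ion_page_pool *pool;
	struct page *page;

	pool = ion_page_pool_create(GFP_KERNEL | __GFP_ZERO, 0);
	if (!pool)
		return -ENOMEM;

	page = ion_page_pool_alloc(pool);	/* falls back to alloc_pages() while empty */
	if (page)
		ion_page_pool_free(pool, page);	/* returns the page to the pool */

	/* drain cached pages before destroying the (now empty) pool */
	ion_page_pool_shrink(pool, GFP_KERNEL, ion_page_pool_nr_pages(pool));
	ion_page_pool_destroy(pool);
	return 0;
}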
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/heaps/ion_system_heap.c
similarity index 65%
rename from drivers/staging/android/ion/ion_system_heap.c
rename to drivers/staging/android/ion/heaps/ion_system_heap.c
index eac0632..fc63049 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/heaps/ion_system_heap.c
@@ -9,12 +9,14 @@
 #include <linux/dma-mapping.h>
 #include <linux/err.h>
 #include <linux/highmem.h>
+#include <linux/ion.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 
-#include "ion.h"
+#include "ion_page_pool.h"
 
 #define NUM_ORDERS ARRAY_SIZE(orders)
 
@@ -139,6 +141,9 @@
 	}
 
 	buffer->sg_table = table;
+
+	ion_buffer_prep_noncached(buffer);
+
 	return 0;
 
 free_table:
@@ -160,7 +165,7 @@
 
 	/* zero the buffer before goto page pool */
 	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
-		ion_heap_buffer_zero(buffer);
+		ion_buffer_zero(buffer);
 
 	for_each_sgtable_sg(table, sg, i)
 		free_buffer_page(sys_heap, buffer, sg_page(sg));
@@ -203,14 +208,18 @@
 	return nr_total;
 }
 
-static struct ion_heap_ops system_heap_ops = {
-	.allocate = ion_system_heap_allocate,
-	.free = ion_system_heap_free,
-	.map_kernel = ion_heap_map_kernel,
-	.unmap_kernel = ion_heap_unmap_kernel,
-	.map_user = ion_heap_map_user,
-	.shrink = ion_system_heap_shrink,
-};
+static long ion_system_get_pool_size(struct ion_heap *heap)
+{
+	struct ion_system_heap *sys_heap;
+	long total_pages = 0;
+	int i;
+
+	sys_heap = container_of(heap, struct ion_system_heap, heap);
+	for (i = 0; i < NUM_ORDERS; i++)
+		total_pages += ion_page_pool_nr_pages(sys_heap->pools[i]);
+
+	return total_pages;
+}
 
 static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
 {
@@ -245,133 +254,38 @@
 	return -ENOMEM;
 }
 
-static struct ion_heap *__ion_system_heap_create(void)
-{
-	struct ion_system_heap *heap;
-
-	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
-	if (!heap)
-		return ERR_PTR(-ENOMEM);
-	heap->heap.ops = &system_heap_ops;
-	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
-	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
-
-	if (ion_system_heap_create_pools(heap->pools))
-		goto free_heap;
-
-	return &heap->heap;
-
-free_heap:
-	kfree(heap);
-	return ERR_PTR(-ENOMEM);
-}
-
-static int ion_system_heap_create(void)
-{
-	struct ion_heap *heap;
-
-	heap = __ion_system_heap_create();
-	if (IS_ERR(heap))
-		return PTR_ERR(heap);
-	heap->name = "ion_system_heap";
-
-	ion_device_add_heap(heap);
-
-	return 0;
-}
-device_initcall(ion_system_heap_create);
-
-static int ion_system_contig_heap_allocate(struct ion_heap *heap,
-					   struct ion_buffer *buffer,
-					   unsigned long len,
-					   unsigned long flags)
-{
-	int order = get_order(len);
-	struct page *page;
-	struct sg_table *table;
-	unsigned long i;
-	int ret;
-
-	page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
-	if (!page)
-		return -ENOMEM;
-
-	split_page(page, order);
-
-	len = PAGE_ALIGN(len);
-	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
-		__free_page(page + i);
-
-	table = kmalloc(sizeof(*table), GFP_KERNEL);
-	if (!table) {
-		ret = -ENOMEM;
-		goto free_pages;
-	}
-
-	ret = sg_alloc_table(table, 1, GFP_KERNEL);
-	if (ret)
-		goto free_table;
-
-	sg_set_page(table->sgl, page, len, 0);
-
-	buffer->sg_table = table;
-
-	return 0;
-
-free_table:
-	kfree(table);
-free_pages:
-	for (i = 0; i < len >> PAGE_SHIFT; i++)
-		__free_page(page + i);
-
-	return ret;
-}
-
-static void ion_system_contig_heap_free(struct ion_buffer *buffer)
-{
-	struct sg_table *table = buffer->sg_table;
-	struct page *page = sg_page(table->sgl);
-	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
-	unsigned long i;
-
-	for (i = 0; i < pages; i++)
-		__free_page(page + i);
-	sg_free_table(table);
-	kfree(table);
-}
-
-static struct ion_heap_ops kmalloc_ops = {
-	.allocate = ion_system_contig_heap_allocate,
-	.free = ion_system_contig_heap_free,
-	.map_kernel = ion_heap_map_kernel,
-	.unmap_kernel = ion_heap_unmap_kernel,
-	.map_user = ion_heap_map_user,
+static struct ion_heap_ops system_heap_ops = {
+	.allocate = ion_system_heap_allocate,
+	.free = ion_system_heap_free,
+	.shrink = ion_system_heap_shrink,
+	.get_pool_size = ion_system_get_pool_size,
 };
 
-static struct ion_heap *__ion_system_contig_heap_create(void)
+static struct ion_system_heap system_heap = {
+	.heap = {
+		.ops = &system_heap_ops,
+		.type = ION_HEAP_TYPE_SYSTEM,
+		.flags = ION_HEAP_FLAG_DEFER_FREE,
+		.name = "ion_system_heap",
+	}
+};
+
+static int __init ion_system_heap_init(void)
 {
-	struct ion_heap *heap;
+	int ret = ion_system_heap_create_pools(system_heap.pools);
+
+	if (ret)
+		return ret;
 
-	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
-	if (!heap)
-		return ERR_PTR(-ENOMEM);
-	heap->ops = &kmalloc_ops;
-	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
-	heap->name = "ion_system_contig_heap";
-
-	return heap;
+	return ion_device_add_heap(&system_heap.heap);
 }
 
-static int ion_system_contig_heap_create(void)
+static void __exit ion_system_heap_exit(void)
 {
-	struct ion_heap *heap;
-
-	heap = __ion_system_contig_heap_create();
-	if (IS_ERR(heap))
-		return PTR_ERR(heap);
-
-	ion_device_add_heap(heap);
-
-	return 0;
+	ion_device_remove_heap(&system_heap.heap);
+	ion_system_heap_destroy_pools(system_heap.pools);
 }
-device_initcall(ion_system_contig_heap_create);
+
+module_init(ion_system_heap_init);
+module_exit(ion_system_heap_exit);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index e1fe03c..268c461 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -3,8 +3,11 @@
  * ION Memory Allocator
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
  */
 
+#include <linux/bitmap.h>
 #include <linux/debugfs.h>
 #include <linux/device.h>
 #include <linux/dma-buf.h>
@@ -15,376 +18,42 @@
 #include <linux/fs.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
-#include <linux/miscdevice.h>
 #include <linux/mm.h>
 #include <linux/mm_types.h>
 #include <linux/rbtree.h>
 #include <linux/sched/task.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
-#include <linux/vmalloc.h>
 
-#include "ion.h"
+#include "ion_private.h"
+
+#define ION_CURRENT_ABI_VERSION  2
 
 static struct ion_device *internal_dev;
-static int heap_id;
 
-/* this function should only be called while dev->lock is held */
-static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
-					    struct ion_device *dev,
-					    unsigned long len,
-					    unsigned long flags)
+/* Entry into the ION allocator for the rest of the kernel */
+struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
+			  unsigned int flags)
 {
-	struct ion_buffer *buffer;
-	int ret;
-
-	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
-	if (!buffer)
-		return ERR_PTR(-ENOMEM);
-
-	buffer->heap = heap;
-	buffer->flags = flags;
-	buffer->dev = dev;
-	buffer->size = len;
-
-	ret = heap->ops->allocate(heap, buffer, len, flags);
-
-	if (ret) {
-		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
-			goto err2;
-
-		ion_heap_freelist_drain(heap, 0);
-		ret = heap->ops->allocate(heap, buffer, len, flags);
-		if (ret)
-			goto err2;
-	}
-
-	if (!buffer->sg_table) {
-		WARN_ONCE(1, "This heap needs to set the sgtable");
-		ret = -EINVAL;
-		goto err1;
-	}
-
-	spin_lock(&heap->stat_lock);
-	heap->num_of_buffers++;
-	heap->num_of_alloc_bytes += len;
-	if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm)
-		heap->alloc_bytes_wm = heap->num_of_alloc_bytes;
-	spin_unlock(&heap->stat_lock);
-
-	INIT_LIST_HEAD(&buffer->attachments);
-	mutex_init(&buffer->lock);
-	return buffer;
-
-err1:
-	heap->ops->free(buffer);
-err2:
-	kfree(buffer);
-	return ERR_PTR(ret);
+	return ion_dmabuf_alloc(internal_dev, len, heap_id_mask, flags);
 }
+EXPORT_SYMBOL_GPL(ion_alloc);
 
-void ion_buffer_destroy(struct ion_buffer *buffer)
+int ion_free(struct ion_buffer *buffer)
 {
-	if (buffer->kmap_cnt > 0) {
-		pr_warn_once("%s: buffer still mapped in the kernel\n",
-			     __func__);
-		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-	}
-	buffer->heap->ops->free(buffer);
-	spin_lock(&buffer->heap->stat_lock);
-	buffer->heap->num_of_buffers--;
-	buffer->heap->num_of_alloc_bytes -= buffer->size;
-	spin_unlock(&buffer->heap->stat_lock);
-
-	kfree(buffer);
+	return ion_buffer_destroy(internal_dev, buffer);
 }
+EXPORT_SYMBOL_GPL(ion_free);
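A hypothetical in-kernel client of the newly exported entry points (SZ_1M and the flag choice are illustrative):

static int example_ion_client(void)
{
	struct dma_buf *dbuf;

	dbuf = ion_alloc(SZ_1M, ION_HEAP_SYSTEM, ION_FLAG_CACHED);
	if (IS_ERR(dbuf))
		return PTR_ERR(dbuf);

	/* ... share via the ordinary dma-buf attach/map API ... */

	dma_buf_put(dbuf);	/* final put releases the underlying ion_buffer */
	return 0;
}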
 
-static void _ion_buffer_destroy(struct ion_buffer *buffer)
+static int ion_alloc_fd(size_t len, unsigned int heap_id_mask,
+			unsigned int flags)
 {
-	struct ion_heap *heap = buffer->heap;
-
-	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
-		ion_heap_freelist_add(heap, buffer);
-	else
-		ion_buffer_destroy(buffer);
-}
-
-static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
-{
-	void *vaddr;
-
-	if (buffer->kmap_cnt) {
-		buffer->kmap_cnt++;
-		return buffer->vaddr;
-	}
-	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
-	if (WARN_ONCE(!vaddr,
-		      "heap->ops->map_kernel should return ERR_PTR on error"))
-		return ERR_PTR(-EINVAL);
-	if (IS_ERR(vaddr))
-		return vaddr;
-	buffer->vaddr = vaddr;
-	buffer->kmap_cnt++;
-	return vaddr;
-}
-
-static void ion_buffer_kmap_put(struct ion_buffer *buffer)
-{
-	buffer->kmap_cnt--;
-	if (!buffer->kmap_cnt) {
-		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-		buffer->vaddr = NULL;
-	}
-}
-
-static struct sg_table *dup_sg_table(struct sg_table *table)
-{
-	struct sg_table *new_table;
-	int ret, i;
-	struct scatterlist *sg, *new_sg;
-
-	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
-	if (!new_table)
-		return ERR_PTR(-ENOMEM);
-
-	ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
-	if (ret) {
-		kfree(new_table);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	new_sg = new_table->sgl;
-	for_each_sgtable_sg(table, sg, i) {
-		memcpy(new_sg, sg, sizeof(*sg));
-		new_sg->dma_address = 0;
-		new_sg = sg_next(new_sg);
-	}
-
-	return new_table;
-}
-
-static void free_duped_table(struct sg_table *table)
-{
-	sg_free_table(table);
-	kfree(table);
-}
-
-struct ion_dma_buf_attachment {
-	struct device *dev;
-	struct sg_table *table;
-	struct list_head list;
-};
-
-static int ion_dma_buf_attach(struct dma_buf *dmabuf,
-			      struct dma_buf_attachment *attachment)
-{
-	struct ion_dma_buf_attachment *a;
-	struct sg_table *table;
-	struct ion_buffer *buffer = dmabuf->priv;
-
-	a = kzalloc(sizeof(*a), GFP_KERNEL);
-	if (!a)
-		return -ENOMEM;
-
-	table = dup_sg_table(buffer->sg_table);
-	if (IS_ERR(table)) {
-		kfree(a);
-		return -ENOMEM;
-	}
-
-	a->table = table;
-	a->dev = attachment->dev;
-	INIT_LIST_HEAD(&a->list);
-
-	attachment->priv = a;
-
-	mutex_lock(&buffer->lock);
-	list_add(&a->list, &buffer->attachments);
-	mutex_unlock(&buffer->lock);
-
-	return 0;
-}
-
-static void ion_dma_buf_detach(struct dma_buf *dmabuf,
-			       struct dma_buf_attachment *attachment)
-{
-	struct ion_dma_buf_attachment *a = attachment->priv;
-	struct ion_buffer *buffer = dmabuf->priv;
-
-	mutex_lock(&buffer->lock);
-	list_del(&a->list);
-	mutex_unlock(&buffer->lock);
-	free_duped_table(a->table);
-
-	kfree(a);
-}
-
-static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
-					enum dma_data_direction direction)
-{
-	struct ion_dma_buf_attachment *a = attachment->priv;
-	struct sg_table *table;
-	int ret;
-
-	table = a->table;
-
-	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
-	if (ret)
-		return ERR_PTR(ret);
-
-	return table;
-}
-
-static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
-			      struct sg_table *table,
-			      enum dma_data_direction direction)
-{
-	dma_unmap_sgtable(attachment->dev, table, direction, 0);
-}
-
-static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
-	struct ion_buffer *buffer = dmabuf->priv;
-	int ret = 0;
-
-	if (!buffer->heap->ops->map_user) {
-		pr_err("%s: this heap does not define a method for mapping to userspace\n",
-		       __func__);
-		return -EINVAL;
-	}
-
-	if (!(buffer->flags & ION_FLAG_CACHED))
-		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
-	mutex_lock(&buffer->lock);
-	/* now map it to userspace */
-	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
-	mutex_unlock(&buffer->lock);
-
-	if (ret)
-		pr_err("%s: failure mapping buffer to userspace\n",
-		       __func__);
-
-	return ret;
-}
-
-static void ion_dma_buf_release(struct dma_buf *dmabuf)
-{
-	struct ion_buffer *buffer = dmabuf->priv;
-
-	_ion_buffer_destroy(buffer);
-}
-
-static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-					enum dma_data_direction direction)
-{
-	struct ion_buffer *buffer = dmabuf->priv;
-	void *vaddr;
-	struct ion_dma_buf_attachment *a;
-	int ret = 0;
-
-	/*
-	 * TODO: Move this elsewhere because we don't always need a vaddr
-	 */
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		vaddr = ion_buffer_kmap_get(buffer);
-		if (IS_ERR(vaddr)) {
-			ret = PTR_ERR(vaddr);
-			goto unlock;
-		}
-		mutex_unlock(&buffer->lock);
-	}
-
-	mutex_lock(&buffer->lock);
-	list_for_each_entry(a, &buffer->attachments, list)
-		dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
-
-unlock:
-	mutex_unlock(&buffer->lock);
-	return ret;
-}
-
-static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
-				      enum dma_data_direction direction)
-{
-	struct ion_buffer *buffer = dmabuf->priv;
-	struct ion_dma_buf_attachment *a;
-
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		ion_buffer_kmap_put(buffer);
-		mutex_unlock(&buffer->lock);
-	}
-
-	mutex_lock(&buffer->lock);
-	list_for_each_entry(a, &buffer->attachments, list)
-		dma_sync_sgtable_for_device(a->dev, a->table, direction);
-	mutex_unlock(&buffer->lock);
-
-	return 0;
-}
-
-static const struct dma_buf_ops dma_buf_ops = {
-	.map_dma_buf = ion_map_dma_buf,
-	.unmap_dma_buf = ion_unmap_dma_buf,
-	.mmap = ion_mmap,
-	.release = ion_dma_buf_release,
-	.attach = ion_dma_buf_attach,
-	.detach = ion_dma_buf_detach,
-	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
-	.end_cpu_access = ion_dma_buf_end_cpu_access,
-};
-
-static int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
-{
-	struct ion_device *dev = internal_dev;
-	struct ion_buffer *buffer = NULL;
-	struct ion_heap *heap;
-	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	int fd;
 	struct dma_buf *dmabuf;
 
-	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
-		 len, heap_id_mask, flags);
-	/*
-	 * traverse the list of heaps available in this system in priority
-	 * order.  If the heap type is supported by the client, and matches the
-	 * request of the caller allocate from it.  Repeat until allocate has
-	 * succeeded or all heaps have been tried
-	 */
-	len = PAGE_ALIGN(len);
-
-	if (!len)
-		return -EINVAL;
-
-	down_read(&dev->lock);
-	plist_for_each_entry(heap, &dev->heaps, node) {
-		/* if the caller didn't specify this heap id */
-		if (!((1 << heap->id) & heap_id_mask))
-			continue;
-		buffer = ion_buffer_create(heap, dev, len, flags);
-		if (!IS_ERR(buffer))
-			break;
-	}
-	up_read(&dev->lock);
-
-	if (!buffer)
-		return -ENODEV;
-
-	if (IS_ERR(buffer))
-		return PTR_ERR(buffer);
-
-	exp_info.ops = &dma_buf_ops;
-	exp_info.size = buffer->size;
-	exp_info.flags = O_RDWR;
-	exp_info.priv = buffer;
-
-	dmabuf = dma_buf_export(&exp_info);
-	if (IS_ERR(dmabuf)) {
-		_ion_buffer_destroy(buffer);
+	dmabuf = ion_dmabuf_alloc(internal_dev, len, heap_id_mask, flags);
+	if (IS_ERR(dmabuf))
 		return PTR_ERR(dmabuf);
-	}
 
 	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 	if (fd < 0)
@@ -393,6 +62,38 @@
 	return fd;
 }
 
+size_t ion_query_heaps_kernel(struct ion_heap_data *hdata, size_t size)
+{
+	struct ion_device *dev = internal_dev;
+	size_t i = 0, num_heaps = 0;
+	struct ion_heap *heap;
+
+	down_read(&dev->lock);
+
+	/* If size is 0, return without updating hdata. */
+	if (size == 0) {
+		num_heaps = dev->heap_cnt;
+		goto out;
+	}
+
+	plist_for_each_entry(heap, &dev->heaps, node) {
+		strncpy(hdata[i].name, heap->name, MAX_HEAP_NAME);
+		hdata[i].name[MAX_HEAP_NAME - 1] = '\0';
+		hdata[i].type = heap->type;
+		hdata[i].heap_id = heap->id;
+
+		i++;
+		if (i >= size)
+			break;
+	}
+
+	num_heaps = i;
+out:
+	up_read(&dev->lock);
+	return num_heaps;
+}
+EXPORT_SYMBOL_GPL(ion_query_heaps_kernel);
+
 static int ion_query_heaps(struct ion_heap_query *query)
 {
 	struct ion_device *dev = internal_dev;
@@ -441,6 +142,7 @@
 union ion_ioctl_arg {
 	struct ion_allocation_data allocation;
 	struct ion_heap_query query;
+	u32 ion_abi_version;
 };
 
 static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
@@ -489,9 +191,9 @@
 	{
 		int fd;
 
-		fd = ion_alloc(data.allocation.len,
-			       data.allocation.heap_id_mask,
-			       data.allocation.flags);
+		fd = ion_alloc_fd(data.allocation.len,
+				  data.allocation.heap_id_mask,
+				  data.allocation.flags);
 		if (fd < 0)
 			return fd;
 
@@ -502,6 +204,9 @@
 	case ION_IOC_HEAP_QUERY:
 		ret = ion_query_heaps(&data.query);
 		break;
+	case ION_IOC_ABI_VERSION:
+		data.ion_abi_version = ION_CURRENT_ABI_VERSION;
+		break;
 	default:
 		return -ENOTTY;
 	}
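The kernel-facing query added above follows the same two-call pattern as the ioctl: a call with size 0 returns the heap count, and a second call fills the array (struct ion_heap_data comes from the ION uapi header). An illustrative caller:

static void example_dump_heaps(void)
{
	struct ion_heap_data *hdata;
	size_t i, cnt;

	cnt = ion_query_heaps_kernel(NULL, 0);	/* size 0: count only */
	hdata = kcalloc(cnt, sizeof(*hdata), GFP_KERNEL);
	if (!hdata)
		return;

	cnt = ion_query_heaps_kernel(hdata, cnt);
	for (i = 0; i < cnt; i++)
		pr_info("heap %u: %s\n", hdata[i].heap_id, hdata[i].name);
	kfree(hdata);
}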
@@ -554,31 +259,88 @@
 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
 			debug_shrink_set, "%llu\n");
 
-void ion_device_add_heap(struct ion_heap *heap)
+static int ion_assign_heap_id(struct ion_heap *heap, struct ion_device *dev)
+{
+	int id_bit = -EINVAL;
+	int start_bit = -1, end_bit = -1;
+
+	switch (heap->type) {
+	case ION_HEAP_TYPE_SYSTEM:
+		id_bit = __ffs(ION_HEAP_SYSTEM);
+		break;
+	case ION_HEAP_TYPE_DMA:
+		start_bit = __ffs(ION_HEAP_DMA_START);
+		end_bit = __ffs(ION_HEAP_DMA_END);
+		break;
+	case ION_HEAP_TYPE_CUSTOM ... ION_HEAP_TYPE_MAX:
+		start_bit = __ffs(ION_HEAP_CUSTOM_START);
+		end_bit = __ffs(ION_HEAP_CUSTOM_END);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/*
+	 * For carveout, DMA and custom heaps, we first let the heaps choose
+	 * their own IDs. This preserves the old behaviour of user space
+	 * knowing the heap IDs of these heap types in advance. If a heap
+	 * with that ID already exists, it is an error.
+	 *
+	 * If the heap hasn't picked an ID by itself, we assign it one.
+	 */
+	if (id_bit < 0) {
+		if (heap->id) {
+			id_bit = __ffs(heap->id);
+			if (id_bit < start_bit || id_bit > end_bit)
+				return -EINVAL;
+		} else {
+			id_bit = find_next_zero_bit(dev->heap_ids, end_bit + 1,
+						    start_bit);
+			if (id_bit > end_bit)
+				return -ENOSPC;
+		}
+	}
+
+	if (test_and_set_bit(id_bit, dev->heap_ids))
+		return -EEXIST;
+	heap->id = id_bit;
+	dev->heap_cnt++;
+
+	return 0;
+}
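A hypothetical custom heap showing both ID paths described in the comment above -- either pin a bit in the custom range up front, or leave id at zero and let ion_assign_heap_id() pick a free one (ops are stubs for illustration):

static int example_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len, unsigned long flags)
{
	return -ENOMEM;	/* stub: a real heap fills buffer->sg_table */
}

static void example_free(struct ion_buffer *buffer)
{
}

static struct ion_heap_ops example_ops = {
	.allocate = example_allocate,
	.free	  = example_free,
};

static struct ion_heap example_heap = {
	.ops  = &example_ops,
	.type = ION_HEAP_TYPE_CUSTOM,
	.name = "example_heap",
	/* .id = ION_HEAP_CUSTOM_START would pin the first custom ID;
	 * leaving it zero requests automatic assignment */
};

/* registered from the heap module's init, e.g.:
 *	ion_device_add_heap(&example_heap);
 */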
+
+int __ion_device_add_heap(struct ion_heap *heap, struct module *owner)
 {
 	struct ion_device *dev = internal_dev;
 	int ret;
 	struct dentry *heap_root;
 	char debug_name[64];
 
-	if (!heap->ops->allocate || !heap->ops->free)
-		pr_err("%s: can not add heap with invalid ops struct.\n",
-		       __func__);
+	if (!heap || !heap->ops || !heap->ops->allocate || !heap->ops->free) {
+		pr_err("%s: invalid heap or heap_ops\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
 
+	heap->owner = owner;
 	spin_lock_init(&heap->free_lock);
 	spin_lock_init(&heap->stat_lock);
 	heap->free_list_size = 0;
 
-	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
-		ion_heap_init_deferred_free(heap);
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE) {
+		ret = ion_heap_init_deferred_free(heap);
+		if (ret)
+			goto out_heap_cleanup;
+	}
 
 	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink) {
 		ret = ion_heap_init_shrinker(heap);
-		if (ret)
+		if (ret) {
 			pr_err("%s: Failed to register shrinker\n", __func__);
+			goto out_heap_cleanup;
+		}
 	}
 
-	heap->dev = dev;
 	heap->num_of_buffers = 0;
 	heap->num_of_alloc_bytes = 0;
 	heap->alloc_bytes_wm = 0;
@@ -606,8 +368,16 @@
 				    &debug_shrink_fops);
 	}
 
+	heap->debugfs_dir = heap_root;
 	down_write(&dev->lock);
-	heap->id = heap_id++;
+	ret = ion_assign_heap_id(heap, dev);
+	if (ret) {
+		pr_err("%s: Failed to assign heap id for heap type %x\n",
+		       __func__, heap->type);
+		up_write(&dev->lock);
+		goto out_debugfs_cleanup;
+	}
+
 	/*
 	 * use negative heap->id to reverse the priority -- when traversing
 	 * the list later attempt higher id numbers first
@@ -615,10 +385,99 @@
 	plist_node_init(&heap->node, -heap->id);
 	plist_add(&heap->node, &dev->heaps);
 
-	dev->heap_cnt++;
+	up_write(&dev->lock);
+
+	return 0;
+
+out_debugfs_cleanup:
+	debugfs_remove_recursive(heap->debugfs_dir);
+out_heap_cleanup:
+	ion_heap_cleanup(heap);
+out:
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__ion_device_add_heap);
+
+void ion_device_remove_heap(struct ion_heap *heap)
+{
+	struct ion_device *dev = internal_dev;
+
+	if (!heap) {
+		pr_err("%s: Invalid argument\n", __func__);
+		return;
+	}
+
+	/* Take the semaphore and remove the heap from the dev->heaps list */
+	down_write(&dev->lock);
+	/* So no new allocations can happen from this heap */
+	plist_del(&heap->node, &dev->heaps);
+	if (ion_heap_cleanup(heap) != 0) {
+		pr_warn("%s: failed to cleanup heap (%s)\n",
+			__func__, heap->name);
+	}
+	debugfs_remove_recursive(heap->debugfs_dir);
+	clear_bit(heap->id, dev->heap_ids);
+	dev->heap_cnt--;
 	up_write(&dev->lock);
 }
-EXPORT_SYMBOL(ion_device_add_heap);
+EXPORT_SYMBOL_GPL(ion_device_remove_heap);
+
+static ssize_t
+total_heaps_kb_show(struct kobject *kobj, struct kobj_attribute *attr,
+		    char *buf)
+{
+	return sprintf(buf, "%llu\n",
+		       div_u64(ion_get_total_heap_bytes(), 1024));
+}
+
+static ssize_t
+total_pools_kb_show(struct kobject *kobj, struct kobj_attribute *attr,
+		    char *buf)
+{
+	struct ion_device *dev = internal_dev;
+	struct ion_heap *heap;
+	u64 total_pages = 0;
+
+	down_read(&dev->lock);
+	plist_for_each_entry(heap, &dev->heaps, node)
+		if (heap->ops->get_pool_size)
+			total_pages += heap->ops->get_pool_size(heap);
+	up_read(&dev->lock);
+
+	return sprintf(buf, "%llu\n", total_pages * (PAGE_SIZE / 1024));
+}
+
+static struct kobj_attribute total_heaps_kb_attr =
+	__ATTR_RO(total_heaps_kb);
+
+static struct kobj_attribute total_pools_kb_attr =
+	__ATTR_RO(total_pools_kb);
+
+static struct attribute *ion_device_attrs[] = {
+	&total_heaps_kb_attr.attr,
+	&total_pools_kb_attr.attr,
+	NULL,
+};
+
+ATTRIBUTE_GROUPS(ion_device);
+
+static int ion_init_sysfs(void)
+{
+	struct kobject *ion_kobj;
+	int ret;
+
+	ion_kobj = kobject_create_and_add("ion", kernel_kobj);
+	if (!ion_kobj)
+		return -ENOMEM;
+
+	ret = sysfs_create_groups(ion_kobj, ion_device_groups);
+	if (ret) {
+		kobject_put(ion_kobj);
+		return ret;
+	}
+
+	return 0;
+}
 
 static int ion_device_create(void)
 {
@@ -636,8 +495,13 @@
 	ret = misc_register(&idev->dev);
 	if (ret) {
 		pr_err("ion: failed to register misc device.\n");
-		kfree(idev);
-		return ret;
+		goto err_reg;
+	}
+
+	ret = ion_init_sysfs();
+	if (ret) {
+		pr_err("ion: failed to add sysfs attributes.\n");
+		goto err_sysfs;
 	}
 
 	idev->debug_root = debugfs_create_dir("ion", NULL);
@@ -645,5 +509,11 @@
 	plist_head_init(&idev->heaps);
 	internal_dev = idev;
 	return 0;
+
+err_sysfs:
+	misc_deregister(&idev->dev);
+err_reg:
+	kfree(idev);
+	return ret;
 }
 subsys_initcall(ion_device_create);
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
deleted file mode 100644
index c199e88..0000000
--- a/drivers/staging/android/ion/ion.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ION Memory Allocator kernel interface header
- *
- * Copyright (C) 2011 Google, Inc.
- */
-
-#ifndef _ION_H
-#define _ION_H
-
-#include <linux/device.h>
-#include <linux/dma-direction.h>
-#include <linux/kref.h>
-#include <linux/mm_types.h>
-#include <linux/mutex.h>
-#include <linux/rbtree.h>
-#include <linux/sched.h>
-#include <linux/shrinker.h>
-#include <linux/types.h>
-#include <linux/miscdevice.h>
-
-#include "../uapi/ion.h"
-
-/**
- * struct ion_buffer - metadata for a particular buffer
- * @list:		element in list of deferred freeable buffers
- * @dev:		back pointer to the ion_device
- * @heap:		back pointer to the heap the buffer came from
- * @flags:		buffer specific flags
- * @private_flags:	internal buffer specific flags
- * @size:		size of the buffer
- * @priv_virt:		private data to the buffer representable as
- *			a void *
- * @lock:		protects the buffers cnt fields
- * @kmap_cnt:		number of times the buffer is mapped to the kernel
- * @vaddr:		the kernel mapping if kmap_cnt is not zero
- * @sg_table:		the sg table for the buffer
- * @attachments:	list of devices attached to this buffer
- */
-struct ion_buffer {
-	struct list_head list;
-	struct ion_device *dev;
-	struct ion_heap *heap;
-	unsigned long flags;
-	unsigned long private_flags;
-	size_t size;
-	void *priv_virt;
-	struct mutex lock;
-	int kmap_cnt;
-	void *vaddr;
-	struct sg_table *sg_table;
-	struct list_head attachments;
-};
-
-void ion_buffer_destroy(struct ion_buffer *buffer);
-
-/**
- * struct ion_device - the metadata of the ion device node
- * @dev:		the actual misc device
- * @lock:		rwsem protecting the tree of heaps and clients
- */
-struct ion_device {
-	struct miscdevice dev;
-	struct rw_semaphore lock;
-	struct plist_head heaps;
-	struct dentry *debug_root;
-	int heap_cnt;
-};
-
-/**
- * struct ion_heap_ops - ops to operate on a given heap
- * @allocate:		allocate memory
- * @free:		free memory
- * @map_kernel		map memory to the kernel
- * @unmap_kernel	unmap memory to the kernel
- * @map_user		map memory to userspace
- *
- * allocate, phys, and map_user return 0 on success, -errno on error.
- * map_dma and map_kernel return pointer on success, ERR_PTR on
- * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
- * the buffer's private_flags when called from a shrinker. In that
- * case, the pages being free'd must be truly free'd back to the
- * system, not put in a page pool or otherwise cached.
- */
-struct ion_heap_ops {
-	int (*allocate)(struct ion_heap *heap,
-			struct ion_buffer *buffer, unsigned long len,
-			unsigned long flags);
-	void (*free)(struct ion_buffer *buffer);
-	void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
-	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
-	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
-			struct vm_area_struct *vma);
-	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
-};
-
-/**
- * heap flags - flags between the heaps and core ion code
- */
-#define ION_HEAP_FLAG_DEFER_FREE BIT(0)
-
-/**
- * private flags - flags internal to ion
- */
-/*
- * Buffer is being freed from a shrinker function. Skip any possible
- * heap-specific caching mechanism (e.g. page pools). Guarantees that
- * any buffer storage that came from the system allocator will be
- * returned to the system allocator.
- */
-#define ION_PRIV_FLAG_SHRINKER_FREE BIT(0)
-
-/**
- * struct ion_heap - represents a heap in the system
- * @node:		rb node to put the heap on the device's tree of heaps
- * @dev:		back pointer to the ion_device
- * @type:		type of heap
- * @ops:		ops struct as above
- * @flags:		flags
- * @id:			id of heap, also indicates priority of this heap when
- *			allocating.  These are specified by platform data and
- *			MUST be unique
- * @name:		used for debugging
- * @shrinker:		a shrinker for the heap
- * @free_list:		free list head if deferred free is used
- * @free_list_size	size of the deferred free list in bytes
- * @lock:		protects the free list
- * @waitqueue:		queue to wait on from deferred free thread
- * @task:		task struct of deferred free thread
- * @num_of_buffers	the number of currently allocated buffers
- * @num_of_alloc_bytes	the number of allocated bytes
- * @alloc_bytes_wm	the number of allocated bytes watermark
- *
- * Represents a pool of memory from which buffers can be made.  In some
- * systems the only heap is regular system memory allocated via vmalloc.
- * On others, some blocks might require large physically contiguous buffers
- * that are allocated from a specially reserved heap.
- */
-struct ion_heap {
-	struct plist_node node;
-	struct ion_device *dev;
-	enum ion_heap_type type;
-	struct ion_heap_ops *ops;
-	unsigned long flags;
-	unsigned int id;
-	const char *name;
-
-	/* deferred free support */
-	struct shrinker shrinker;
-	struct list_head free_list;
-	size_t free_list_size;
-	spinlock_t free_lock;
-	wait_queue_head_t waitqueue;
-	struct task_struct *task;
-
-	/* heap statistics */
-	u64 num_of_buffers;
-	u64 num_of_alloc_bytes;
-	u64 alloc_bytes_wm;
-
-	/* protect heap statistics */
-	spinlock_t stat_lock;
-};
-
-/**
- * ion_device_add_heap - adds a heap to the ion device
- * @heap:		the heap to add
- */
-void ion_device_add_heap(struct ion_heap *heap);
-
-/**
- * some helpers for common operations on buffers using the sg_table
- * and vaddr fields
- */
-void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
-void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
-int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-		      struct vm_area_struct *vma);
-int ion_heap_buffer_zero(struct ion_buffer *buffer);
-
-/**
- * ion_heap_init_shrinker
- * @heap:		the heap
- *
- * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
- * this function will be called to setup a shrinker to shrink the freelists
- * and call the heap's shrink op.
- */
-int ion_heap_init_shrinker(struct ion_heap *heap);
-
-/**
- * ion_heap_init_deferred_free -- initialize deferred free functionality
- * @heap:		the heap
- *
- * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
- * be called to setup deferred frees. Calls to free the buffer will
- * return immediately and the actual free will occur some time later
- */
-int ion_heap_init_deferred_free(struct ion_heap *heap);
-
-/**
- * ion_heap_freelist_add - add a buffer to the deferred free list
- * @heap:		the heap
- * @buffer:		the buffer
- *
- * Adds an item to the deferred freelist.
- */
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
-
-/**
- * ion_heap_freelist_drain - drain the deferred free list
- * @heap:		the heap
- * @size:		amount of memory to drain in bytes
- *
- * Drains the indicated amount of memory from the deferred freelist immediately.
- * Returns the total amount freed.  The total freed may be higher depending
- * on the size of the items in the list, or lower if there is insufficient
- * total memory on the freelist.
- */
-size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
-
-/**
- * ion_heap_freelist_shrink - drain the deferred free
- *				list, skipping any heap-specific
- *				pooling or caching mechanisms
- *
- * @heap:		the heap
- * @size:		amount of memory to drain in bytes
- *
- * Drains the indicated amount of memory from the deferred freelist immediately.
- * Returns the total amount freed.  The total freed may be higher depending
- * on the size of the items in the list, or lower if there is insufficient
- * total memory on the freelist.
- *
- * Unlike with @ion_heap_freelist_drain, don't put any pages back into
- * page pools or otherwise cache the pages. Everything must be
- * genuinely free'd back to the system. If you're free'ing from a
- * shrinker you probably want to use this. Note that this relies on
- * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
- * flag.
- */
-size_t ion_heap_freelist_shrink(struct ion_heap *heap,
-				size_t size);
-
-/**
- * ion_heap_freelist_size - returns the size of the freelist in bytes
- * @heap:		the heap
- */
-size_t ion_heap_freelist_size(struct ion_heap *heap);
-
-/**
- * functions for creating and destroying a heap pool -- allows you
- * to keep a pool of pre allocated memory to use from your heap.  Keeping
- * a pool of memory that is ready for dma, ie any cached mapping have been
- * invalidated from the cache, provides a significant performance benefit on
- * many systems
- */
-
-/**
- * struct ion_page_pool - pagepool struct
- * @high_count:		number of highmem items in the pool
- * @low_count:		number of lowmem items in the pool
- * @high_items:		list of highmem items
- * @low_items:		list of lowmem items
- * @mutex:		lock protecting this struct and especially the count
- *			item list
- * @gfp_mask:		gfp_mask to use from alloc
- * @order:		order of pages in the pool
- * @list:		plist node for list of pools
- *
- * Allows you to keep a pool of pre allocated pages to use from your heap.
- * Keeping a pool of pages that is ready for dma, ie any cached mapping have
- * been invalidated from the cache, provides a significant performance benefit
- * on many systems
- */
-struct ion_page_pool {
-	int high_count;
-	int low_count;
-	struct list_head high_items;
-	struct list_head low_items;
-	struct mutex mutex;
-	gfp_t gfp_mask;
-	unsigned int order;
-	struct plist_node list;
-};
-
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
-void ion_page_pool_destroy(struct ion_page_pool *pool);
-struct page *ion_page_pool_alloc(struct ion_page_pool *pool);
-void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
-
-/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
- * @pool:		the pool
- * @gfp_mask:		the memory type to reclaim
- * @nr_to_scan:		number of items to shrink in pages
- *
- * returns the number of items freed in pages
- */
-int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
-			 int nr_to_scan);
-
-#endif /* _ION_H */
diff --git a/drivers/staging/android/ion/ion_buffer.c b/drivers/staging/android/ion/ion_buffer.c
new file mode 100644
index 0000000..9baca1a
--- /dev/null
+++ b/drivers/staging/android/ion/ion_buffer.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ION Memory Allocator - buffer interface
+ *
+ * Copyright (c) 2019, Google, Inc.
+ */
+
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/dma-map-ops.h>
+
+#define CREATE_TRACE_POINTS
+#include "ion_trace.h"
+#include "ion_private.h"
+
+static atomic_long_t total_heap_bytes;
+
+static void track_buffer_created(struct ion_buffer *buffer)
+{
+	long total = atomic_long_add_return(buffer->size, &total_heap_bytes);
+
+	trace_ion_stat(buffer->sg_table, buffer->size, total);
+}
+
+static void track_buffer_destroyed(struct ion_buffer *buffer)
+{
+	long total = atomic_long_sub_return(buffer->size, &total_heap_bytes);
+
+	trace_ion_stat(buffer->sg_table, -buffer->size, total);
+}
+
+/* this function should only be called while dev->lock is held */
+static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
+					    struct ion_device *dev,
+					    unsigned long len,
+					    unsigned long flags)
+{
+	struct ion_buffer *buffer;
+	int ret;
+
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
+		return ERR_PTR(-ENOMEM);
+
+	buffer->heap = heap;
+	buffer->flags = flags;
+	buffer->size = len;
+
+	ret = heap->ops->allocate(heap, buffer, len, flags);
+
+	if (ret) {
+		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
+			goto err2;
+
+		ion_heap_freelist_drain(heap, 0);
+		ret = heap->ops->allocate(heap, buffer, len, flags);
+		if (ret)
+			goto err2;
+	}
+
+	if (!buffer->sg_table) {
+		WARN_ONCE(1, "This heap needs to set the sgtable");
+		ret = -EINVAL;
+		goto err1;
+	}
+
+	spin_lock(&heap->stat_lock);
+	heap->num_of_buffers++;
+	heap->num_of_alloc_bytes += len;
+	if (heap->num_of_alloc_bytes > heap->alloc_bytes_wm)
+		heap->alloc_bytes_wm = heap->num_of_alloc_bytes;
+	if (heap->num_of_buffers == 1) {
+		/* This module reference lasts as long as at least one
+		 * buffer is allocated from the heap. We are protected
+		 * against ion_device_remove_heap() with dev->lock, so we can
+		 * safely assume the module reference is going to succeed.
+		 */
+		__module_get(heap->owner);
+	}
+	spin_unlock(&heap->stat_lock);
+
+	INIT_LIST_HEAD(&buffer->attachments);
+	mutex_init(&buffer->lock);
+	track_buffer_created(buffer);
+	return buffer;
+
+err1:
+	heap->ops->free(buffer);
+err2:
+	kfree(buffer);
+	return ERR_PTR(ret);
+}
+
+static int ion_clear_pages(struct page **pages, int num, pgprot_t pgprot)
+{
+	void *addr = vmap(pages, num, VM_MAP, pgprot);
+
+	if (!addr)
+		return -ENOMEM;
+	memset(addr, 0, PAGE_SIZE * num);
+	vunmap(addr);
+
+	return 0;
+}
+
+static int ion_sglist_zero(struct scatterlist *sgl, unsigned int nents,
+			   pgprot_t pgprot)
+{
+	int p = 0;
+	int ret = 0;
+	struct sg_page_iter piter;
+	struct page *pages[32];
+
+	for_each_sg_page(sgl, &piter, nents, 0) {
+		pages[p++] = sg_page_iter_page(&piter);
+		if (p == ARRAY_SIZE(pages)) {
+			ret = ion_clear_pages(pages, p, pgprot);
+			if (ret)
+				return ret;
+			p = 0;
+		}
+	}
+	if (p)
+		ret = ion_clear_pages(pages, p, pgprot);
+
+	return ret;
+}
+
+struct ion_buffer *ion_buffer_alloc(struct ion_device *dev, size_t len,
+				    unsigned int heap_id_mask,
+				    unsigned int flags)
+{
+	struct ion_buffer *buffer = NULL;
+	struct ion_heap *heap;
+
+	if (!dev || !len)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Traverse the list of heaps available in this system in priority
+	 * order.  If the heap type is supported by the client and matches the
+	 * request of the caller, allocate from it.  Repeat until the
+	 * allocation has succeeded or all heaps have been tried.
+	 */
+	len = PAGE_ALIGN(len);
+	if (!len)
+		return ERR_PTR(-EINVAL);
+
+	down_read(&dev->lock);
+	plist_for_each_entry(heap, &dev->heaps, node) {
+		/* if the caller didn't specify this heap id */
+		if (!((1 << heap->id) & heap_id_mask))
+			continue;
+		buffer = ion_buffer_create(heap, dev, len, flags);
+		if (!IS_ERR(buffer))
+			break;
+	}
+	up_read(&dev->lock);
+
+	if (!buffer)
+		return ERR_PTR(-ENODEV);
+
+	return buffer;
+}
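+
+/*
+ * Usage sketch (illustrative, not part of this patch): a kernel client
+ * allocates from a single heap by id; heap_id_mask is a bitfield of heap
+ * ids, so BIT(heap->id) selects exactly one heap:
+ *
+ *	struct ion_buffer *buf;
+ *
+ *	buf = ion_buffer_alloc(idev, SZ_1M, BIT(heap->id), ION_FLAG_CACHED);
+ *	if (IS_ERR(buf))
+ *		return PTR_ERR(buf);
+ */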
+
+int ion_buffer_zero(struct ion_buffer *buffer)
+{
+	struct sg_table *table;
+	pgprot_t pgprot;
+
+	if (!buffer)
+		return -EINVAL;
+
+	table = buffer->sg_table;
+	if (buffer->flags & ION_FLAG_CACHED)
+		pgprot = PAGE_KERNEL;
+	else
+		pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+	return ion_sglist_zero(table->sgl, table->nents, pgprot);
+}
+EXPORT_SYMBOL_GPL(ion_buffer_zero);
+
+void ion_buffer_prep_noncached(struct ion_buffer *buffer)
+{
+	struct scatterlist *sg;
+	struct sg_table *table;
+	int i;
+
+	if (WARN_ONCE(!buffer || !buffer->sg_table,
+		      "%s needs a buffer and a sg_table", __func__) ||
+	    buffer->flags & ION_FLAG_CACHED)
+		return;
+
+	table = buffer->sg_table;
+
+	for_each_sg(table->sgl, sg, table->orig_nents, i)
+		arch_dma_prep_coherent(sg_page(sg), sg->length);
+}
+EXPORT_SYMBOL_GPL(ion_buffer_prep_noncached);
+
+void ion_buffer_release(struct ion_buffer *buffer)
+{
+	if (buffer->kmap_cnt > 0) {
+		pr_warn_once("%s: buffer still mapped in the kernel\n",
+			     __func__);
+		ion_heap_unmap_kernel(buffer->heap, buffer);
+	}
+	buffer->heap->ops->free(buffer);
+	spin_lock(&buffer->heap->stat_lock);
+	buffer->heap->num_of_buffers--;
+	buffer->heap->num_of_alloc_bytes -= buffer->size;
+	if (buffer->heap->num_of_buffers == 0)
+		module_put(buffer->heap->owner);
+	spin_unlock(&buffer->heap->stat_lock);
+	/* drop reference to the heap module */
+
+	kfree(buffer);
+}
+
+int ion_buffer_destroy(struct ion_device *dev, struct ion_buffer *buffer)
+{
+	struct ion_heap *heap;
+
+	if (!dev || !buffer) {
+		pr_warn("%s: invalid argument\n", __func__);
+		return -EINVAL;
+	}
+
+	heap = buffer->heap;
+	track_buffer_destroyed(buffer);
+
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		ion_heap_freelist_add(heap, buffer);
+	else
+		ion_buffer_release(buffer);
+
+	return 0;
+}
+
+void *ion_buffer_kmap_get(struct ion_buffer *buffer)
+{
+	void *vaddr;
+
+	if (buffer->kmap_cnt) {
+		buffer->kmap_cnt++;
+		return buffer->vaddr;
+	}
+	vaddr = ion_heap_map_kernel(buffer->heap, buffer);
+	if (WARN_ONCE(!vaddr,
+		      "heap->ops->map_kernel should return ERR_PTR on error"))
+		return ERR_PTR(-EINVAL);
+	if (IS_ERR(vaddr))
+		return vaddr;
+	buffer->vaddr = vaddr;
+	buffer->kmap_cnt++;
+	return vaddr;
+}
+
+void ion_buffer_kmap_put(struct ion_buffer *buffer)
+{
+	buffer->kmap_cnt--;
+	if (!buffer->kmap_cnt) {
+		ion_heap_unmap_kernel(buffer->heap, buffer);
+		buffer->vaddr = NULL;
+	}
+}
+
+u64 ion_get_total_heap_bytes(void)
+{
+	return atomic_long_read(&total_heap_bytes);
+}
diff --git a/drivers/staging/android/ion/ion_dma_buf.c b/drivers/staging/android/ion/ion_dma_buf.c
new file mode 100644
index 0000000..47ae54a
--- /dev/null
+++ b/drivers/staging/android/ion/ion_dma_buf.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ION Memory Allocator - dmabuf interface
+ *
+ * Copyright (c) 2019, Google, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "ion_private.h"
+
+static struct sg_table *dup_sg_table(struct sg_table *table)
+{
+	struct sg_table *new_table;
+	int ret, i;
+	struct scatterlist *sg, *new_sg;
+
+	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
+	if (!new_table)
+		return ERR_PTR(-ENOMEM);
+
+	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
+	if (ret) {
+		kfree(new_table);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	new_sg = new_table->sgl;
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		memcpy(new_sg, sg, sizeof(*sg));
+		new_sg->dma_address = 0;
+		new_sg = sg_next(new_sg);
+	}
+
+	return new_table;
+}
+
+static void free_duped_table(struct sg_table *table)
+{
+	sg_free_table(table);
+	kfree(table);
+}
+
+static int ion_dma_buf_attach(struct dma_buf *dmabuf,
+			      struct dma_buf_attachment *attachment)
+{
+	struct ion_dma_buf_attachment *a;
+	struct sg_table *table;
+	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_heap *heap = buffer->heap;
+
+	if (heap->buf_ops.attach)
+		return heap->buf_ops.attach(dmabuf, attachment);
+
+	a = kzalloc(sizeof(*a), GFP_KERNEL);
+	if (!a)
+		return -ENOMEM;
+
+	table = dup_sg_table(buffer->sg_table);
+	if (IS_ERR(table)) {
+		kfree(a);
+		return -ENOMEM;
+	}
+
+	a->table = table;
+	a->dev = attachment->dev;
+	INIT_LIST_HEAD(&a->list);
+	a->mapped = false;
+
+	attachment->priv = a;
+
+	mutex_lock(&buffer->lock);
+	list_add(&a->list, &buffer->attachments);
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static void ion_dma_buf_detach(struct dma_buf *dmabuf,
+				struct dma_buf_attachment *attachment)
+{
+	struct ion_dma_buf_attachment *a = attachment->priv;
+	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_heap *heap = buffer->heap;
+
+	if (heap->buf_ops.detach)
+		return heap->buf_ops.detach(dmabuf, attachment);
+
+	mutex_lock(&buffer->lock);
+	list_del(&a->list);
+	mutex_unlock(&buffer->lock);
+	free_duped_table(a->table);
+
+	kfree(a);
+}
+
+static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
+					enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = attachment->dmabuf->priv;
+	struct ion_heap *heap = buffer->heap;
+	struct ion_dma_buf_attachment *a;
+	struct sg_table *table;
+	unsigned long attrs = attachment->dma_map_attrs;
+
+	if (heap->buf_ops.map_dma_buf)
+		return heap->buf_ops.map_dma_buf(attachment, direction);
+
+	a = attachment->priv;
+	table = a->table;
+
+	if (!(buffer->flags & ION_FLAG_CACHED))
+		attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+	if (!dma_map_sg_attrs(attachment->dev, table->sgl, table->nents,
+			      direction, attrs))
+		return ERR_PTR(-ENOMEM);
+
+	a->mapped = true;
+
+	return table;
+}
+
+static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
+			      struct sg_table *table,
+			      enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = attachment->dmabuf->priv;
+	struct ion_heap *heap = buffer->heap;
+	struct ion_dma_buf_attachment *a = attachment->priv;
+	unsigned long attrs = attachment->dma_map_attrs;
+
+	if (heap->buf_ops.unmap_dma_buf)
+		return heap->buf_ops.unmap_dma_buf(attachment, table,
+						   direction);
+
+	a->mapped = false;
+
+	if (!(buffer->flags & ION_FLAG_CACHED))
+		attrs |= DMA_ATTR_SKIP_CPU_SYNC;
+
+	dma_unmap_sg_attrs(attachment->dev, table->sgl, table->nents,
+			   direction, attrs);
+}
+
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_heap *heap = buffer->heap;
+
+	if (heap->buf_ops.release)
+		return heap->buf_ops.release(dmabuf);
+
+	ion_free(buffer);
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+					enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_heap *heap = buffer->heap;
+	struct ion_dma_buf_attachment *a;
+
+	if (heap->buf_ops.begin_cpu_access)
+		return heap->buf_ops.begin_cpu_access(dmabuf, direction);
+
+	mutex_lock(&buffer->lock);
+	if (!(buffer->flags & ION_FLAG_CACHED))
+		goto unlock;
+
+	list_for_each_entry(a, &buffer->attachments, list) {
+		if (!a->mapped)
+			continue;
+		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
+				    direction);
+	}
+
+unlock:
+	mutex_unlock(&buffer->lock);
+	return 0;
+}
+
+static int
+ion_dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
+				     enum dma_data_direction direction,
+				     unsigned int offset, unsigned int len)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_heap *heap = buffer->heap;
+
+	/* Partial buffer cache flush / invalidate is only possible if the
+	 * heap implements it: the implementation may be vendor specific,
+	 * so the ion core does not provide a default.
+	 */
+	if (!heap->buf_ops.begin_cpu_access_partial)
+		return -EOPNOTSUPP;
+
+	return heap->buf_ops.begin_cpu_access_partial(dmabuf, direction, offset,
+						      len);
+}
+
+static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+				      enum dma_data_direction direction)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_heap *heap = buffer->heap;
+	struct ion_dma_buf_attachment *a;
+
+	if (heap->buf_ops.end_cpu_access)
+		return heap->buf_ops.end_cpu_access(dmabuf, direction);
+
+	mutex_lock(&buffer->lock);
+	if (!(buffer->flags & ION_FLAG_CACHED))
+		goto unlock;
+
+	list_for_each_entry(a, &buffer->attachments, list) {
+		if (!a->mapped)
+			continue;
+		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
+				       direction);
+	}
+unlock:
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static int ion_dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
+					      enum dma_data_direction direction,
+					      unsigned int offset,
+					      unsigned int len)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_heap *heap = buffer->heap;
+
+	/* Partial buffer cache flush / invalidate is only possible if the
+	 * heap implements it: the implementation may be vendor specific,
+	 * so the ion core does not provide a default.
+	 */
+	if (!heap->buf_ops.end_cpu_access_partial)
+		return -EOPNOTSUPP;
+
+	return heap->buf_ops.end_cpu_access_partial(dmabuf, direction, offset,
+						    len);
+}
+
+static int ion_dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_heap *heap = buffer->heap;
+	int ret;
+
+	/* now map it to userspace */
+	if (heap->buf_ops.mmap) {
+		ret = heap->buf_ops.mmap(dmabuf, vma);
+	} else {
+		mutex_lock(&buffer->lock);
+		if (!(buffer->flags & ION_FLAG_CACHED))
+			vma->vm_page_prot =
+				pgprot_writecombine(vma->vm_page_prot);
+
+		ret = ion_heap_map_user(heap, buffer, vma);
+		mutex_unlock(&buffer->lock);
+	}
+
+	if (ret)
+		pr_err("%s: failure mapping buffer to userspace\n", __func__);
+
+	return ret;
+}
+
+static void *ion_dma_buf_vmap(struct dma_buf *dmabuf)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_heap *heap = buffer->heap;
+	void *vaddr;
+
+	if (heap->buf_ops.vmap)
+		return heap->buf_ops.vmap(dmabuf);
+
+	mutex_lock(&buffer->lock);
+	vaddr = ion_buffer_kmap_get(buffer);
+	mutex_unlock(&buffer->lock);
+
+	return vaddr;
+}
+
+static void ion_dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_heap *heap = buffer->heap;
+
+	if (heap->buf_ops.vunmap) {
+		heap->buf_ops.vunmap(dmabuf, vaddr);
+		return;
+	}
+
+	mutex_lock(&buffer->lock);
+	ion_buffer_kmap_put(buffer);
+	mutex_unlock(&buffer->lock);
+}
+
+static int ion_dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
+{
+	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_heap *heap = buffer->heap;
+
+	if (!heap->buf_ops.get_flags)
+		return -EOPNOTSUPP;
+
+	return heap->buf_ops.get_flags(dmabuf, flags);
+}
+
+static const struct dma_buf_ops dma_buf_ops = {
+	.attach = ion_dma_buf_attach,
+	.detach = ion_dma_buf_detach,
+	.map_dma_buf = ion_map_dma_buf,
+	.unmap_dma_buf = ion_unmap_dma_buf,
+	.release = ion_dma_buf_release,
+	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
+	.begin_cpu_access_partial = ion_dma_buf_begin_cpu_access_partial,
+	.end_cpu_access = ion_dma_buf_end_cpu_access,
+	.end_cpu_access_partial = ion_dma_buf_end_cpu_access_partial,
+	.mmap = ion_dma_buf_mmap,
+	.vmap = ion_dma_buf_vmap,
+	.vunmap = ion_dma_buf_vunmap,
+	.get_flags = ion_dma_buf_get_flags,
+};
+
+struct dma_buf *ion_dmabuf_alloc(struct ion_device *dev, size_t len,
+				 unsigned int heap_id_mask,
+				 unsigned int flags)
+{
+	struct ion_buffer *buffer;
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct dma_buf *dmabuf;
+
+	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
+		 len, heap_id_mask, flags);
+
+	buffer = ion_buffer_alloc(dev, len, heap_id_mask, flags);
+	if (IS_ERR(buffer))
+		return ERR_CAST(buffer);
+
+	exp_info.ops = &dma_buf_ops;
+	exp_info.size = buffer->size;
+	exp_info.flags = O_RDWR;
+	exp_info.priv = buffer;
+
+	dmabuf = dma_buf_export(&exp_info);
+	if (IS_ERR(dmabuf))
+		ion_buffer_destroy(dev, buffer);
+
+	return dmabuf;
+}
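For context, a caller that wants to hand the exported buffer to userspace
would typically pair ion_dmabuf_alloc() with dma_buf_fd(). A minimal sketch
under that assumption (error paths abbreviated, names illustrative):

	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_dmabuf_alloc(dev, len, heap_id_mask, flags);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);	/* drop the export reference */
	return fd;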
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index ea7e0a2..aa29a4ee 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -15,221 +15,7 @@
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
 
-#include "ion.h"
-
-void *ion_heap_map_kernel(struct ion_heap *heap,
-			  struct ion_buffer *buffer)
-{
-	struct sg_page_iter piter;
-	void *vaddr;
-	pgprot_t pgprot;
-	struct sg_table *table = buffer->sg_table;
-	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
-	struct page **pages = vmalloc(array_size(npages,
-						 sizeof(struct page *)));
-	struct page **tmp = pages;
-
-	if (!pages)
-		return ERR_PTR(-ENOMEM);
-
-	if (buffer->flags & ION_FLAG_CACHED)
-		pgprot = PAGE_KERNEL;
-	else
-		pgprot = pgprot_writecombine(PAGE_KERNEL);
-
-	for_each_sgtable_page(table, &piter, 0) {
-		BUG_ON(tmp - pages >= npages);
-		*tmp++ = sg_page_iter_page(&piter);
-	}
-
-	vaddr = vmap(pages, npages, VM_MAP, pgprot);
-	vfree(pages);
-
-	if (!vaddr)
-		return ERR_PTR(-ENOMEM);
-
-	return vaddr;
-}
-
-void ion_heap_unmap_kernel(struct ion_heap *heap,
-			   struct ion_buffer *buffer)
-{
-	vunmap(buffer->vaddr);
-}
-
-int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
-		      struct vm_area_struct *vma)
-{
-	struct sg_page_iter piter;
-	struct sg_table *table = buffer->sg_table;
-	unsigned long addr = vma->vm_start;
-	int ret;
-
-	for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
-		struct page *page = sg_page_iter_page(&piter);
-
-		ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
-				      vma->vm_page_prot);
-		if (ret)
-			return ret;
-		addr += PAGE_SIZE;
-		if (addr >= vma->vm_end)
-			return 0;
-	}
-
-	return 0;
-}
-
-static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
-{
-	void *addr = vmap(pages, num, VM_MAP, pgprot);
-
-	if (!addr)
-		return -ENOMEM;
-	memset(addr, 0, PAGE_SIZE * num);
-	vunmap(addr);
-
-	return 0;
-}
-
-static int ion_heap_sglist_zero(struct sg_table *sgt, pgprot_t pgprot)
-{
-	int p = 0;
-	int ret = 0;
-	struct sg_page_iter piter;
-	struct page *pages[32];
-
-	for_each_sgtable_page(sgt, &piter, 0) {
-		pages[p++] = sg_page_iter_page(&piter);
-		if (p == ARRAY_SIZE(pages)) {
-			ret = ion_heap_clear_pages(pages, p, pgprot);
-			if (ret)
-				return ret;
-			p = 0;
-		}
-	}
-	if (p)
-		ret = ion_heap_clear_pages(pages, p, pgprot);
-
-	return ret;
-}
-
-int ion_heap_buffer_zero(struct ion_buffer *buffer)
-{
-	struct sg_table *table = buffer->sg_table;
-	pgprot_t pgprot;
-
-	if (buffer->flags & ION_FLAG_CACHED)
-		pgprot = PAGE_KERNEL;
-	else
-		pgprot = pgprot_writecombine(PAGE_KERNEL);
-
-	return ion_heap_sglist_zero(table, pgprot);
-}
-
-void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
-{
-	spin_lock(&heap->free_lock);
-	list_add(&buffer->list, &heap->free_list);
-	heap->free_list_size += buffer->size;
-	spin_unlock(&heap->free_lock);
-	wake_up(&heap->waitqueue);
-}
-
-size_t ion_heap_freelist_size(struct ion_heap *heap)
-{
-	size_t size;
-
-	spin_lock(&heap->free_lock);
-	size = heap->free_list_size;
-	spin_unlock(&heap->free_lock);
-
-	return size;
-}
-
-static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
-				       bool skip_pools)
-{
-	struct ion_buffer *buffer;
-	size_t total_drained = 0;
-
-	if (ion_heap_freelist_size(heap) == 0)
-		return 0;
-
-	spin_lock(&heap->free_lock);
-	if (size == 0)
-		size = heap->free_list_size;
-
-	while (!list_empty(&heap->free_list)) {
-		if (total_drained >= size)
-			break;
-		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
-					  list);
-		list_del(&buffer->list);
-		heap->free_list_size -= buffer->size;
-		if (skip_pools)
-			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
-		total_drained += buffer->size;
-		spin_unlock(&heap->free_lock);
-		ion_buffer_destroy(buffer);
-		spin_lock(&heap->free_lock);
-	}
-	spin_unlock(&heap->free_lock);
-
-	return total_drained;
-}
-
-size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
-{
-	return _ion_heap_freelist_drain(heap, size, false);
-}
-
-size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
-{
-	return _ion_heap_freelist_drain(heap, size, true);
-}
-
-static int ion_heap_deferred_free(void *data)
-{
-	struct ion_heap *heap = data;
-
-	while (true) {
-		struct ion_buffer *buffer;
-
-		wait_event_freezable(heap->waitqueue,
-				     ion_heap_freelist_size(heap) > 0);
-
-		spin_lock(&heap->free_lock);
-		if (list_empty(&heap->free_list)) {
-			spin_unlock(&heap->free_lock);
-			continue;
-		}
-		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
-					  list);
-		list_del(&buffer->list);
-		heap->free_list_size -= buffer->size;
-		spin_unlock(&heap->free_lock);
-		ion_buffer_destroy(buffer);
-	}
-
-	return 0;
-}
-
-int ion_heap_init_deferred_free(struct ion_heap *heap)
-{
-	INIT_LIST_HEAD(&heap->free_list);
-	init_waitqueue_head(&heap->waitqueue);
-	heap->task = kthread_run(ion_heap_deferred_free, heap,
-				 "%s", heap->name);
-	if (IS_ERR(heap->task)) {
-		pr_err("%s: creating thread for deferred free failed\n",
-		       __func__);
-		return PTR_ERR_OR_ZERO(heap->task);
-	}
-	sched_set_normal(heap->task, 19);
-
-	return 0;
-}
+#include "ion_private.h"
 
 static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
 					   struct shrink_control *sc)
@@ -275,6 +61,196 @@
 	return freed;
 }
 
+static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
+				       bool skip_pools)
+{
+	struct ion_buffer *buffer;
+	size_t total_drained = 0;
+
+	if (ion_heap_freelist_size(heap) == 0)
+		return 0;
+
+	spin_lock(&heap->free_lock);
+	if (size == 0)
+		size = heap->free_list_size;
+
+	while (!list_empty(&heap->free_list)) {
+		if (total_drained >= size)
+			break;
+		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+					  list);
+		list_del(&buffer->list);
+		heap->free_list_size -= buffer->size;
+		if (skip_pools)
+			buffer->private_flags |= ION_PRIV_FLAG_SHRINKER_FREE;
+		total_drained += buffer->size;
+		spin_unlock(&heap->free_lock);
+		ion_buffer_release(buffer);
+		spin_lock(&heap->free_lock);
+	}
+	spin_unlock(&heap->free_lock);
+
+	return total_drained;
+}
+
+static int ion_heap_deferred_free(void *data)
+{
+	struct ion_heap *heap = data;
+
+	while (true) {
+		struct ion_buffer *buffer;
+
+		wait_event_freezable(heap->waitqueue,
+				     (ion_heap_freelist_size(heap) > 0 ||
+				      kthread_should_stop()));
+
+		spin_lock(&heap->free_lock);
+		if (list_empty(&heap->free_list)) {
+			spin_unlock(&heap->free_lock);
+			if (!kthread_should_stop())
+				continue;
+			break;
+		}
+		buffer = list_first_entry(&heap->free_list, struct ion_buffer,
+					  list);
+		list_del(&buffer->list);
+		heap->free_list_size -= buffer->size;
+		spin_unlock(&heap->free_lock);
+		ion_buffer_release(buffer);
+	}
+
+	return 0;
+}
+
+void *ion_heap_map_kernel(struct ion_heap *heap,
+			  struct ion_buffer *buffer)
+{
+	struct scatterlist *sg;
+	int i, j;
+	void *vaddr;
+	pgprot_t pgprot;
+	struct sg_table *table = buffer->sg_table;
+	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	struct page **pages = vmalloc(array_size(npages,
+						 sizeof(struct page *)));
+	struct page **tmp = pages;
+
+	if (!pages)
+		return ERR_PTR(-ENOMEM);
+
+	if (buffer->flags & ION_FLAG_CACHED)
+		pgprot = PAGE_KERNEL;
+	else
+		pgprot = pgprot_writecombine(PAGE_KERNEL);
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
+		struct page *page = sg_page(sg);
+
+		BUG_ON(tmp - pages + npages_this_entry > npages);
+		for (j = 0; j < npages_this_entry; j++)
+			*(tmp++) = page++;
+	}
+	vaddr = vmap(pages, npages, VM_MAP, pgprot);
+	vfree(pages);
+
+	if (!vaddr)
+		return ERR_PTR(-ENOMEM);
+
+	return vaddr;
+}
+EXPORT_SYMBOL_GPL(ion_heap_map_kernel);
+
+void ion_heap_unmap_kernel(struct ion_heap *heap,
+			   struct ion_buffer *buffer)
+{
+	vunmap(buffer->vaddr);
+}
+EXPORT_SYMBOL_GPL(ion_heap_unmap_kernel);
+
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+		      struct vm_area_struct *vma)
+{
+	struct sg_table *table = buffer->sg_table;
+	unsigned long addr = vma->vm_start;
+	unsigned long offset = vma->vm_pgoff * PAGE_SIZE;
+	struct scatterlist *sg;
+	int i;
+	int ret;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		struct page *page = sg_page(sg);
+		unsigned long remainder = vma->vm_end - addr;
+		unsigned long len = sg->length;
+
+		if (offset >= sg->length) {
+			offset -= sg->length;
+			continue;
+		} else if (offset) {
+			page += offset / PAGE_SIZE;
+			len = sg->length - offset;
+			offset = 0;
+		}
+		len = min(len, remainder);
+		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
+				      vma->vm_page_prot);
+		if (ret)
+			return ret;
+		addr += len;
+		if (addr >= vma->vm_end)
+			return 0;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(ion_heap_map_user);
+
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer)
+{
+	spin_lock(&heap->free_lock);
+	list_add(&buffer->list, &heap->free_list);
+	heap->free_list_size += buffer->size;
+	spin_unlock(&heap->free_lock);
+	wake_up(&heap->waitqueue);
+}
+
+size_t ion_heap_freelist_size(struct ion_heap *heap)
+{
+	size_t size;
+
+	spin_lock(&heap->free_lock);
+	size = heap->free_list_size;
+	spin_unlock(&heap->free_lock);
+
+	return size;
+}
+
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+	return _ion_heap_freelist_drain(heap, size, false);
+}
+
+size_t ion_heap_freelist_shrink(struct ion_heap *heap, size_t size)
+{
+	return _ion_heap_freelist_drain(heap, size, true);
+}
+
+int ion_heap_init_deferred_free(struct ion_heap *heap)
+{
+	INIT_LIST_HEAD(&heap->free_list);
+	init_waitqueue_head(&heap->waitqueue);
+	heap->task = kthread_run(ion_heap_deferred_free, heap,
+				 "%s", heap->name);
+	if (IS_ERR(heap->task)) {
+		pr_err("%s: creating thread for deferred free failed\n",
+		       __func__);
+		return PTR_ERR_OR_ZERO(heap->task);
+	}
+	sched_set_normal(heap->task, 19);
+
+	return 0;
+}
+
 int ion_heap_init_shrinker(struct ion_heap *heap)
 {
 	heap->shrinker.count_objects = ion_heap_shrink_count;
@@ -284,3 +260,32 @@
 
 	return register_shrinker(&heap->shrinker);
 }
+
+int ion_heap_cleanup(struct ion_heap *heap)
+{
+	int ret;
+
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE &&
+	    !IS_ERR_OR_NULL(heap->task)) {
+		size_t free_list_size = ion_heap_freelist_size(heap);
+		size_t total_drained = ion_heap_freelist_drain(heap, 0);
+
+		if (total_drained != free_list_size) {
+			pr_err("%s: %s heap drained %zu bytes, requested %zu\n",
+			       __func__, heap->name, total_drained,
+			       free_list_size);
+			return -EBUSY;
+		}
+		ret = kthread_stop(heap->task);
+		if (ret < 0) {
+			pr_err("%s: failed to stop heap free thread\n",
+			       __func__);
+			return ret;
+		}
+	}
+
+	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
+		unregister_shrinker(&heap->shrinker);
+
+	return 0;
+}
diff --git a/drivers/staging/android/ion/ion_private.h b/drivers/staging/android/ion/ion_private.h
new file mode 100644
index 0000000..db4e906
--- /dev/null
+++ b/drivers/staging/android/ion/ion_private.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ION Memory Allocator - Internal header
+ *
+ * Copyright (C) 2019 Google, Inc.
+ */
+
+#ifndef _ION_PRIVATE_H
+#define _ION_PRIVATE_H
+
+#include <linux/dcache.h>
+#include <linux/dma-buf.h>
+#include <linux/ion.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/plist.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
+#include <linux/types.h>
+
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev:		the actual misc device
+ * @lock:		rwsem protecting the plist of heaps, @heap_ids and
+ *			clients
+ * @heap_ids:		bitmap of registered heap ids
+ * @heaps:		priority-sorted list of all heaps in the system
+ * @debug_root:		debugfs root directory for ion
+ * @heap_cnt:		number of registered heaps
+ */
+struct ion_device {
+	struct miscdevice dev;
+	struct rw_semaphore lock;
+	DECLARE_BITMAP(heap_ids, ION_NUM_MAX_HEAPS);
+	struct plist_head heaps;
+	struct dentry *debug_root;
+	int heap_cnt;
+};
+
+/* ion_buffer manipulators */
+extern struct ion_buffer *ion_buffer_alloc(struct ion_device *dev, size_t len,
+					   unsigned int heap_id_mask,
+					   unsigned int flags);
+extern void ion_buffer_release(struct ion_buffer *buffer);
+extern int ion_buffer_destroy(struct ion_device *dev,
+			      struct ion_buffer *buffer);
+extern void *ion_buffer_kmap_get(struct ion_buffer *buffer);
+extern void ion_buffer_kmap_put(struct ion_buffer *buffer);
+
+/* ion dmabuf allocator */
+extern struct dma_buf *ion_dmabuf_alloc(struct ion_device *dev, size_t len,
+					unsigned int heap_id_mask,
+					unsigned int flags);
+extern int ion_free(struct ion_buffer *buffer);
+
+/* ion heap helpers */
+extern int ion_heap_cleanup(struct ion_heap *heap);
+
+u64 ion_get_total_heap_bytes(void);
+
+#endif /* _ION_PRIVATE_H */
diff --git a/drivers/staging/android/ion/ion_trace.h b/drivers/staging/android/ion/ion_trace.h
new file mode 100644
index 0000000..eacb47d
--- /dev/null
+++ b/drivers/staging/android/ion/ion_trace.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * drivers/staging/android/ion/ion_trace.h
+ *
+ * Copyright (C) 2020 Google, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ion
+
+#if !defined(_ION_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _ION_TRACE_H
+
+#include <linux/tracepoint.h>
+
+#ifndef __ION_PTR_TO_HASHVAL
+static unsigned int __maybe_unused __ion_ptr_to_hash(const void *ptr)
+{
+	unsigned long hashval;
+
+	if (ptr_to_hashval(ptr, &hashval))
+		return 0;
+
+	/* The hashed value is only 32-bit */
+	return (unsigned int)hashval;
+}
+
+#define __ION_PTR_TO_HASHVAL
+#endif
+
+TRACE_EVENT(ion_stat,
+	    TP_PROTO(const void *addr, long len,
+		     unsigned long total_allocated),
+	    TP_ARGS(addr, len, total_allocated),
+	    TP_STRUCT__entry(__field(unsigned int, buffer_id)
+		__field(long, len)
+		__field(unsigned long, total_allocated)
+	    ),
+	    TP_fast_assign(__entry->buffer_id = __ion_ptr_to_hash(addr);
+		__entry->len = len;
+		__entry->total_allocated = total_allocated;
+	    ),
+	    TP_printk("buffer_id=%u len=%ldB total_allocated=%luB",
+		      __entry->buffer_id,
+		      __entry->len,
+		      __entry->total_allocated)
+	    );
+
+#endif /* _ION_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE ion_trace
+#include <trace/define_trace.h>
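Once built (ion_buffer.c defines CREATE_TRACE_POINTS before including this
header), the ion_stat event can be enabled through tracefs in the usual way:

	echo 1 > /sys/kernel/tracing/events/ion/ion_stat/enable
	cat /sys/kernel/tracing/trace_pipe

Each record prints in the TP_printk() format above: the hashed buffer id,
the signed length delta, and the running total.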
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index c6d74bc..a681265 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -541,6 +541,7 @@
 
 	return mode == THERMAL_DEVICE_ENABLED;
 }
+EXPORT_SYMBOL_GPL(thermal_zone_device_is_enabled);
 
 void thermal_zone_device_update(struct thermal_zone_device *tz,
 				enum thermal_notify_event event)
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 681209d..5e80cb3 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -179,6 +179,4 @@
 }
 #endif
 
-int thermal_zone_device_is_enabled(struct thermal_zone_device *tz);
-
 #endif /* __THERMAL_CORE_H__ */
diff --git a/drivers/tty/hvc/Kconfig b/drivers/tty/hvc/Kconfig
index 8d60e0f..ee2eba4 100644
--- a/drivers/tty/hvc/Kconfig
+++ b/drivers/tty/hvc/Kconfig
@@ -87,6 +87,27 @@
 	  driver. This console is used through a JTAG only on ARM. If you don't have
 	  a JTAG then you probably don't want this option.
 
+config HVC_DCC_SERIALIZE_SMP
+	bool "Use DCC only on core 0"
+	depends on SMP && HVC_DCC
+	help
+	  Some debuggers, such as Trace32 from Lauterbach GmbH, do not handle
+	  reads/writes from/to DCC on more than one core. Each core has its
+	  own DCC device registers, so when a core reads or writes from/to DCC,
+	  it only accesses its own DCC device. Since kernel code can run on
+	  any core, every time the kernel wants to write to the console, it
+	  might write to a different DCC.
+
+	  In SMP mode, Trace32 only uses the DCC on core 0. In AMP mode, it
+	  creates multiple windows, and each window shows the DCC output
+	  only from that core's DCC. The result is that console output is
+	  either lost or scattered across windows.
+
+	  Selecting this option will enable code that serializes all console
+	  input and output to core 0. The DCC driver will create input and
+	  output FIFOs that all cores will use. Reads and writes from/to DCC
+	  are handled by a workqueue that runs only on core 0.
+
 config HVC_RISCV_SBI
 	bool "RISC-V SBI console support"
 	depends on RISCV_SBI_V01
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index 18d0058..97200fb 100644
--- a/drivers/tty/hvc/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
@@ -30,7 +30,7 @@
  * for the tty device.  Since this driver supports hotplug of vty adapters we
  * need to make sure we have enough allocated.
  */
-#define HVC_ALLOC_TTY_ADAPTERS	8
+#define HVC_ALLOC_TTY_ADAPTERS	64
 
 struct hvc_struct {
 	struct tty_port port;
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index 8e0edb7..e9aff1f 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -3,14 +3,24 @@
 
 #include <linux/console.h>
 #include <linux/init.h>
+#include <linux/kfifo.h>
+#include <linux/moduleparam.h>
 #include <linux/serial.h>
 #include <linux/serial_core.h>
+#include <linux/spinlock.h>
 
 #include <asm/dcc.h>
 #include <asm/processor.h>
 
 #include "hvc_console.h"
 
+/*
+ * Disable the DCC driver by default. We want the driver enabled for GKI,
+ * but some devices do not implement the DCC registers and crash when the
+ * driver pokes them.
+ */
+static bool enable;
+module_param(enable, bool, 0444);
+
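+/*
+ * With the driver built in, the parameter can be set from the kernel
+ * command line using the usual <module>.<param> convention, e.g.:
+ *
+ *	hvc_dcc.enable=1
+ */
+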
 /* DCC Status Bits */
 #define DCC_STATUS_RX		(1 << 30)
 #define DCC_STATUS_TX		(1 << 29)
@@ -67,31 +77,182 @@
 	return i;
 }
 
+/*
+ * Check if the DCC is enabled.  If CONFIG_HVC_DCC_SERIALIZE_SMP is enabled,
+ * then we assume that this function will be called first on core 0.  That
+ * way, dcc_core0_available will be true only if it's available on core 0.
+ */
 static bool hvc_dcc_check(void)
 {
 	unsigned long time = jiffies + (HZ / 10);
 
+#ifdef CONFIG_HVC_DCC_SERIALIZE_SMP
+	static bool dcc_core0_available;
+
+	/*
+	 * If we're not on core 0, but we previously confirmed that DCC is
+	 * active, then just return true.
+	 */
+	if (smp_processor_id() && dcc_core0_available)
+		return true;
+#endif
+
 	/* Write a test character to check if it is handled */
 	__dcc_putchar('\n');
 
 	while (time_is_after_jiffies(time)) {
-		if (!(__dcc_getstatus() & DCC_STATUS_TX))
+		if (!(__dcc_getstatus() & DCC_STATUS_TX)) {
+#ifdef CONFIG_HVC_DCC_SERIALIZE_SMP
+			dcc_core0_available = true;
+#endif
 			return true;
+		}
 	}
 
 	return false;
 }
 
+#ifdef CONFIG_HVC_DCC_SERIALIZE_SMP
+
+static void dcc_put_work_fn(struct work_struct *work);
+static void dcc_get_work_fn(struct work_struct *work);
+static DECLARE_WORK(dcc_pwork, dcc_put_work_fn);
+static DECLARE_WORK(dcc_gwork, dcc_get_work_fn);
+static DEFINE_SPINLOCK(dcc_lock);
+static DEFINE_KFIFO(inbuf, unsigned char, 128);
+static DEFINE_KFIFO(outbuf, unsigned char, 1024);
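+/* Note: DEFINE_KFIFO requires a power-of-2 size; 128 and 1024 qualify. */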
+
+/*
+ * Workqueue function that writes the output FIFO to the DCC on core 0.
+ */
+static void dcc_put_work_fn(struct work_struct *work)
+{
+	unsigned char ch;
+
+	spin_lock(&dcc_lock);
+
+	/* While there's data in the output FIFO, write it to the DCC */
+	while (kfifo_get(&outbuf, &ch))
+		hvc_dcc_put_chars(0, &ch, 1);
+
+	/* While we're at it, check for any input characters */
+	while (!kfifo_is_full(&inbuf)) {
+		if (!hvc_dcc_get_chars(0, &ch, 1))
+			break;
+		kfifo_put(&inbuf, ch);
+	}
+
+	spin_unlock(&dcc_lock);
+}
+
+/*
+ * Workqueue function that reads characters from DCC and puts them into the
+ * input FIFO.
+ */
+static void dcc_get_work_fn(struct work_struct *work)
+{
+	unsigned char ch;
+
+	/*
+	 * Read characters from DCC and put them into the input FIFO, as
+	 * long as there is room and we have characters to read.
+	 */
+	spin_lock(&dcc_lock);
+
+	while (!kfifo_is_full(&inbuf)) {
+		if (!hvc_dcc_get_chars(0, &ch, 1))
+			break;
+		kfifo_put(&inbuf, ch);
+	}
+	spin_unlock(&dcc_lock);
+}
+
+/*
+ * Write characters directly to the DCC if we're on core 0 and the FIFO
+ * is empty, or write them to the FIFO if we're not.
+ */
+static int hvc_dcc0_put_chars(uint32_t vt, const char *buf, int count)
+{
+	int len;
+
+	spin_lock(&dcc_lock);
+	if (smp_processor_id() || (!kfifo_is_empty(&outbuf))) {
+		len = kfifo_in(&outbuf, buf, count);
+		spin_unlock(&dcc_lock);
+		/*
+		 * We just pushed data to the output FIFO, so schedule the
+		 * workqueue that will actually write that data to DCC.
+		 */
+		schedule_work_on(0, &dcc_pwork);
+		return len;
+	}
+
+	/*
+	 * If we're already on core 0, and the FIFO is empty, then just
+	 * write the data to DCC.
+	 */
+	len = hvc_dcc_put_chars(vt, buf, count);
+	spin_unlock(&dcc_lock);
+
+	return len;
+}
+
+/*
+ * Read characters directly from the DCC if we're on core 0 and the FIFO
+ * is empty, or read them from the FIFO if we're not.
+ */
+static int hvc_dcc0_get_chars(uint32_t vt, char *buf, int count)
+{
+	int len;
+
+	spin_lock(&dcc_lock);
+
+	if (smp_processor_id() || (!kfifo_is_empty(&inbuf))) {
+		len = kfifo_out(&inbuf, buf, count);
+		spin_unlock(&dcc_lock);
+
+		/*
+		 * If the FIFO was empty, there may be characters in the DCC
+		 * that we haven't read yet.  Schedule a workqueue to fill
+		 * the input FIFO, so that the next time this function is
+		 * called, we'll have data.
+		 */
+		if (!len)
+			schedule_work_on(0, &dcc_gwork);
+
+		return len;
+	}
+
+	/*
+	 * If we're already on core 0, and the FIFO is empty, then just
+	 * read the data from DCC.
+	 */
+	len = hvc_dcc_get_chars(vt, buf, count);
+	spin_unlock(&dcc_lock);
+
+	return len;
+}
+
+static const struct hv_ops hvc_dcc_get_put_ops = {
+	.get_chars = hvc_dcc0_get_chars,
+	.put_chars = hvc_dcc0_put_chars,
+};
+
+#else
+
 static const struct hv_ops hvc_dcc_get_put_ops = {
 	.get_chars = hvc_dcc_get_chars,
 	.put_chars = hvc_dcc_put_chars,
 };
 
+#endif
+
 static int __init hvc_dcc_console_init(void)
 {
 	int ret;
 
-	if (!hvc_dcc_check())
+	if (!enable || !hvc_dcc_check())
 		return -ENODEV;
 
 	/* Returns -1 if error */
@@ -105,7 +266,7 @@
 {
 	struct hvc_struct *p;
 
-	if (!hvc_dcc_check())
+	if (!enable || !hvc_dcc_check())
 		return -ENODEV;
 
 	p = hvc_alloc(0, 0, &hvc_dcc_get_put_ops, 128);
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index c5f0d93..fe97d60 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -32,7 +32,18 @@
 	if (len != -ENODEV)
 		return len;
 
-	return of_device_modalias(dev, buf, PAGE_SIZE);
+	len = of_device_modalias(dev, buf, PAGE_SIZE);
+	if (len != -ENODEV)
+		return len;
+
+	if (dev->parent->parent->bus == &platform_bus_type) {
+		struct platform_device *pdev =
+			to_platform_device(dev->parent->parent);
+
+		len = snprintf(buf, PAGE_SIZE, "platform:%s\n", pdev->name);
+	}
+
+	return len;
 }
 static DEVICE_ATTR_RO(modalias);
 
@@ -46,13 +57,18 @@
 {
 	int rc;
 
-	/* TODO: platform modalias */
-
 	rc = acpi_device_uevent_modalias(dev, env);
 	if (rc != -ENODEV)
 		return rc;
 
-	return of_device_uevent_modalias(dev, env);
+	rc = of_device_uevent_modalias(dev, env);
+	if (rc != -ENODEV)
+		return rc;
+
+	if (dev->parent->parent->bus == &platform_bus_type)
+		rc = dev->parent->parent->bus->uevent(dev->parent->parent, env);
+
+	return rc;
 }
 
 static void serdev_device_release(struct device *dev)
@@ -88,11 +104,17 @@
 	if (!is_serdev_device(dev))
 		return 0;
 
-	/* TODO: platform matching */
 	if (acpi_driver_match_device(dev, drv))
 		return 1;
 
-	return of_driver_match_device(dev, drv);
+	if (of_driver_match_device(dev, drv))
+		return 1;
+
+	if (dev->parent->parent->bus == &platform_bus_type &&
+	    dev->parent->parent->bus->match(dev->parent->parent, drv))
+		return 1;
+
+	return 0;
 }
 
 /**
@@ -729,16 +751,45 @@
 }
 #endif /* CONFIG_ACPI */
 
+static int platform_serdev_register_devices(struct serdev_controller *ctrl)
+{
+	struct serdev_device *serdev;
+	int err;
+
+	if (ctrl->dev.parent->bus != &platform_bus_type)
+		return -ENODEV;
+
+	serdev = serdev_device_alloc(ctrl);
+	if (!serdev) {
+		dev_err(&ctrl->dev, "failed to allocate serdev device for %s\n",
+				    dev_name(ctrl->dev.parent));
+		return -ENOMEM;
+	}
+
+	pm_runtime_no_callbacks(&serdev->dev);
+
+	err = serdev_device_add(serdev);
+	if (err) {
+		dev_err(&serdev->dev,
+			"failure adding device. status %d\n", err);
+		serdev_device_put(serdev);
+	}
+
+	return err;
+}
+
 /**
- * serdev_controller_add() - Add an serdev controller
+ * serdev_controller_add_platform() - Add a serdev controller
  * @ctrl:	controller to be registered.
+ * @platform:	whether to permit fallthrough to platform device probe
  *
  * Register a controller previously allocated via serdev_controller_alloc() with
- * the serdev core.
+ * the serdev core. Optionally permit probing via a platform device fallback.
  */
-int serdev_controller_add(struct serdev_controller *ctrl)
+int serdev_controller_add_platform(struct serdev_controller *ctrl, bool platform)
 {
-	int ret_of, ret_acpi, ret;
+	int ret, ret_of, ret_acpi, ret_platform = -ENODEV;
 
 	/* Can't register until after driver model init */
 	if (WARN_ON(!is_registered))
@@ -752,9 +803,13 @@
 
 	ret_of = of_serdev_register_devices(ctrl);
 	ret_acpi = acpi_serdev_register_devices(ctrl);
-	if (ret_of && ret_acpi) {
-		dev_dbg(&ctrl->dev, "no devices registered: of:%pe acpi:%pe\n",
-			ERR_PTR(ret_of), ERR_PTR(ret_acpi));
+	if (platform)
+		ret_platform = platform_serdev_register_devices(ctrl);
+	if (ret_of && ret_acpi && ret_platform) {
+		dev_dbg(&ctrl->dev,
+			"no devices registered: of:%pe acpi:%pe platform:%pe\n",
+			ERR_PTR(ret_of), ERR_PTR(ret_acpi),
+			ERR_PTR(ret_platform));
 		ret = -ENODEV;
 		goto err_rpm_disable;
 	}
@@ -768,7 +823,7 @@
 	device_del(&ctrl->dev);
 	return ret;
 };
-EXPORT_SYMBOL_GPL(serdev_controller_add);
+EXPORT_SYMBOL_GPL(serdev_controller_add_platform);
 
 /* Remove a device associated with a controller */
 static int serdev_remove_device(struct device *dev, void *data)
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index d367803e..6cdffde 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -7,9 +7,15 @@
 #include <linux/tty.h>
 #include <linux/tty_driver.h>
 #include <linux/poll.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
 
 #define SERPORT_ACTIVE		1
 
+static char *pdev_tty_port;
+module_param(pdev_tty_port, charp, 0644);
+MODULE_PARM_DESC(pdev_tty_port, "platform device tty port to claim");
+
 struct serport {
 	struct tty_port *port;
 	struct tty_struct *tty;
@@ -267,6 +273,7 @@
 {
 	struct serdev_controller *ctrl;
 	struct serport *serport;
+	bool platform = false;
 	int ret;
 
 	if (!port || !drv || !parent)
@@ -286,7 +293,28 @@
 	port->client_ops = &client_ops;
 	port->client_data = ctrl;
 
-	ret = serdev_controller_add(ctrl);
+	/*
+	 * There is not always a way to bind specific platform devices because
+	 * they may be defined on platforms without DT or ACPI. When dealing
+	 * with a platform device, do not allow direct binding unless it is
+	 * whitelisted by module parameter. If a platform device is otherwise
+	 * described by DT or ACPI it will still be bound and this check will
+	 * be ignored.
+	 */
+	if (parent->bus == &platform_bus_type && pdev_tty_port) {
+		unsigned long pdev_idx;
+		int tty_len = strlen(drv->name);
+
+		if (!strncmp(pdev_tty_port, drv->name, tty_len) &&
+		    !kstrtoul(pdev_tty_port + tty_len, 10, &pdev_idx) &&
+		    pdev_idx == idx)
+			platform = true;
+	}
+
+	ret = serdev_controller_add_platform(ctrl, platform);
 	if (ret)
 		goto err_reset_data;
 
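Assuming ttyport support is built into the serdev module, the whitelist
parameter takes the usual <module>.<param> form, and the value is the tty
driver name followed by the port index checked above, e.g. (port name
hypothetical):

	serdev.pdev_tty_port=ttyHS0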
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 28f22e5..f6ad40d 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -236,7 +236,6 @@
 
 config SERIAL_SAMSUNG
 	tristate "Samsung SoC serial support"
-	depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
 	select SERIAL_CORE
 	help
 	  Support for the on-chip UARTs on the Samsung S3C24XX series CPUs,
@@ -967,6 +966,14 @@
 	select SERIAL_CORE_CONSOLE
 	select SERIAL_EARLYCON
 
+config SERIAL_MSM_GENI_EARLY_CONSOLE
+	bool "MSM on-chip GENI HW based early console support"
+	select SERIAL_CORE_CONSOLE
+	select SERIAL_EARLYCON
+	help
+	  Serial early console driver for Qualcomm Technologies Inc's GENI
+	  based QUP hardware.
+
 config SERIAL_QCOM_GENI
 	tristate "QCOM on-chip GENI based serial port support"
 	depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index caf167f..ac5566c 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -56,6 +56,7 @@
 obj-$(CONFIG_SERIAL_ATMEL) += atmel_serial.o
 obj-$(CONFIG_SERIAL_UARTLITE) += uartlite.o
 obj-$(CONFIG_SERIAL_MSM) += msm_serial.o
+obj-$(CONFIG_SERIAL_MSM_GENI_EARLY_CONSOLE) += msm_geni_serial_earlycon.o
 obj-$(CONFIG_SERIAL_QCOM_GENI) += qcom_geni_serial.o
 obj-$(CONFIG_SERIAL_OMAP) += omap-serial.o
 obj-$(CONFIG_SERIAL_ALTERA_UART) += altera_uart.o
diff --git a/drivers/tty/serial/msm_geni_serial_earlycon.c b/drivers/tty/serial/msm_geni_serial_earlycon.c
new file mode 100644
index 0000000..3c214fa
--- /dev/null
+++ b/drivers/tty/serial/msm_geni_serial_earlycon.c
@@ -0,0 +1,354 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/serial_core.h>
+
+#define SE_GENI_DMA_MODE_EN             0x258
+#define SE_UART_TX_TRANS_CFG		0x25C
+#define SE_UART_TX_WORD_LEN		0x268
+#define SE_UART_TX_STOP_BIT_LEN		0x26C
+#define SE_UART_TX_TRANS_LEN		0x270
+#define SE_UART_TX_PARITY_CFG		0x2A4
+/* SE_UART_TRANS_CFG */
+#define UART_CTS_MASK		BIT(1)
+/* UART M_CMD OP codes */
+#define UART_START_TX		0x1
+
+#define UART_OVERSAMPLING	32
+#define DEF_FIFO_DEPTH_WORDS	16
+#define DEF_TX_WM		2
+#define DEF_FIFO_WIDTH_BITS	32
+
+#define GENI_FORCE_DEFAULT_REG		0x20
+#define GENI_OUTPUT_CTRL		0x24
+#define GENI_CGC_CTRL			0x28
+#define GENI_SER_M_CLK_CFG		0x48
+#define GENI_FW_REVISION_RO		0x68
+
+#define SE_GENI_TX_PACKING_CFG0		0x260
+#define SE_GENI_TX_PACKING_CFG1		0x264
+#define SE_GENI_M_CMD0			0x600
+#define SE_GENI_M_CMD_CTRL_REG		0x604
+#define SE_GENI_M_IRQ_STATUS		0x610
+#define SE_GENI_M_IRQ_EN		0x614
+#define SE_GENI_M_IRQ_CLEAR		0x618
+#define SE_GENI_TX_FIFOn		0x700
+#define SE_GENI_TX_WATERMARK_REG	0x80C
+
+#define SE_IRQ_EN			0xE1C
+#define SE_HW_PARAM_0			0xE24
+#define SE_HW_PARAM_1			0xE28
+
+/* GENI_OUTPUT_CTRL fields */
+#define DEFAULT_IO_OUTPUT_CTRL_MSK	GENMASK(6, 0)
+
+/* GENI_FORCE_DEFAULT_REG fields */
+#define FORCE_DEFAULT	BIT(0)
+
+/* GENI_CGC_CTRL fields */
+#define CFG_AHB_CLK_CGC_ON		BIT(0)
+#define CFG_AHB_WR_ACLK_CGC_ON		BIT(1)
+#define DATA_AHB_CLK_CGC_ON		BIT(2)
+#define SCLK_CGC_ON			BIT(3)
+#define TX_CLK_CGC_ON			BIT(4)
+#define RX_CLK_CGC_ON			BIT(5)
+#define EXT_CLK_CGC_ON			BIT(6)
+#define PROG_RAM_HCLK_OFF		BIT(8)
+#define PROG_RAM_SCLK_OFF		BIT(9)
+#define DEFAULT_CGC_EN			GENMASK(6, 0)
+
+/* GENI_STATUS fields */
+#define M_GENI_CMD_ACTIVE		BIT(0)
+
+/* GENI_SER_M_CLK_CFG/GENI_SER_S_CLK_CFG */
+#define SER_CLK_EN			BIT(0)
+#define CLK_DIV_MSK			GENMASK(15, 4)
+#define CLK_DIV_SHFT			4
+
+/* CLK_CTRL_RO fields */
+
+/* FIFO_IF_DISABLE_RO fields */
+#define FIFO_IF_DISABLE			BIT(0)
+
+/* FW_REVISION_RO fields */
+#define FW_REV_PROTOCOL_MSK	GENMASK(15, 8)
+#define FW_REV_PROTOCOL_SHFT	8
+#define FW_REV_VERSION_MSK	GENMASK(7, 0)
+
+/* GENI_CLK_SEL fields */
+#define CLK_SEL_MSK		GENMASK(2, 0)
+
+/* SE_GENI_DMA_MODE_EN */
+#define GENI_DMA_MODE_EN	BIT(0)
+
+/* GENI_M_CMD0 fields */
+#define M_OPCODE_MSK		GENMASK(31, 27)
+#define M_OPCODE_SHFT		27
+#define M_PARAMS_MSK		GENMASK(26, 0)
+
+/* GENI_M_CMD_CTRL_REG */
+#define M_GENI_CMD_CANCEL	BIT(2)
+#define M_GENI_CMD_ABORT	BIT(1)
+#define M_GENI_DISABLE		BIT(0)
+
+/* GENI_M_IRQ_EN fields */
+#define M_CMD_DONE_EN		BIT(0)
+#define M_CMD_OVERRUN_EN	BIT(1)
+#define M_ILLEGAL_CMD_EN	BIT(2)
+#define M_CMD_FAILURE_EN	BIT(3)
+#define M_CMD_CANCEL_EN		BIT(4)
+#define M_CMD_ABORT_EN		BIT(5)
+#define M_TIMESTAMP_EN		BIT(6)
+#define M_GP_SYNC_IRQ_0_EN	BIT(8)
+#define M_IO_DATA_DEASSERT_EN	BIT(22)
+#define M_IO_DATA_ASSERT_EN	BIT(23)
+#define M_TX_FIFO_RD_ERR_EN	BIT(28)
+#define M_TX_FIFO_WR_ERR_EN	BIT(29)
+#define M_TX_FIFO_WATERMARK_EN	BIT(30)
+#define M_SEC_IRQ_EN		BIT(31)
+#define M_COMMON_GENI_M_IRQ_EN	(GENMASK(6, 1) | \
+				M_IO_DATA_DEASSERT_EN | \
+				M_IO_DATA_ASSERT_EN | M_TX_FIFO_RD_ERR_EN | \
+				M_TX_FIFO_WR_ERR_EN)
+
+/* GENI_TX_FIFO_STATUS fields */
+#define TX_FIFO_WC		GENMASK(27, 0)
+
+/* SE_IRQ_EN fields */
+#define GENI_M_IRQ_EN		BIT(2)
+
+#define UART_PROTOCOL	2
+
+static int get_se_proto_earlycon(void __iomem *base)
+{
+	int proto;
+
+	proto = ((readl_relaxed(base + GENI_FW_REVISION_RO)
+			& FW_REV_PROTOCOL_MSK) >> FW_REV_PROTOCOL_SHFT);
+	return proto;
+}
+
+static void se_get_packing_config_earlycon(int bpw, int pack_words,
+	bool msb_to_lsb, unsigned long *cfg0, unsigned long *cfg1)
+{
+	u32 cfg[4] = {0};
+	int len, i;
+	int temp_bpw = bpw;
+	int idx_start = (msb_to_lsb ? (bpw - 1) : 0);
+	int idx_delta = (msb_to_lsb ? -BITS_PER_BYTE : BITS_PER_BYTE);
+	int ceil_bpw = ((bpw & (BITS_PER_BYTE - 1)) ?
+			((bpw & ~(BITS_PER_BYTE - 1)) + BITS_PER_BYTE) : bpw);
+	int iter = (ceil_bpw * pack_words) >> 3;
+	int idx = idx_start;
+
+	if (iter <= 0 || iter > 4) {
+		*cfg0 = 0;
+		*cfg1 = 0;
+		return;
+	}
+
+	for (i = 0; i < iter; i++) {
+		len = (temp_bpw < BITS_PER_BYTE) ?
+				(temp_bpw - 1) : BITS_PER_BYTE - 1;
+		cfg[i] = ((idx << 5) | (msb_to_lsb << 4) | (len << 1));
+		idx = ((temp_bpw - BITS_PER_BYTE) <= 0) ?
+				((i + 1) * BITS_PER_BYTE) + idx_start :
+				idx + idx_delta;
+		temp_bpw = ((temp_bpw - BITS_PER_BYTE) <= 0) ?
+				bpw : (temp_bpw - BITS_PER_BYTE);
+	}
+	cfg[iter - 1] |= 1;
+	*cfg0 = cfg[0] | (cfg[1] << 10);
+	*cfg1 = cfg[2] | (cfg[3] << 10);
+}
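+
+/*
+ * Worked example for the single call site below (bpw = 8, pack_words = 1,
+ * msb_to_lsb = false): iter = 1, len = 7, idx = 0, so
+ * cfg[0] = (7 << 1) | 1 = 0xf, which yields cfg0 = 0xf and cfg1 = 0:
+ * one byte per FIFO word, LSB first, stop bit set on the last vector.
+ */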
+
+static void se_io_init_earlycon(void __iomem *base)
+{
+	u32 io_op_ctrl;
+	u32 geni_cgc_ctrl;
+
+	geni_cgc_ctrl = readl_relaxed(base + GENI_CGC_CTRL);
+	geni_cgc_ctrl |= DEFAULT_CGC_EN;
+	io_op_ctrl = DEFAULT_IO_OUTPUT_CTRL_MSK;
+	writel_relaxed(geni_cgc_ctrl, base + GENI_CGC_CTRL);
+
+	writel_relaxed(io_op_ctrl, base + GENI_OUTPUT_CTRL);
+	writel_relaxed(FORCE_DEFAULT, base + GENI_FORCE_DEFAULT_REG);
+}
+
+static void geni_se_select_fifo_mode_earlycon(void __iomem *base)
+{
+	u32 val;
+
+	val = readl_relaxed(base + SE_GENI_DMA_MODE_EN);
+	val &= ~GENI_DMA_MODE_EN;
+	writel_relaxed(val, base + SE_GENI_DMA_MODE_EN);
+}
+
+static void msm_geni_serial_wr_char(struct uart_port *uport, int ch)
+{
+	writel_relaxed(ch, uport->membase + SE_GENI_TX_FIFOn);
+}
+
+static int msm_geni_serial_poll_bit(struct uart_port *uport,
+					int offset, int bit_field, bool set)
+{
+	int iter = 0;
+	bool met = false, cond = false;
+	unsigned int reg, total_iter = 1000;
+
+	while (iter < total_iter) {
+		reg = readl_relaxed(uport->membase + offset);
+		cond = reg & bit_field;
+		if (cond == set) {
+			met = true;
+			break;
+		}
+		udelay(10);
+		iter++;
+	}
+	return met;
+}
+
+static void msm_geni_serial_poll_abort_tx(struct uart_port *uport)
+{
+	int done = 0;
+	u32 irq_clear = M_CMD_DONE_EN;
+
+	done = msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+					M_CMD_DONE_EN, true);
+	if (!done) {
+		writel_relaxed(M_GENI_CMD_ABORT,
+				uport->membase + SE_GENI_M_CMD_CTRL_REG);
+		irq_clear |= M_CMD_ABORT_EN;
+		msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+					M_CMD_ABORT_EN, true);
+	}
+	writel_relaxed(irq_clear, uport->membase + SE_GENI_M_IRQ_CLEAR);
+}
+
+static void msm_geni_serial_setup_tx(struct uart_port *uport,
+				unsigned int xmit_size)
+{
+	u32 m_cmd = 0;
+
+	writel_relaxed(xmit_size, uport->membase + SE_UART_TX_TRANS_LEN);
+	m_cmd |= (UART_START_TX << M_OPCODE_SHFT);
+	writel_relaxed(m_cmd, uport->membase + SE_GENI_M_CMD0);
+}
+
+static void
+__msm_geni_serial_console_write(struct uart_port *uport, const char *s,
+				unsigned int count)
+{
+	int new_line = 0;
+	int i;
+	int bytes_to_send = count;
+	int fifo_depth = DEF_FIFO_DEPTH_WORDS;
+	int tx_wm = DEF_TX_WM;
+
+	for (i = 0; i < count; i++) {
+		if (s[i] == '\n')
+			new_line++;
+	}
+
+	bytes_to_send += new_line;
+	writel_relaxed(tx_wm, uport->membase + SE_GENI_TX_WATERMARK_REG);
+	msm_geni_serial_setup_tx(uport, bytes_to_send);
+	i = 0;
+	while (i < count) {
+		u32 chars_to_write = 0;
+		u32 avail_fifo_bytes = (fifo_depth - tx_wm);
+		/*
+		 * If the WM bit is never set, the TX state machine is not in
+		 * a valid state; stop writing and cancel/abort any existing
+		 * command. Unfortunately the data currently being written is
+		 * lost.
+		 */
+		if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
+					      M_TX_FIFO_WATERMARK_EN, true))
+			break;
+		chars_to_write = min((unsigned int)(count - i),
+					avail_fifo_bytes);
+		if ((chars_to_write << 1) > avail_fifo_bytes)
+			chars_to_write = (avail_fifo_bytes >> 1);
+		uart_console_write(uport, (s + i), chars_to_write,
+					msm_geni_serial_wr_char);
+		writel_relaxed(M_TX_FIFO_WATERMARK_EN,
+				uport->membase + SE_GENI_M_IRQ_CLEAR);
+		i += chars_to_write;
+	}
+	msm_geni_serial_poll_abort_tx(uport);
+}
+
+static void
+msm_geni_serial_early_console_write(struct console *con, const char *s,
+					unsigned int n)
+{
+	struct earlycon_device *dev = con->data;
+
+	__msm_geni_serial_console_write(&dev->port, s, n);
+}
+
+static int __init
+msm_geni_serial_earlycon_setup(struct earlycon_device *dev,
+				const char *opt)
+{
+	int ret = 0;
+	u32 tx_trans_cfg = 0;
+	u32 tx_parity_cfg = 0;
+	u32 stop_bit = 0;
+	u32 bits_per_char = 0;
+	unsigned long cfg0, cfg1;
+	struct uart_port *uport = &dev->port;
+
+	if (!uport->membase) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	if (get_se_proto_earlycon(uport->membase) != UART_PROTOCOL) {
+		ret = -ENXIO;
+		goto exit;
+	}
+
+	/*
+	 * Ignore Flow control.
+	 * Disable Tx Parity.
+	 * Don't check Parity during Rx.
+	 * Disable Rx Parity.
+	 * n = 8.
+	 * Stop bit = 0.
+	 * Stale timeout in bit-time (3 chars worth).
+	 */
+	tx_trans_cfg |= UART_CTS_MASK;
+	tx_parity_cfg = 0;
+	bits_per_char = 0x8;
+	stop_bit = 0;
+
+	msm_geni_serial_poll_abort_tx(uport);
+
+	se_get_packing_config_earlycon(8, 1, false, &cfg0, &cfg1);
+
+	se_io_init_earlycon(uport->membase);
+
+	geni_se_select_fifo_mode_earlycon(uport->membase);
+	writel_relaxed(cfg0, uport->membase + SE_GENI_TX_PACKING_CFG0);
+	writel_relaxed(cfg1, uport->membase + SE_GENI_TX_PACKING_CFG1);
+	writel_relaxed(tx_trans_cfg, uport->membase + SE_UART_TX_TRANS_CFG);
+	writel_relaxed(tx_parity_cfg, uport->membase + SE_UART_TX_PARITY_CFG);
+	writel_relaxed(bits_per_char, uport->membase + SE_UART_TX_WORD_LEN);
+	writel_relaxed(stop_bit, uport->membase + SE_UART_TX_STOP_BIT_LEN);
+
+	dev->con->write = msm_geni_serial_early_console_write;
+	dev->con->setup = NULL;
+exit:
+	return ret;
+}
+
+OF_EARLYCON_DECLARE(msm_geni_serial, "qcom,msm-geni-console",
+			msm_geni_serial_earlycon_setup);
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 959f9e1..8f01cde 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -55,6 +55,8 @@
 #include <asm/ptrace.h>
 #include <asm/irq_regs.h>
 
+#include <trace/hooks/sysrqcrash.h>
+
 /* Whether we react on sysrq keys or just ignore them */
 static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
 static bool __read_mostly sysrq_always_enabled;
@@ -151,6 +153,8 @@
 	/* release the RCU read lock before crashing */
 	rcu_read_unlock();
 
+	trace_android_vh_sysrq_crash(current);
+
 	panic("sysrq triggered crash\n");
 }
 static const struct sysrq_key_op sysrq_crash_op = {
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 7e47e62..7340c2a 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -216,6 +216,12 @@
 config USB_F_TCM
 	tristate
 
+config USB_F_ACC
+	tristate
+
+config USB_F_AUDIO_SRC
+	tristate
+
 # this first set of drivers all depend on bulk-capable hardware.
 
 config USB_CONFIGFS
@@ -230,6 +236,14 @@
 	  appropriate symbolic links.
 	  For more information see Documentation/usb/gadget_configfs.rst.
 
+config USB_CONFIGFS_UEVENT
+	bool "Uevent notification of Gadget state"
+	depends on USB_CONFIGFS
+	help
+	  Enable uevent notifications to userspace when the gadget
+	  state changes. The gadget can be in any of the following
+	  three states: CONNECTED, DISCONNECTED or CONFIGURED.
+
 config USB_CONFIGFS_SERIAL
 	bool "Generic serial bulk in/out"
 	depends on USB_CONFIGFS
@@ -369,6 +383,23 @@
 	  implemented in kernel space (for instance Ethernet, serial or
 	  mass storage) and other are implemented in user space.
 
+config USB_CONFIGFS_F_ACC
+	bool "Accessory gadget"
+	depends on USB_CONFIGFS
+	depends on HID=y
+	select USB_F_ACC
+	help
+	  USB gadget Accessory support
+
+config USB_CONFIGFS_F_AUDIO_SRC
+	bool "Audio Source gadget"
+	depends on USB_CONFIGFS
+	depends on SND
+	select SND_PCM
+	select USB_F_AUDIO_SRC
+	help
+	  USB gadget Audio Source support
+
 config USB_CONFIGFS_F_UAC1
 	bool "Audio Class 1.0"
 	depends on USB_CONFIGFS
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 56051bb..0e88b38 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -10,6 +10,32 @@
 #include "u_f.h"
 #include "u_os_desc.h"
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+#include <linux/platform_device.h>
+#include <linux/kdev_t.h>
+#include <linux/usb/ch9.h>
+
+#ifdef CONFIG_USB_CONFIGFS_F_ACC
+extern int acc_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl);
+void acc_disconnect(void);
+#endif
+static struct class *android_class;
+static struct device *android_device;
+static int index;
+static int gadget_index;
+
+struct device *create_function_device(char *name)
+{
+	if (!android_device || IS_ERR(android_device))
+		return ERR_PTR(-EINVAL);
+
+	return device_create(android_class, android_device,
+			     MKDEV(0, index++), NULL, name);
+}
+EXPORT_SYMBOL_GPL(create_function_device);
+#endif
+
 int check_user_usb_string(const char *name,
 		struct usb_gadget_strings *stringtab_dev)
 {
@@ -51,6 +77,12 @@
 	char qw_sign[OS_STRING_QW_SIGN_LEN];
 	spinlock_t spinlock;
 	bool unbind;
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	bool connected;
+	bool sw_connected;
+	struct work_struct work;
+	struct device *dev;
+#endif
 };
 
 static inline struct gadget_info *to_gadget_info(struct config_item *item)
@@ -259,7 +291,7 @@
 
 	mutex_lock(&gi->lock);
 
-	if (!strlen(name)) {
+	if (!strlen(name) || strcmp(name, "none") == 0) {
 		ret = unregister_gadget(gi);
 		if (ret)
 			goto err;
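With this change userspace can disable the gadget by writing the literal
string "none" as well as an empty string to the UDC attribute, for example
(assuming configfs is mounted at /config and a gadget named g1, as is
conventional on Android):

	echo none > /config/usb_gadget/g1/UDC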
@@ -1409,6 +1441,57 @@
 	return ret;
 }
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+static void android_work(struct work_struct *data)
+{
+	struct gadget_info *gi = container_of(data, struct gadget_info, work);
+	struct usb_composite_dev *cdev = &gi->cdev;
+	char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL };
+	char *connected[2]    = { "USB_STATE=CONNECTED", NULL };
+	char *configured[2]   = { "USB_STATE=CONFIGURED", NULL };
+	/* 0-connected, 1-configured, 2-disconnected */
+	bool status[3] = { false, false, false };
+	unsigned long flags;
+	bool uevent_sent = false;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		status[1] = true;
+
+	if (gi->connected != gi->sw_connected) {
+		if (gi->connected)
+			status[0] = true;
+		else
+			status[2] = true;
+		gi->sw_connected = gi->connected;
+	}
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	if (status[0]) {
+		kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, connected);
+		pr_info("%s: sent uevent %s\n", __func__, connected[0]);
+		uevent_sent = true;
+	}
+
+	if (status[1]) {
+		kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, configured);
+		pr_info("%s: sent uevent %s\n", __func__, configured[0]);
+		uevent_sent = true;
+	}
+
+	if (status[2]) {
+		kobject_uevent_env(&gi->dev->kobj, KOBJ_CHANGE, disconnected);
+		pr_info("%s: sent uevent %s\n", __func__, disconnected[0]);
+		uevent_sent = true;
+	}
+
+	if (!uevent_sent) {
+		pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
+			gi->connected, gi->sw_connected, cdev->config);
+	}
+}
+#endif
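
The USB_STATE uevents dispatched above can be consumed from userspace over
the kernel uevent netlink socket. A minimal listener sketch (error handling
trimmed; not specific to this driver):

	#include <stdio.h>
	#include <string.h>
	#include <sys/socket.h>
	#include <linux/netlink.h>

	int main(void)
	{
		struct sockaddr_nl addr = {
			.nl_family = AF_NETLINK,
			.nl_groups = 1,	/* kernel uevent multicast group */
		};
		char buf[4096];
		ssize_t n, i;
		int fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_KOBJECT_UEVENT);

		if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)))
			return 1;
		for (;;) {
			n = recv(fd, buf, sizeof(buf) - 1, 0);
			if (n <= 0)
				continue;
			buf[n] = '\0';
			/* payload is a NUL-separated list of KEY=value strings */
			for (i = 0; i < n; i += strlen(buf + i) + 1)
				if (!strncmp(buf + i, "USB_STATE=", 10))
					printf("%s\n", buf + i);
		}
	}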
+
 static void configfs_composite_unbind(struct usb_gadget *gadget)
 {
 	struct usb_composite_dev	*cdev;
@@ -1434,6 +1517,80 @@
 	spin_unlock_irqrestore(&gi->spinlock, flags);
 }
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+static int android_setup(struct usb_gadget *gadget,
+			const struct usb_ctrlrequest *c)
+{
+	struct usb_composite_dev *cdev = get_gadget_data(gadget);
+	unsigned long flags;
+	struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev);
+	int value = -EOPNOTSUPP;
+	struct usb_function_instance *fi;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (!gi->connected) {
+		gi->connected = 1;
+		schedule_work(&gi->work);
+	}
+	spin_unlock_irqrestore(&cdev->lock, flags);
+	list_for_each_entry(fi, &gi->available_func, cfs_list) {
+		if (fi->f && fi->f->setup) {
+			value = fi->f->setup(fi->f, c);
+			if (value >= 0)
+				break;
+		}
+	}
+
+#ifdef CONFIG_USB_CONFIGFS_F_ACC
+	if (value < 0)
+		value = acc_ctrlrequest(cdev, c);
+#endif
+
+	if (value < 0)
+		value = composite_setup(gadget, c);
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
+						cdev->config) {
+		schedule_work(&gi->work);
+	}
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	return value;
+}
+
+static void android_disconnect(struct usb_gadget *gadget)
+{
+	struct usb_composite_dev        *cdev = get_gadget_data(gadget);
+	struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev);
+
+	/* FIXME: There's a race between usb_gadget_udc_stop(), which is likely
+	 * to set the gadget driver to NULL in the udc driver, and this
+	 * driver's gadget disconnect fn, which likely checks for the gadget
+	 * driver to be a null ptr. It happens that unbind (doing
+	 * set_gadget_data(NULL)) is called before the gadget driver is set to
+	 * NULL and the udc driver calls disconnect fn, which results in cdev
+	 * being a null ptr.
+	 */
+	if (cdev == NULL) {
+		WARN(1, "%s: gadget driver already disconnected\n", __func__);
+		return;
+	}
+
+	/*
+	 * Accessory HID support can be active while the accessory function
+	 * is not actually enabled, so we need to inform it when we are
+	 * disconnected.
+	 */
+
+#ifdef CONFIG_USB_CONFIGFS_F_ACC
+	acc_disconnect();
+#endif
+	gi->connected = 0;
+	schedule_work(&gi->work);
+	composite_disconnect(gadget);
+}
+
+#else /* CONFIG_USB_CONFIGFS_UEVENT */
+
 static int configfs_composite_setup(struct usb_gadget *gadget,
 		const struct usb_ctrlrequest *ctrl)
 {
@@ -1481,6 +1638,8 @@
 	spin_unlock_irqrestore(&gi->spinlock, flags);
 }
 
+#endif /* CONFIG_USB_CONFIGFS_UEVENT */
+
 static void configfs_composite_suspend(struct usb_gadget *gadget)
 {
 	struct usb_composite_dev *cdev;
@@ -1529,10 +1688,15 @@
 	.bind           = configfs_composite_bind,
 	.unbind         = configfs_composite_unbind,
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	.setup          = android_setup,
+	.reset          = android_disconnect,
+	.disconnect     = android_disconnect,
+#else
 	.setup          = configfs_composite_setup,
 	.reset          = configfs_composite_disconnect,
 	.disconnect     = configfs_composite_disconnect,
-
+#endif
 	.suspend	= configfs_composite_suspend,
 	.resume		= configfs_composite_resume,
 
@@ -1544,6 +1708,91 @@
 	.match_existing_only = 1,
 };
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+static ssize_t state_show(struct device *pdev, struct device_attribute *attr,
+			char *buf)
+{
+	struct gadget_info *dev = dev_get_drvdata(pdev);
+	struct usb_composite_dev *cdev;
+	char *state = "DISCONNECTED";
+	unsigned long flags;
+
+	if (!dev)
+		goto out;
+
+	cdev = &dev->cdev;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		state = "CONFIGURED";
+	else if (dev->connected)
+		state = "CONNECTED";
+	spin_unlock_irqrestore(&cdev->lock, flags);
+out:
+	return sprintf(buf, "%s\n", state);
+}
+
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+
+static struct device_attribute *android_usb_attributes[] = {
+	&dev_attr_state,
+	NULL
+};
+
+static int android_device_create(struct gadget_info *gi)
+{
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+
+	INIT_WORK(&gi->work, android_work);
+	gi->dev = device_create(android_class, NULL,
+			MKDEV(0, 0), NULL, "android%d", gadget_index++);
+	if (IS_ERR(gi->dev))
+		return PTR_ERR(gi->dev);
+
+	dev_set_drvdata(gi->dev, gi);
+	if (!android_device)
+		android_device = gi->dev;
+
+	attrs = android_usb_attributes;
+	while ((attr = *attrs++)) {
+		int err;
+
+		err = device_create_file(gi->dev, attr);
+		if (err) {
+			device_destroy(gi->dev->class,
+				       gi->dev->devt);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void android_device_destroy(struct gadget_info *gi)
+{
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+
+	attrs = android_usb_attributes;
+	while ((attr = *attrs++))
+		device_remove_file(gi->dev, attr);
+	device_destroy(gi->dev->class, gi->dev->devt);
+}
+#else
+static inline int android_device_create(struct gadget_info *gi)
+{
+	return 0;
+}
+
+static inline void android_device_destroy(struct gadget_info *gi)
+{
+}
+#endif
+
 static struct config_group *gadgets_make(
 		struct config_group *group,
 		const char *name)
@@ -1596,7 +1845,11 @@
 	if (!gi->composite.gadget_driver.function)
 		goto err;
 
+	if (android_device_create(gi) < 0)
+		goto err;
+
 	return &gi->group;
+
 err:
 	kfree(gi);
 	return ERR_PTR(-ENOMEM);
@@ -1604,7 +1857,11 @@
 
 static void gadgets_drop(struct config_group *group, struct config_item *item)
 {
+	struct gadget_info *gi;
+
+	gi = container_of(to_config_group(item), struct gadget_info, group);
+	android_device_destroy(gi);
 	config_item_put(item);
 }
 
 static struct configfs_group_operations gadgets_ops = {
@@ -1644,6 +1901,13 @@
 	config_group_init(&gadget_subsys.su_group);
 
 	ret = configfs_register_subsystem(&gadget_subsys);
+
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	android_class = class_create(THIS_MODULE, "android_usb");
+	if (IS_ERR(android_class)) {
+		configfs_unregister_subsystem(&gadget_subsys);
+		return PTR_ERR(android_class);
+	}
+#endif
+
 	return ret;
 }
 module_init(gadget_cfs_init);
@@ -1651,5 +1915,10 @@
 static void __exit gadget_cfs_exit(void)
 {
 	configfs_unregister_subsystem(&gadget_subsys);
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	if (!IS_ERR(android_class))
+		class_destroy(android_class);
+#endif
 }
 module_exit(gadget_cfs_exit);
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index 5d3a6cf..dd33a12 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -50,3 +50,7 @@
 obj-$(CONFIG_USB_F_PRINTER)	+= usb_f_printer.o
 usb_f_tcm-y			:= f_tcm.o
 obj-$(CONFIG_USB_F_TCM)		+= usb_f_tcm.o
+usb_f_accessory-y		:= f_accessory.o
+obj-$(CONFIG_USB_F_ACC)		+= usb_f_accessory.o
+usb_f_audio_source-y		:= f_audio_source.o
+obj-$(CONFIG_USB_F_AUDIO_SRC)	+= usb_f_audio_source.o
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
new file mode 100644
index 0000000..a09d52f
--- /dev/null
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -0,0 +1,1453 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/hid.h>
+#include <linux/hiddev.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_accessory.h>
+
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+
+#define MAX_INST_NAME_LEN        40
+#define BULK_BUFFER_SIZE    16384
+#define ACC_STRING_SIZE     256
+
+#define PROTOCOL_VERSION    2
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX	0
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+
+struct acc_hid_dev {
+	struct list_head	list;
+	struct hid_device *hid;
+	struct acc_dev *dev;
+	/* accessory defined ID */
+	int id;
+	/* HID report descriptor */
+	u8 *report_desc;
+	/* length of HID report descriptor */
+	int report_desc_len;
+	/* number of bytes of report_desc we have received so far */
+	int report_desc_offset;
+};
+
+struct acc_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+
+	/* online indicates the state of function_set_alt & function_unbind;
+	 * set to 1 when we connect
+	 */
+	unsigned int online:1;
+
+	/* disconnected indicates the state of open & release;
+	 * set to 1 when we disconnect.
+	 * Not cleared until our file is closed.
+	 */
+	unsigned int disconnected:1;
+
+	/* strings sent by the host */
+	char manufacturer[ACC_STRING_SIZE];
+	char model[ACC_STRING_SIZE];
+	char description[ACC_STRING_SIZE];
+	char version[ACC_STRING_SIZE];
+	char uri[ACC_STRING_SIZE];
+	char serial[ACC_STRING_SIZE];
+
+	/* for acc_complete_set_string */
+	int string_index;
+
+	/* set to 1 if we have a pending start request */
+	int start_requested;
+
+	int audio_mode;
+
+	/* synchronize access to our device file */
+	atomic_t open_excl;
+
+	struct list_head tx_idle;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+	struct usb_request *rx_req[RX_REQ_MAX];
+	int rx_done;
+
+	/* delayed work for handling ACCESSORY_START */
+	struct delayed_work start_work;
+
+	/* work for handling ACCESSORY GET PROTOCOL */
+	struct work_struct getprotocol_work;
+
+	/* work for handling ACCESSORY SEND STRING */
+	struct work_struct sendstring_work;
+
+	/* worker for registering and unregistering hid devices */
+	struct work_struct hid_work;
+
+	/* list of active HID devices */
+	struct list_head	hid_list;
+
+	/* list of new HID devices to register */
+	struct list_head	new_hid_list;
+
+	/* list of dead HID devices to unregister */
+	struct list_head	dead_hid_list;
+};
+
+static struct usb_interface_descriptor acc_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 2,
+	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol     = 0,
+};
+
+static struct usb_endpoint_descriptor acc_superspeedplus_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor acc_superspeedplus_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor acc_superspeedplus_comp_desc = {
+	.bLength                = sizeof(acc_superspeedplus_comp_desc),
+	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =         0, */
+	/* .bmAttributes =      0, */
+};
+
+static struct usb_endpoint_descriptor acc_superspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = cpu_to_le16(1024),
+};
+
+static struct usb_endpoint_descriptor acc_superspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor acc_superspeed_comp_desc = {
+	.bLength                = sizeof(acc_superspeed_comp_desc),
+	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =         0, */
+	/* .bmAttributes =      0, */
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_fullspeed_out_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_highspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_highspeed_out_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ss_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_comp_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_out_desc,
+	(struct usb_descriptor_header *) &acc_superspeed_comp_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ssp_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_superspeedplus_in_desc,
+	(struct usb_descriptor_header *) &acc_superspeedplus_comp_desc,
+	(struct usb_descriptor_header *) &acc_superspeedplus_out_desc,
+	(struct usb_descriptor_header *) &acc_superspeedplus_comp_desc,
+	NULL,
+};
+
+static struct usb_string acc_string_defs[] = {
+	[INTERFACE_STRING_INDEX].s	= "Android Accessory Interface",
+	{  },	/* end of list */
+};
+
+static struct usb_gadget_strings acc_string_table = {
+	.language		= 0x0409,	/* en-US */
+	.strings		= acc_string_defs,
+};
+
+static struct usb_gadget_strings *acc_strings[] = {
+	&acc_string_table,
+	NULL,
+};
+
+/* temporary variable used between acc_open() and acc_gadget_bind() */
+static struct acc_dev *_acc_dev;
+
+struct acc_instance {
+	struct usb_function_instance func_inst;
+	const char *name;
+};
+
+static inline struct acc_dev *func_to_dev(struct usb_function *f)
+{
+	return container_of(f, struct acc_dev, function);
+}
+
+static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+
+	if (!req)
+		return NULL;
+
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void acc_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/* add a request to the tail of a list */
+static void req_put(struct acc_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = NULL;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void acc_set_disconnected(struct acc_dev *dev)
+{
+	dev->disconnected = 1;
+}
+
+static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = _acc_dev;
+
+	if (req->status == -ESHUTDOWN) {
+		pr_debug("acc_complete_in set disconnected");
+		acc_set_disconnected(dev);
+	}
+
+	req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static void acc_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = _acc_dev;
+
+	dev->rx_done = 1;
+	if (req->status == -ESHUTDOWN) {
+		pr_debug("acc_complete_out set disconnected");
+		acc_set_disconnected(dev);
+	}
+
+	wake_up(&dev->read_wq);
+}
+
+static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev	*dev = ep->driver_data;
+	char *string_dest = NULL;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_set_string, err %d\n", req->status);
+		return;
+	}
+
+	switch (dev->string_index) {
+	case ACCESSORY_STRING_MANUFACTURER:
+		string_dest = dev->manufacturer;
+		break;
+	case ACCESSORY_STRING_MODEL:
+		string_dest = dev->model;
+		break;
+	case ACCESSORY_STRING_DESCRIPTION:
+		string_dest = dev->description;
+		break;
+	case ACCESSORY_STRING_VERSION:
+		string_dest = dev->version;
+		break;
+	case ACCESSORY_STRING_URI:
+		string_dest = dev->uri;
+		break;
+	case ACCESSORY_STRING_SERIAL:
+		string_dest = dev->serial;
+		break;
+	}
+	if (string_dest) {
+		unsigned long flags;
+
+		if (length >= ACC_STRING_SIZE)
+			length = ACC_STRING_SIZE - 1;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		memcpy(string_dest, req->buf, length);
+		/* ensure zero termination */
+		string_dest[length] = 0;
+		spin_unlock_irqrestore(&dev->lock, flags);
+	} else {
+		pr_err("unknown accessory string index %d\n",
+			dev->string_index);
+	}
+}
+
+static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct acc_hid_dev *hid = req->context;
+	struct acc_dev *dev = hid->dev;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_set_hid_report_desc, err %d\n",
+			req->status);
+		return;
+	}
+
+	memcpy(hid->report_desc + hid->report_desc_offset, req->buf, length);
+	hid->report_desc_offset += length;
+	if (hid->report_desc_offset == hid->report_desc_len) {
+		/* After we have received the entire report descriptor
+		 * we schedule work to initialize the HID device
+		 */
+		schedule_work(&dev->hid_work);
+	}
+}
+
+static void acc_complete_send_hid_event(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct acc_hid_dev *hid = req->context;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_send_hid_event, err %d\n", req->status);
+		return;
+	}
+
+	hid_report_raw_event(hid->hid, HID_INPUT_REPORT, req->buf, length, 1);
+}
+
+static int acc_hid_parse(struct hid_device *hid)
+{
+	struct acc_hid_dev *hdev = hid->driver_data;
+
+	hid_parse_report(hid, hdev->report_desc, hdev->report_desc_len);
+	return 0;
+}
+
+static int acc_hid_start(struct hid_device *hid)
+{
+	return 0;
+}
+
+static void acc_hid_stop(struct hid_device *hid)
+{
+}
+
+static int acc_hid_open(struct hid_device *hid)
+{
+	return 0;
+}
+
+static void acc_hid_close(struct hid_device *hid)
+{
+}
+
+static int acc_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+	__u8 *buf, size_t len, unsigned char rtype, int reqtype)
+{
+	return 0;
+}
+
+static struct hid_ll_driver acc_hid_ll_driver = {
+	.parse = acc_hid_parse,
+	.start = acc_hid_start,
+	.stop = acc_hid_stop,
+	.open = acc_hid_open,
+	.close = acc_hid_close,
+	.raw_request = acc_hid_raw_request,
+};
+
+static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev,
+		int id, int desc_len)
+{
+	struct acc_hid_dev *hdev;
+
+	hdev = kzalloc(sizeof(*hdev), GFP_ATOMIC);
+	if (!hdev)
+		return NULL;
+	hdev->report_desc = kzalloc(desc_len, GFP_ATOMIC);
+	if (!hdev->report_desc) {
+		kfree(hdev);
+		return NULL;
+	}
+	hdev->dev = dev;
+	hdev->id = id;
+	hdev->report_desc_len = desc_len;
+
+	return hdev;
+}
+
+static struct acc_hid_dev *acc_hid_get(struct list_head *list, int id)
+{
+	struct acc_hid_dev *hid;
+
+	list_for_each_entry(hid, list, list) {
+		if (hid->id == id)
+			return hid;
+	}
+	return NULL;
+}
+
+static int acc_register_hid(struct acc_dev *dev, int id, int desc_length)
+{
+	struct acc_hid_dev *hid;
+	unsigned long flags;
+
+	/* report descriptor length must be > 0 */
+	if (desc_length <= 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	/* replace HID if one already exists with this ID */
+	hid = acc_hid_get(&dev->hid_list, id);
+	if (!hid)
+		hid = acc_hid_get(&dev->new_hid_list, id);
+	if (hid)
+		list_move(&hid->list, &dev->dead_hid_list);
+
+	hid = acc_hid_new(dev, id, desc_length);
+	if (!hid) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -ENOMEM;
+	}
+
+	list_add(&hid->list, &dev->new_hid_list);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* schedule work to register the HID device */
+	schedule_work(&dev->hid_work);
+	return 0;
+}
+
+static int acc_unregister_hid(struct acc_dev *dev, int id)
+{
+	struct acc_hid_dev *hid;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	hid = acc_hid_get(&dev->hid_list, id);
+	if (!hid)
+		hid = acc_hid_get(&dev->new_hid_list, id);
+	if (!hid) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -EINVAL;
+	}
+
+	list_move(&hid->list, &dev->dead_hid_list);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	schedule_work(&dev->hid_work);
+	return 0;
+}
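
For reference, the host drives this HID flow with two vendor control
requests: one announcing the descriptor length, then one or more transfers
carrying descriptor bytes at increasing offsets. A hedged libusb sketch,
assuming the AOAv2 request values (the kernel side above consumes wValue as
the HID id and wIndex as the length or offset):

	#include <libusb-1.0/libusb.h>

	#define ACCESSORY_REGISTER_HID		54	/* AOAv2 */
	#define ACCESSORY_SET_HID_REPORT_DESC	56	/* AOAv2 */

	/* Register HID device `id` and upload its report descriptor. */
	static int aoa_register_hid(libusb_device_handle *h, uint16_t id,
				    unsigned char *desc, uint16_t desc_len)
	{
		int r = libusb_control_transfer(h,
				LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR,
				ACCESSORY_REGISTER_HID, id, desc_len,
				NULL, 0, 1000);

		if (r < 0)
			return r;
		/* single transfer here, so the descriptor offset is 0 */
		return libusb_control_transfer(h,
				LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR,
				ACCESSORY_SET_HID_REPORT_DESC, id, 0,
				desc, desc_len, 1000);
	}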
+
+static int create_bulk_endpoints(struct acc_dev *dev,
+				struct usb_endpoint_descriptor *in_desc,
+				struct usb_endpoint_descriptor *out_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_in;
+		req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_out;
+		dev->rx_req[i] = req;
+	}
+
+	return 0;
+
+fail:
+	pr_err("acc_bind() could not allocate requests\n");
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+	return -ENOMEM;
+}
+
+static ssize_t acc_read(struct file *fp, char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req;
+	ssize_t r = count;
+	unsigned xfer;
+	int ret = 0;
+
+	pr_debug("acc_read(%zu)\n", count);
+
+	if (dev->disconnected) {
+		pr_debug("acc_read disconnected");
+		return -ENODEV;
+	}
+
+	if (count > BULK_BUFFER_SIZE)
+		count = BULK_BUFFER_SIZE;
+
+	/* we will block until we're online */
+	pr_debug("acc_read: waiting for online\n");
+	ret = wait_event_interruptible(dev->read_wq, dev->online);
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+
+	if (dev->rx_done) {
+		/* last req cancelled; try to get it */
+		req = dev->rx_req[0];
+		goto copy_data;
+	}
+
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req[0];
+	req->length = count;
+	dev->rx_done = 0;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		goto done;
+	} else {
+		pr_debug("rx %p queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		r = ret;
+		ret = usb_ep_dequeue(dev->ep_out, req);
+		if (ret != 0) {
+			/* cancel failed; data may already have been received
+			 * and will be retrieved in the next read
+			 */
+			pr_debug("acc_read: cancelling failed %d\n", ret);
+		}
+		goto done;
+	}
+
+copy_data:
+	dev->rx_done = 0;
+	if (dev->online) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		pr_debug("rx %p %u\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		r = xfer;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+	} else {
+		r = -EIO;
+	}
+
+done:
+	pr_debug("acc_read returning %zd\n", r);
+	return r;
+}
+
+static ssize_t acc_write(struct file *fp, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req = NULL;
+	ssize_t r = count;
+	unsigned xfer;
+	int ret;
+
+	pr_debug("acc_write(%zu)\n", count);
+
+	if (!dev->online || dev->disconnected) {
+		pr_debug("acc_write disconnected or not online");
+		return -ENODEV;
+	}
+
+	while (count > 0) {
+		/* get an idle tx request to use */
+		req = NULL;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+		if (!dev->online || dev->disconnected) {
+			pr_debug("acc_write dev->error\n");
+			r = -EIO;
+			break;
+		}
+
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > BULK_BUFFER_SIZE) {
+			xfer = BULK_BUFFER_SIZE;
+			/* more TX requests to come, so no ZLP yet */
+			req->zero = 0;
+		} else {
+			xfer = count;
+			/* If the data length is a multiple of the
+			 * maxpacket size then send a zero length packet (ZLP).
+			 */
+			req->zero = ((xfer % dev->ep_in->maxpacket) == 0);
+		}
+		if (copy_from_user(req->buf, buf, xfer)) {
+			r = -EFAULT;
+			break;
+		}
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			pr_debug("acc_write: xfer error %d\n", ret);
+			r = -EIO;
+			break;
+		}
+
+		buf += xfer;
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = NULL;
+	}
+
+	if (req)
+		req_put(dev, &dev->tx_idle, req);
+
+	pr_debug("acc_write returning %zd\n", r);
+	return r;
+}
+
+static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	struct acc_dev *dev = fp->private_data;
+	char *src = NULL;
+	int ret;
+
+	switch (code) {
+	case ACCESSORY_GET_STRING_MANUFACTURER:
+		src = dev->manufacturer;
+		break;
+	case ACCESSORY_GET_STRING_MODEL:
+		src = dev->model;
+		break;
+	case ACCESSORY_GET_STRING_DESCRIPTION:
+		src = dev->description;
+		break;
+	case ACCESSORY_GET_STRING_VERSION:
+		src = dev->version;
+		break;
+	case ACCESSORY_GET_STRING_URI:
+		src = dev->uri;
+		break;
+	case ACCESSORY_GET_STRING_SERIAL:
+		src = dev->serial;
+		break;
+	case ACCESSORY_IS_START_REQUESTED:
+		return dev->start_requested;
+	case ACCESSORY_GET_AUDIO_MODE:
+		return dev->audio_mode;
+	}
+	if (!src)
+		return -EINVAL;
+
+	ret = strlen(src) + 1;
+	if (copy_to_user((void __user *)value, src, ret))
+		ret = -EFAULT;
+	return ret;
+}
+
+static int acc_open(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "acc_open\n");
+	if (atomic_xchg(&_acc_dev->open_excl, 1))
+		return -EBUSY;
+
+	_acc_dev->disconnected = 0;
+	fp->private_data = _acc_dev;
+	return 0;
+}
+
+static int acc_release(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "acc_release\n");
+
+	WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0));
+	/* indicate that we are disconnected
+	 * still could be online so don't touch online flag
+	 */
+	_acc_dev->disconnected = 1;
+	return 0;
+}
+
+/* file operations for /dev/usb_accessory */
+static const struct file_operations acc_fops = {
+	.owner = THIS_MODULE,
+	.read = acc_read,
+	.write = acc_write,
+	.unlocked_ioctl = acc_ioctl,
+	.open = acc_open,
+	.release = acc_release,
+};
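
On the device side, an Android userspace daemon talks to this function
through /dev/usb_accessory. A minimal sketch (the ioctl names come from
<linux/usb/f_accessory.h>, included by this driver):

	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/usb/f_accessory.h>

	int main(void)
	{
		char str[256], buf[16384];
		ssize_t n;
		int fd = open("/dev/usb_accessory", O_RDWR);

		if (fd < 0)
			return 1;
		if (ioctl(fd, ACCESSORY_GET_STRING_MANUFACTURER, str) >= 0)
			printf("manufacturer: %s\n", str);
		/* read() blocks until the function goes online */
		while ((n = read(fd, buf, sizeof(buf))) > 0)
			; /* hand the bulk data to the accessory app */
		close(fd);
		return 0;
	}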
+
+static int acc_hid_probe(struct hid_device *hdev,
+		const struct hid_device_id *id)
+{
+	int ret;
+
+	ret = hid_parse(hdev);
+	if (ret)
+		return ret;
+	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
+static struct miscdevice acc_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "usb_accessory",
+	.fops = &acc_fops,
+};
+
+static const struct hid_device_id acc_hid_table[] = {
+	{ HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+	{ }
+};
+
+static struct hid_driver acc_hid_driver = {
+	.name = "USB accessory",
+	.id_table = acc_hid_table,
+	.probe = acc_hid_probe,
+};
+
+static void acc_complete_setup_noop(struct usb_ep *ep, struct usb_request *req)
+{
+	/*
+	 * Default no-op function when nothing needs to be done for the
+	 * setup request
+	 */
+}
+
+int acc_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct acc_dev	*dev = _acc_dev;
+	int	value = -EOPNOTSUPP;
+	struct acc_hid_dev *hid;
+	int offset;
+	u8 b_requestType = ctrl->bRequestType;
+	u8 b_request = ctrl->bRequest;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+	unsigned long flags;
+
+	/*
+	 * If instance is not created which is the case in power off charging
+	 * mode, dev will be NULL. Hence return error if it is the case.
+	 */
+	if (!dev)
+		return -ENODEV;
+/*
+	printk(KERN_INFO "acc_ctrlrequest "
+			"%02x.%02x v%04x i%04x l%u\n",
+			b_requestType, b_request,
+			w_value, w_index, w_length);
+*/
+
+	if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {
+		if (b_request == ACCESSORY_START) {
+			dev->start_requested = 1;
+			schedule_delayed_work(
+				&dev->start_work, msecs_to_jiffies(10));
+			value = 0;
+			cdev->req->complete = acc_complete_setup_noop;
+		} else if (b_request == ACCESSORY_SEND_STRING) {
+			schedule_work(&dev->sendstring_work);
+			dev->string_index = w_index;
+			cdev->gadget->ep0->driver_data = dev;
+			cdev->req->complete = acc_complete_set_string;
+			value = w_length;
+		} else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
+				w_index == 0 && w_length == 0) {
+			dev->audio_mode = w_value;
+			cdev->req->complete = acc_complete_setup_noop;
+			value = 0;
+		} else if (b_request == ACCESSORY_REGISTER_HID) {
+			cdev->req->complete = acc_complete_setup_noop;
+			value = acc_register_hid(dev, w_value, w_index);
+		} else if (b_request == ACCESSORY_UNREGISTER_HID) {
+			cdev->req->complete = acc_complete_setup_noop;
+			value = acc_unregister_hid(dev, w_value);
+		} else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
+			spin_lock_irqsave(&dev->lock, flags);
+			hid = acc_hid_get(&dev->new_hid_list, w_value);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (!hid) {
+				value = -EINVAL;
+				goto err;
+			}
+			offset = w_index;
+			if (offset != hid->report_desc_offset
+				|| offset + w_length > hid->report_desc_len) {
+				value = -EINVAL;
+				goto err;
+			}
+			cdev->req->context = hid;
+			cdev->req->complete = acc_complete_set_hid_report_desc;
+			value = w_length;
+		} else if (b_request == ACCESSORY_SEND_HID_EVENT) {
+			spin_lock_irqsave(&dev->lock, flags);
+			hid = acc_hid_get(&dev->hid_list, w_value);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (!hid) {
+				value = -EINVAL;
+				goto err;
+			}
+			cdev->req->context = hid;
+			cdev->req->complete = acc_complete_send_hid_event;
+			value = w_length;
+		}
+	} else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
+		if (b_request == ACCESSORY_GET_PROTOCOL) {
+			schedule_work(&dev->getprotocol_work);
+			*((__le16 *)cdev->req->buf) = cpu_to_le16(PROTOCOL_VERSION);
+			value = sizeof(u16);
+			cdev->req->complete = acc_complete_setup_noop;
+			/* clear any strings left over from a previous session */
+			memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
+			memset(dev->model, 0, sizeof(dev->model));
+			memset(dev->description, 0, sizeof(dev->description));
+			memset(dev->version, 0, sizeof(dev->version));
+			memset(dev->uri, 0, sizeof(dev->uri));
+			memset(dev->serial, 0, sizeof(dev->serial));
+			dev->start_requested = 0;
+			dev->audio_mode = 0;
+		}
+	}
+
+	if (value >= 0) {
+		cdev->req->zero = 0;
+		cdev->req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "%s setup response queue error\n",
+				__func__);
+	}
+
+err:
+	if (value == -EOPNOTSUPP)
+		VDBG(cdev,
+			"unknown class-specific control req "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	return value;
+}
+EXPORT_SYMBOL_GPL(acc_ctrlrequest);
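
The matching host-side handshake checks the protocol version, sends the
identifying strings and issues ACCESSORY_START, after which the device
re-enumerates with this function active. A hedged libusb sketch using the
AOA request values 51-53 handled above; the manufacturer string is a
placeholder:

	#include <string.h>
	#include <libusb-1.0/libusb.h>

	#define ACCESSORY_GET_PROTOCOL	51
	#define ACCESSORY_SEND_STRING	52
	#define ACCESSORY_START		53

	static int aoa_start(libusb_device_handle *h)
	{
		unsigned char ver[2];
		const char *mfr = "ExampleCo";	/* hypothetical */
		int r;

		r = libusb_control_transfer(h,
				LIBUSB_ENDPOINT_IN | LIBUSB_REQUEST_TYPE_VENDOR,
				ACCESSORY_GET_PROTOCOL, 0, 0, ver, 2, 1000);
		if (r < 2 || (ver[0] | (ver[1] << 8)) < 1)
			return -1;	/* accessory protocol unsupported */
		/* wIndex selects the string ID; 0 is the manufacturer */
		r = libusb_control_transfer(h,
				LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR,
				ACCESSORY_SEND_STRING, 0, 0,
				(unsigned char *)mfr, strlen(mfr) + 1, 1000);
		if (r < 0)
			return r;
		return libusb_control_transfer(h,
				LIBUSB_ENDPOINT_OUT | LIBUSB_REQUEST_TYPE_VENDOR,
				ACCESSORY_START, 0, 0, NULL, 0, 1000);
	}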
+
+static int
+__acc_function_bind(struct usb_configuration *c,
+			struct usb_function *f, bool configfs)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct acc_dev	*dev = func_to_dev(f);
+	int			id;
+	int			ret;
+
+	DBG(cdev, "acc_function_bind dev: %p\n", dev);
+
+	if (configfs) {
+		if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+			ret = usb_string_id(c->cdev);
+			if (ret < 0)
+				return ret;
+			acc_string_defs[INTERFACE_STRING_INDEX].id = ret;
+			acc_interface_desc.iInterface = ret;
+		}
+		dev->cdev = c->cdev;
+	}
+	ret = hid_register_driver(&acc_hid_driver);
+	if (ret)
+		return ret;
+
+	dev->start_requested = 0;
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	acc_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc,
+			&acc_fullspeed_out_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		acc_highspeed_in_desc.bEndpointAddress =
+			acc_fullspeed_in_desc.bEndpointAddress;
+		acc_highspeed_out_desc.bEndpointAddress =
+			acc_fullspeed_out_desc.bEndpointAddress;
+	}
+	/* likewise for the super speed (plus) descriptors */
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		acc_superspeed_in_desc.bEndpointAddress =
+		acc_superspeedplus_in_desc.bEndpointAddress =
+			acc_fullspeed_in_desc.bEndpointAddress;
+		acc_superspeed_out_desc.bEndpointAddress =
+		acc_superspeedplus_out_desc.bEndpointAddress =
+			acc_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static int
+acc_function_bind_configfs(struct usb_configuration *c,
+			struct usb_function *f)
+{
+	return __acc_function_bind(c, f, true);
+}
+
+static void
+kill_all_hid_devices(struct acc_dev *dev)
+{
+	struct acc_hid_dev *hid;
+	struct list_head *entry, *temp;
+	unsigned long flags;
+
+	/* do nothing if usb accessory device doesn't exist */
+	if (!dev)
+		return;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_for_each_safe(entry, temp, &dev->hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_del(&hid->list);
+		list_add(&hid->list, &dev->dead_hid_list);
+	}
+	list_for_each_safe(entry, temp, &dev->new_hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_del(&hid->list);
+		list_add(&hid->list, &dev->dead_hid_list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	schedule_work(&dev->hid_work);
+}
+
+static void
+acc_hid_unbind(struct acc_dev *dev)
+{
+	hid_unregister_driver(&acc_hid_driver);
+	kill_all_hid_devices(dev);
+}
+
+static void
+acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_request *req;
+	int i;
+
+	dev->online = 0;		/* clear online flag */
+	wake_up(&dev->read_wq);		/* unblock reads on closure */
+	wake_up(&dev->write_wq);	/* likewise for writes */
+
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+
+	acc_hid_unbind(dev);
+}
+
+static void acc_getprotocol_work(struct work_struct *data)
+{
+	char *envp[2] = { "ACCESSORY=GETPROTOCOL", NULL };
+
+	kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static void acc_sendstring_work(struct work_struct *data)
+{
+	char *envp[2] = { "ACCESSORY=SENDSTRING", NULL };
+
+	kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static void acc_start_work(struct work_struct *data)
+{
+	char *envp[2] = { "ACCESSORY=START", NULL };
+
+	kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static int acc_hid_init(struct acc_hid_dev *hdev)
+{
+	struct hid_device *hid;
+	int ret;
+
+	hid = hid_allocate_device();
+	if (IS_ERR(hid))
+		return PTR_ERR(hid);
+
+	hid->ll_driver = &acc_hid_ll_driver;
+	hid->dev.parent = acc_device.this_device;
+
+	hid->bus = BUS_USB;
+	hid->vendor = HID_ANY_ID;
+	hid->product = HID_ANY_ID;
+	hid->driver_data = hdev;
+	ret = hid_add_device(hid);
+	if (ret) {
+		pr_err("can't add hid device: %d\n", ret);
+		hid_destroy_device(hid);
+		return ret;
+	}
+
+	hdev->hid = hid;
+	return 0;
+}
+
+static void acc_hid_delete(struct acc_hid_dev *hid)
+{
+	kfree(hid->report_desc);
+	kfree(hid);
+}
+
+static void acc_hid_work(struct work_struct *data)
+{
+	struct acc_dev *dev = _acc_dev;
+	struct list_head	*entry, *temp;
+	struct acc_hid_dev *hid;
+	struct list_head	new_list, dead_list;
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&new_list);
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* copy hids that are ready for initialization to new_list */
+	list_for_each_safe(entry, temp, &dev->new_hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		if (hid->report_desc_offset == hid->report_desc_len)
+			list_move(&hid->list, &new_list);
+	}
+
+	if (list_empty(&dev->dead_hid_list)) {
+		INIT_LIST_HEAD(&dead_list);
+	} else {
+		/* move all of dev->dead_hid_list to dead_list */
+		dead_list.prev = dev->dead_hid_list.prev;
+		dead_list.next = dev->dead_hid_list.next;
+		dead_list.next->prev = &dead_list;
+		dead_list.prev->next = &dead_list;
+		INIT_LIST_HEAD(&dev->dead_hid_list);
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* register new HID devices */
+	list_for_each_safe(entry, temp, &new_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		if (acc_hid_init(hid)) {
+			pr_err("can't add HID device %p\n", hid);
+			acc_hid_delete(hid);
+		} else {
+			spin_lock_irqsave(&dev->lock, flags);
+			list_move(&hid->list, &dev->hid_list);
+			spin_unlock_irqrestore(&dev->lock, flags);
+		}
+	}
+
+	/* remove dead HID devices */
+	list_for_each_safe(entry, temp, &dead_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_del(&hid->list);
+		if (hid->hid)
+			hid_destroy_device(hid->hid);
+		acc_hid_delete(hid);
+	}
+}
+
+static int acc_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_out);
+	if (ret) {
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+
+	dev->online = 1;
+	dev->disconnected = 0; /* if online then not disconnected */
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+static void acc_function_disable(struct usb_function *f)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	DBG(cdev, "acc_function_disable\n");
+	acc_set_disconnected(dev); /* this now only sets disconnected */
+	dev->online = 0; /* so now need to clear online flag here too */
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int acc_setup(void)
+{
+	struct acc_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+	atomic_set(&dev->open_excl, 0);
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->hid_list);
+	INIT_LIST_HEAD(&dev->new_hid_list);
+	INIT_LIST_HEAD(&dev->dead_hid_list);
+	INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
+	INIT_WORK(&dev->hid_work, acc_hid_work);
+	INIT_WORK(&dev->getprotocol_work, acc_getprotocol_work);
+	INIT_WORK(&dev->sendstring_work, acc_sendstring_work);
+
+	/* _acc_dev must be set before calling usb_gadget_register_driver */
+	_acc_dev = dev;
+
+	ret = misc_register(&acc_device);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	kfree(dev);
+	pr_err("USB accessory gadget driver failed to initialize\n");
+	return ret;
+}
+
+void acc_disconnect(void)
+{
+	/* unregister all HID devices if USB is disconnected */
+	kill_all_hid_devices(_acc_dev);
+}
+EXPORT_SYMBOL_GPL(acc_disconnect);
+
+static void acc_cleanup(void)
+{
+	misc_deregister(&acc_device);
+	kfree(_acc_dev);
+	_acc_dev = NULL;
+}
+
+static struct acc_instance *to_acc_instance(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct acc_instance,
+		func_inst.group);
+}
+
+static void acc_attr_release(struct config_item *item)
+{
+	struct acc_instance *fi_acc = to_acc_instance(item);
+
+	usb_put_function_instance(&fi_acc->func_inst);
+}
+
+static struct configfs_item_operations acc_item_ops = {
+	.release        = acc_attr_release,
+};
+
+static struct config_item_type acc_func_type = {
+	.ct_item_ops    = &acc_item_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+static struct acc_instance *to_fi_acc(struct usb_function_instance *fi)
+{
+	return container_of(fi, struct acc_instance, func_inst);
+}
+
+static int acc_set_inst_name(struct usb_function_instance *fi, const char *name)
+{
+	struct acc_instance *fi_acc;
+	char *ptr;
+	int name_len;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	ptr = kstrndup(name, name_len, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	fi_acc = to_fi_acc(fi);
+	fi_acc->name = ptr;
+	return 0;
+}
+
+static void acc_free_inst(struct usb_function_instance *fi)
+{
+	struct acc_instance *fi_acc;
+
+	fi_acc = to_fi_acc(fi);
+	kfree(fi_acc->name);
+	acc_cleanup();
+}
+
+static struct usb_function_instance *acc_alloc_inst(void)
+{
+	struct acc_instance *fi_acc;
+	int err;
+
+	fi_acc = kzalloc(sizeof(*fi_acc), GFP_KERNEL);
+	if (!fi_acc)
+		return ERR_PTR(-ENOMEM);
+	fi_acc->func_inst.set_inst_name = acc_set_inst_name;
+	fi_acc->func_inst.free_func_inst = acc_free_inst;
+
+	err = acc_setup();
+	if (err) {
+		kfree(fi_acc);
+		pr_err("Error setting ACCESSORY\n");
+		return ERR_PTR(err);
+	}
+
+	config_group_init_type_name(&fi_acc->func_inst.group,
+					"", &acc_func_type);
+	return &fi_acc->func_inst;
+}
+
+static void acc_free(struct usb_function *f)
+{
+	/* NO-OP: no function-specific resource allocation in acc_alloc() */
+}
+
+int acc_ctrlrequest_configfs(struct usb_function *f,
+			const struct usb_ctrlrequest *ctrl)
+{
+	if (f->config != NULL && f->config->cdev != NULL)
+		return acc_ctrlrequest(f->config->cdev, ctrl);
+	else
+		return -1;
+}
+
+static struct usb_function *acc_alloc(struct usb_function_instance *fi)
+{
+	struct acc_dev *dev = _acc_dev;
+
+	pr_info("acc_alloc\n");
+
+	dev->function.name = "accessory";
+	dev->function.strings = acc_strings;
+	dev->function.fs_descriptors = fs_acc_descs;
+	dev->function.hs_descriptors = hs_acc_descs;
+	dev->function.ss_descriptors = ss_acc_descs;
+	dev->function.ssp_descriptors = ssp_acc_descs;
+	dev->function.bind = acc_function_bind_configfs;
+	dev->function.unbind = acc_function_unbind;
+	dev->function.set_alt = acc_function_set_alt;
+	dev->function.disable = acc_function_disable;
+	dev->function.free_func = acc_free;
+	dev->function.setup = acc_ctrlrequest_configfs;
+
+	return &dev->function;
+}
+DECLARE_USB_FUNCTION_INIT(accessory, acc_alloc_inst, acc_alloc);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 46647bf..349945e 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -686,7 +686,7 @@
 	acm_ss_out_desc.bEndpointAddress = acm_fs_out_desc.bEndpointAddress;
 
 	status = usb_assign_descriptors(f, acm_fs_function, acm_hs_function,
-			acm_ss_function, NULL);
+			acm_ss_function, acm_ss_function);
 	if (status)
 		goto fail;
 
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
new file mode 100644
index 0000000..c768a52
--- /dev/null
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -0,0 +1,1071 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Gadget Function Driver for USB audio source device
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/usb/audio.h>
+#include <linux/wait.h>
+#include <linux/pm_qos.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#define SAMPLE_RATE 44100
+#define FRAMES_PER_MSEC (SAMPLE_RATE / 1000)
+
+#define IN_EP_MAX_PACKET_SIZE 256
+
+/* Number of requests to allocate */
+#define IN_EP_REQ_COUNT 4
+
+#define AUDIO_AC_INTERFACE	0
+#define AUDIO_AS_INTERFACE	1
+#define AUDIO_NUM_INTERFACES	2
+#define MAX_INST_NAME_LEN     40
+
+/* B.3.1  Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+
+#define UAC_DT_AC_HEADER_LENGTH	UAC_DT_AC_HEADER_SIZE(AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
+	+ UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \
+	+ UAC_DT_FEATURE_UNIT_SIZE(0))
+/* B.3.2  Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
+	.bLength =		UAC_DT_AC_HEADER_LENGTH,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_HEADER,
+	.bcdADC =		__constant_cpu_to_le16(0x0100),
+	.wTotalLength =		__constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+	.bInCollection =	AUDIO_NUM_INTERFACES,
+	.baInterfaceNr = {
+		[0] =		AUDIO_AC_INTERFACE,
+		[1] =		AUDIO_AS_INTERFACE,
+	}
+};
+
+#define INPUT_TERMINAL_ID	1
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+	.bLength =		UAC_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_INPUT_TERMINAL,
+	.bTerminalID =		INPUT_TERMINAL_ID,
+	.wTerminalType =	UAC_INPUT_TERMINAL_MICROPHONE,
+	.bAssocTerminal =	0,
+	.wChannelConfig =	0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID		2
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+	.bLength		= UAC_DT_FEATURE_UNIT_SIZE(0),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_FEATURE_UNIT,
+	.bUnitID		= FEATURE_UNIT_ID,
+	.bSourceID		= INPUT_TERMINAL_ID,
+	.bControlSize		= 2,
+};
+
+#define OUTPUT_TERMINAL_ID	3
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
+	.bLength		= UAC_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_OUTPUT_TERMINAL,
+	.bTerminalID		= OUTPUT_TERMINAL_ID,
+	.wTerminalType		= UAC_TERMINAL_STREAMING,
+	.bAssocTerminal		= FEATURE_UNIT_ID,
+	.bSourceID		= FEATURE_UNIT_ID,
+};
+
+/* B.4.1  Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2  Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+	.bLength =		UAC_DT_AS_HEADER_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_AS_GENERAL,
+	.bTerminalLink =	INPUT_TERMINAL_ID,
+	.bDelay =		1,
+	.wFormatTag =		UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+	.bLength =		UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_FORMAT_TYPE,
+	.bFormatType =		UAC_FORMAT_TYPE_I,
+	.bSubframeSize =	2,
+	.bBitResolution =	16,
+	.bSamFreqType =		1,
+};
+
+/* Standard ISO IN Endpoint Descriptor for highspeed */
+static struct usb_endpoint_descriptor hs_as_in_ep_desc  = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_SYNC_SYNC
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize =	__constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+	.bInterval =		4, /* poll 1 per millisecond */
+};
+
+/* Standard ISO IN Endpoint Descriptor for fullspeed */
+static struct usb_endpoint_descriptor fs_as_in_ep_desc  = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_SYNC_SYNC
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize =	__constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+	.bInterval =		1, /* poll 1 per millisecond */
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+	.bLength =		UAC_ISO_ENDPOINT_DESC_SIZE,
+	.bDescriptorType =	USB_DT_CS_ENDPOINT,
+	.bDescriptorSubtype =	UAC_EP_GENERAL,
+	.bmAttributes =		1,
+	.bLockDelayUnits =	1,
+	.wLockDelay =		__constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *hs_audio_desc[] = {
+	(struct usb_descriptor_header *)&ac_interface_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&input_terminal_desc,
+	(struct usb_descriptor_header *)&output_terminal_desc,
+	(struct usb_descriptor_header *)&feature_unit_desc,
+
+	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_header_desc,
+
+	(struct usb_descriptor_header *)&as_type_i_desc,
+
+	(struct usb_descriptor_header *)&hs_as_in_ep_desc,
+	(struct usb_descriptor_header *)&as_iso_in_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *fs_audio_desc[] = {
+	(struct usb_descriptor_header *)&ac_interface_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&input_terminal_desc,
+	(struct usb_descriptor_header *)&output_terminal_desc,
+	(struct usb_descriptor_header *)&feature_unit_desc,
+
+	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_header_desc,
+
+	(struct usb_descriptor_header *)&as_type_i_desc,
+
+	(struct usb_descriptor_header *)&fs_as_in_ep_desc,
+	(struct usb_descriptor_header *)&as_iso_in_desc,
+	NULL,
+};
+
+static struct snd_pcm_hardware audio_hw_info = {
+	.info =			SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_BATCH |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER,
+
+	.formats		= SNDRV_PCM_FMTBIT_S16_LE,
+	.channels_min		= 2,
+	.channels_max		= 2,
+	.rate_min		= SAMPLE_RATE,
+	.rate_max		= SAMPLE_RATE,
+
+	.buffer_bytes_max =	1024 * 1024,
+	.period_bytes_min =	64,
+	.period_bytes_max =	512 * 1024,
+	.periods_min =		2,
+	.periods_max =		1024,
+};
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_config {
+	int	card;
+	int	device;
+};
+
+struct audio_dev {
+	struct usb_function		func;
+	struct snd_card			*card;
+	struct snd_pcm			*pcm;
+	struct snd_pcm_substream	*substream;
+
+	struct list_head		idle_reqs;
+	struct usb_ep			*in_ep;
+
+	spinlock_t			lock;
+
+	/* beginning, end and current position in our buffer */
+	void				*buffer_start;
+	void				*buffer_end;
+	void				*buffer_pos;
+
+	/* byte size of a "period" */
+	unsigned int			period;
+	/* bytes sent since last call to snd_pcm_period_elapsed */
+	unsigned int			period_offset;
+	/* time we started playing */
+	ktime_t				start_time;
+	/* number of frames sent since start_time */
+	s64				frames_sent;
+	struct audio_source_config	*config;
+	/* for creating and issuing QoS requests */
+	struct pm_qos_request pm_qos;
+};
+
+static inline struct audio_dev *func_to_audio(struct usb_function *f)
+{
+	return container_of(f, struct audio_dev, func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_instance {
+	struct usb_function_instance func_inst;
+	const char *name;
+	struct audio_source_config *config;
+	struct device *audio_device;
+};
+
+static void audio_source_attr_release(struct config_item *item);
+
+static struct configfs_item_operations audio_source_item_ops = {
+	.release        = audio_source_attr_release,
+};
+
+static struct config_item_type audio_source_func_type = {
+	.ct_item_ops    = &audio_source_item_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static DEVICE_ATTR(pcm, S_IRUGO, audio_source_pcm_show, NULL);
+
+static struct device_attribute *audio_source_function_attributes[] = {
+	&dev_attr_pcm,
+	NULL
+};
+
+/*--------------------------------------------------------------------------*/
+
+static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+
+	if (!req)
+		return NULL;
+
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+	req->length = buffer_size;
+	return req;
+}
+
+static void audio_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+static void audio_req_put(struct audio_dev *audio, struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	list_add_tail(&req->list, &audio->idle_reqs);
+	spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static struct usb_request *audio_req_get(struct audio_dev *audio)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	if (list_empty(&audio->idle_reqs)) {
+		req = NULL;
+	} else {
+		req = list_first_entry(&audio->idle_reqs, struct usb_request,
+				list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&audio->lock, flags);
+	return req;
+}
+
+/* send the appropriate number of packets to match our bitrate */
+static void audio_send(struct audio_dev *audio)
+{
+	struct snd_pcm_runtime *runtime;
+	struct usb_request *req;
+	int length, length1, length2, ret;
+	s64 msecs;
+	s64 frames;
+	ktime_t now;
+
+	/* audio->substream will be null if we have been closed */
+	if (!audio->substream)
+		return;
+	/* audio->buffer_pos will be null if we have been stopped */
+	if (!audio->buffer_pos)
+		return;
+
+	runtime = audio->substream->runtime;
+
+	/* compute number of frames to send */
+	now = ktime_get();
+	msecs = div_s64((ktime_to_ns(now) - ktime_to_ns(audio->start_time)),
+			1000000);
+	frames = div_s64((msecs * SAMPLE_RATE), 1000);
+
+	/* Readjust our frames_sent if we fall too far behind.
+	 * If we get too far behind it is better to drop some frames than
+	 * to keep sending data too fast in an attempt to catch up.
+	 */
+	if (frames - audio->frames_sent > 10 * FRAMES_PER_MSEC)
+		audio->frames_sent = frames - FRAMES_PER_MSEC;
+
+	frames -= audio->frames_sent;
+
+	/* We need to send something to keep the pipeline going */
+	if (frames <= 0)
+		frames = FRAMES_PER_MSEC;
+
+	while (frames > 0) {
+		req = audio_req_get(audio);
+		if (!req)
+			break;
+
+		length = frames_to_bytes(runtime, frames);
+		if (length > IN_EP_MAX_PACKET_SIZE)
+			length = IN_EP_MAX_PACKET_SIZE;
+
+		if (audio->buffer_pos + length > audio->buffer_end)
+			length1 = audio->buffer_end - audio->buffer_pos;
+		else
+			length1 = length;
+		memcpy(req->buf, audio->buffer_pos, length1);
+		if (length1 < length) {
+			/* Wrap around and copy remaining length
+			 * at beginning of buffer.
+			 */
+			length2 = length - length1;
+			memcpy(req->buf + length1, audio->buffer_start,
+					length2);
+			audio->buffer_pos = audio->buffer_start + length2;
+		} else {
+			audio->buffer_pos += length1;
+			if (audio->buffer_pos >= audio->buffer_end)
+				audio->buffer_pos = audio->buffer_start;
+		}
+
+		req->length = length;
+		ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
+		if (ret < 0) {
+			pr_err("usb_ep_queue failed ret: %d\n", ret);
+			audio_req_put(audio, req);
+			break;
+		}
+
+		frames -= bytes_to_frames(runtime, length);
+		audio->frames_sent += bytes_to_frames(runtime, length);
+	}
+}
+
+static void audio_control_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	/* nothing to do here */
+}
+
+static void audio_data_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct audio_dev *audio = req->context;
+
+	pr_debug("audio_data_complete req->status %d req->actual %d\n",
+		req->status, req->actual);
+
+	audio_req_put(audio, req);
+
+	if (!audio->buffer_start || req->status)
+		return;
+
+	audio->period_offset += req->actual;
+	if (audio->period_offset >= audio->period) {
+		snd_pcm_period_elapsed(audio->substream);
+		audio->period_offset = 0;
+	}
+	audio_send(audio);
+}
+
+static int audio_set_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	int value = -EOPNOTSUPP;
+	u16 ep = le16_to_cpu(ctrl->wIndex);
+	u16 len = le16_to_cpu(ctrl->wLength);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+
+	pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	switch (ctrl->bRequest) {
+	case UAC_SET_CUR:
+	case UAC_SET_MIN:
+	case UAC_SET_MAX:
+	case UAC_SET_RES:
+		value = len;
+		break;
+	default:
+		break;
+	}
+
+	return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int value = -EOPNOTSUPP;
+	u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16 len = le16_to_cpu(ctrl->wLength);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u8 *buf = cdev->req->buf;
+
+	pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	if (w_value == UAC_EP_CS_ATTR_SAMPLE_RATE << 8) {
+		switch (ctrl->bRequest) {
+		case UAC_GET_CUR:
+		case UAC_GET_MIN:
+		case UAC_GET_MAX:
+		case UAC_GET_RES:
+			/* return our sample rate */
+			buf[0] = (u8)SAMPLE_RATE;
+			buf[1] = (u8)(SAMPLE_RATE >> 8);
+			buf[2] = (u8)(SAMPLE_RATE >> 16);
+			value = 3;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return value;
+}
+
+static int
+audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request *req = cdev->req;
+	int value = -EOPNOTSUPP;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything; interface
+	 * activation uses set_alt().
+	 */
+	switch (ctrl->bRequestType) {
+	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_set_endpoint_req(f, ctrl);
+		break;
+
+	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_get_endpoint_req(f, ctrl);
+		break;
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		pr_debug("audio req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		req->complete = audio_control_complete;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("audio response on err %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct audio_dev *audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
+	if (ret)
+		return ret;
+
+	usb_ep_enable(audio->in_ep);
+	return 0;
+}
+
+static void audio_disable(struct usb_function *f)
+{
+	struct audio_dev	*audio = func_to_audio(f);
+
+	pr_debug("audio_disable\n");
+	usb_ep_disable(audio->in_ep);
+}
+
+static void audio_free_func(struct usb_function *f)
+{
+	/* no-op */
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void audio_build_desc(struct audio_dev *audio)
+{
+	u8 *sam_freq;
+	int rate;
+
+	/* Set channel numbers */
+	input_terminal_desc.bNrChannels = 2;
+	as_type_i_desc.bNrChannels = 2;
+
+	/* Set sample rates */
+	rate = SAMPLE_RATE;
+	sam_freq = as_type_i_desc.tSamFreq[0];
+	memcpy(sam_freq, &rate, 3);
+}
+
+static int snd_card_setup(struct usb_configuration *c,
+	struct audio_source_config *config);
+static struct audio_source_instance *to_fi_audio_source(
+	const struct usb_function_instance *fi);
+
+/* audio function driver setup/binding */
+static int
+audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct audio_dev *audio = func_to_audio(f);
+	int status;
+	struct usb_ep *ep;
+	struct usb_request *req;
+	int i;
+	int err;
+
+	if (IS_ENABLED(CONFIG_USB_CONFIGFS)) {
+		struct audio_source_instance *fi_audio =
+				to_fi_audio_source(f->fi);
+		struct audio_source_config *config =
+				fi_audio->config;
+
+		err = snd_card_setup(c, config);
+		if (err)
+			return err;
+	}
+
+	audio_build_desc(audio);
+
+	/* allocate instance-specific interface IDs, and patch descriptors */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ac_interface_desc.bInterfaceNumber = status;
+
+	/* AUDIO_AC_INTERFACE */
+	ac_header_desc.baInterfaceNr[0] = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	as_interface_alt_0_desc.bInterfaceNumber = status;
+	as_interface_alt_1_desc.bInterfaceNumber = status;
+
+	/* AUDIO_AS_INTERFACE */
+	ac_header_desc.baInterfaceNr[1] = status;
+
+	status = -ENODEV;
+
+	/* allocate our endpoint */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_as_in_ep_desc);
+	if (!ep)
+		goto fail;
+	audio->in_ep = ep;
+	ep->driver_data = audio; /* claim */
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		hs_as_in_ep_desc.bEndpointAddress =
+			fs_as_in_ep_desc.bEndpointAddress;
+
+	f->fs_descriptors = fs_audio_desc;
+	f->hs_descriptors = hs_audio_desc;
+
+	for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
+		req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
+		if (req) {
+			req->context = audio;
+			req->complete = audio_data_complete;
+			audio_req_put(audio, req);
+		} else
+			status = -ENOMEM;
+	}
+
+fail:
+	return status;
+}
+
+static void
+audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct audio_dev *audio = func_to_audio(f);
+	struct usb_request *req;
+
+	while ((req = audio_req_get(audio)))
+		audio_request_free(req, audio->in_ep);
+
+	snd_card_free_when_closed(audio->card);
+	audio->card = NULL;
+	audio->pcm = NULL;
+	audio->substream = NULL;
+	audio->in_ep = NULL;
+
+	if (IS_ENABLED(CONFIG_USB_CONFIGFS)) {
+		struct audio_source_instance *fi_audio =
+				to_fi_audio_source(f->fi);
+		struct audio_source_config *config =
+				fi_audio->config;
+
+		config->card = -1;
+		config->device = -1;
+	}
+}
+
+static void audio_pcm_playback_start(struct audio_dev *audio)
+{
+	audio->start_time = ktime_get();
+	audio->frames_sent = 0;
+	audio_send(audio);
+}
+
+static void audio_pcm_playback_stop(struct audio_dev *audio)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	audio->buffer_start = NULL;
+	audio->buffer_end = NULL;
+	audio->buffer_pos = NULL;
+	spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static int audio_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = substream->private_data;
+
+	runtime->private_data = audio;
+	runtime->hw = audio_hw_info;
+	snd_pcm_limit_hw_rates(runtime);
+	runtime->hw.channels_max = 2;
+
+	audio->substream = substream;
+
+	/* Add the QoS request and set the latency to 0 */
+	cpu_latency_qos_add_request(&audio->pm_qos, 0);
+
+	return 0;
+}
+
+static int audio_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct audio_dev *audio = substream->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+
+	/* Remove the QoS request */
+	cpu_latency_qos_remove_request(&audio->pm_qos);
+
+	audio->substream = NULL;
+	spin_unlock_irqrestore(&audio->lock, flags);
+
+	return 0;
+}
+
+static int audio_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	unsigned int channels = params_channels(params);
+	unsigned int rate = params_rate(params);
+
+	if (rate != SAMPLE_RATE)
+		return -EINVAL;
+	if (channels != 2)
+		return -EINVAL;
+
+	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+		params_buffer_bytes(params));
+}
+
+static int audio_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int audio_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = runtime->private_data;
+
+	audio->period = snd_pcm_lib_period_bytes(substream);
+	audio->period_offset = 0;
+	audio->buffer_start = runtime->dma_area;
+	audio->buffer_end = audio->buffer_start
+		+ snd_pcm_lib_buffer_bytes(substream);
+	audio->buffer_pos = audio->buffer_start;
+
+	return 0;
+}
+
+static snd_pcm_uframes_t audio_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = runtime->private_data;
+	ssize_t bytes = audio->buffer_pos - audio->buffer_start;
+
+	/* return offset of next frame to fill in our buffer */
+	return bytes_to_frames(runtime, bytes);
+}
+
+static int audio_pcm_playback_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	struct audio_dev *audio = substream->runtime->private_data;
+	int ret = 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		audio_pcm_playback_start(audio);
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		audio_pcm_playback_stop(audio);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static struct audio_dev _audio_dev = {
+	.func = {
+		.name = "audio_source",
+		.bind = audio_bind,
+		.unbind = audio_unbind,
+		.set_alt = audio_set_alt,
+		.setup = audio_setup,
+		.disable = audio_disable,
+		.free_func = audio_free_func,
+	},
+	.lock = __SPIN_LOCK_UNLOCKED(_audio_dev.lock),
+	.idle_reqs = LIST_HEAD_INIT(_audio_dev.idle_reqs),
+};
+
+static struct snd_pcm_ops audio_playback_ops = {
+	.open		= audio_pcm_open,
+	.close		= audio_pcm_close,
+	.ioctl		= snd_pcm_lib_ioctl,
+	.hw_params	= audio_pcm_hw_params,
+	.hw_free	= audio_pcm_hw_free,
+	.prepare	= audio_pcm_prepare,
+	.trigger	= audio_pcm_playback_trigger,
+	.pointer	= audio_pcm_pointer,
+};
+
+int audio_source_bind_config(struct usb_configuration *c,
+		struct audio_source_config *config)
+{
+	struct audio_dev *audio;
+	int err;
+
+	config->card = -1;
+	config->device = -1;
+
+	audio = &_audio_dev;
+
+	err = snd_card_setup(c, config);
+	if (err)
+		return err;
+
+	err = usb_add_function(c, &audio->func);
+	if (err)
+		goto add_fail;
+
+	return 0;
+
+add_fail:
+	snd_card_free(audio->card);
+	return err;
+}
+
+static int snd_card_setup(struct usb_configuration *c,
+		struct audio_source_config *config)
+{
+	struct audio_dev *audio;
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+	int err;
+
+	audio = &_audio_dev;
+
+	err = snd_card_new(&c->cdev->gadget->dev,
+			SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+			THIS_MODULE, 0, &card);
+	if (err)
+		return err;
+
+	err = snd_pcm_new(card, "USB audio source", 0, 1, 0, &pcm);
+	if (err)
+		goto pcm_fail;
+
+	pcm->private_data = audio;
+	pcm->info_flags = 0;
+	audio->pcm = pcm;
+
+	strlcpy(pcm->name, "USB gadget audio", sizeof(pcm->name));
+
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &audio_playback_ops);
+	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+				NULL, 0, 64 * 1024);
+
+	strlcpy(card->driver, "audio_source", sizeof(card->driver));
+	strlcpy(card->shortname, card->driver, sizeof(card->shortname));
+	strlcpy(card->longname, "USB accessory audio source",
+		sizeof(card->longname));
+
+	err = snd_card_register(card);
+	if (err)
+		goto register_fail;
+
+	config->card = pcm->card->number;
+	config->device = pcm->device;
+	audio->card = card;
+	return 0;
+
+register_fail:
+pcm_fail:
+	snd_card_free(audio->card);
+	return err;
+}
+
+static struct audio_source_instance *to_audio_source_instance(
+					struct config_item *item)
+{
+	return container_of(to_config_group(item), struct audio_source_instance,
+		func_inst.group);
+}
+
+static struct audio_source_instance *to_fi_audio_source(
+					const struct usb_function_instance *fi)
+{
+	return container_of(fi, struct audio_source_instance, func_inst);
+}
+
+static void audio_source_attr_release(struct config_item *item)
+{
+	struct audio_source_instance *fi_audio = to_audio_source_instance(item);
+
+	usb_put_function_instance(&fi_audio->func_inst);
+}
+
+static int audio_source_set_inst_name(struct usb_function_instance *fi,
+					const char *name)
+{
+	struct audio_source_instance *fi_audio;
+	char *ptr;
+	int name_len;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	ptr = kstrndup(name, name_len, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	fi_audio = to_fi_audio_source(fi);
+	fi_audio->name = ptr;
+
+	return 0;
+}
+
+static void audio_source_free_inst(struct usb_function_instance *fi)
+{
+	struct audio_source_instance *fi_audio;
+
+	fi_audio = to_fi_audio_source(fi);
+	device_destroy(fi_audio->audio_device->class,
+			fi_audio->audio_device->devt);
+	kfree(fi_audio->name);
+	kfree(fi_audio->config);
+}
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct audio_source_instance *fi_audio = dev_get_drvdata(dev);
+	struct audio_source_config *config = fi_audio->config;
+
+	/* print PCM card and device numbers */
+	return sprintf(buf, "%d %d\n", config->card, config->device);
+}
+
+struct device *create_function_device(char *name);
+
+static struct usb_function_instance *audio_source_alloc_inst(void)
+{
+	struct audio_source_instance *fi_audio;
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+	struct device *dev;
+	void *err_ptr;
+	int err = 0;
+
+	fi_audio = kzalloc(sizeof(*fi_audio), GFP_KERNEL);
+	if (!fi_audio)
+		return ERR_PTR(-ENOMEM);
+
+	fi_audio->func_inst.set_inst_name = audio_source_set_inst_name;
+	fi_audio->func_inst.free_func_inst = audio_source_free_inst;
+
+	fi_audio->config = kzalloc(sizeof(struct audio_source_config),
+							GFP_KERNEL);
+	if (!fi_audio->config) {
+		err_ptr = ERR_PTR(-ENOMEM);
+		goto fail_audio;
+	}
+
+	config_group_init_type_name(&fi_audio->func_inst.group, "",
+						&audio_source_func_type);
+	dev = create_function_device("f_audio_source");
+
+	if (IS_ERR(dev)) {
+		err_ptr = dev;
+		goto fail_audio_config;
+	}
+
+	fi_audio->config->card = -1;
+	fi_audio->config->device = -1;
+	fi_audio->audio_device = dev;
+
+	attrs = audio_source_function_attributes;
+	if (attrs) {
+		while ((attr = *attrs++) && !err)
+			err = device_create_file(dev, attr);
+		if (err) {
+			err_ptr = ERR_PTR(-EINVAL);
+			goto fail_device;
+		}
+	}
+
+	dev_set_drvdata(dev, fi_audio);
+	_audio_dev.config = fi_audio->config;
+
+	return &fi_audio->func_inst;
+
+fail_device:
+	device_destroy(dev->class, dev->devt);
+fail_audio_config:
+	kfree(fi_audio->config);
+fail_audio:
+	kfree(fi_audio);
+	return err_ptr;
+}
+
+static struct usb_function *audio_source_alloc(struct usb_function_instance *fi)
+{
+	return &_audio_dev.func;
+}
+
+DECLARE_USB_FUNCTION_INIT(audio_source, audio_source_alloc_inst,
+			audio_source_alloc);
+MODULE_LICENSE("GPL");
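The pacing in audio_send() above derives a frame budget from elapsed wall-clock time and copies out of the ALSA ring buffer with a manual wrap-around. A stand-alone user-space sketch of just that arithmetic (not part of the patch; it assumes the 44.1 kHz SAMPLE_RATE/FRAMES_PER_MSEC defined earlier in this file):

	/* Sketch of the audio_send() pacing and ring-wrap logic above. */
	#include <stdio.h>
	#include <string.h>

	#define SAMPLE_RATE	44100
	#define FRAMES_PER_MSEC	(SAMPLE_RATE / 1000)

	static char ring[256], out[64];
	static char *pos = ring;

	/* copy 'len' bytes out of the ring, wrapping as audio_send() does */
	static void copy_wrapped(int len)
	{
		int len1 = len;

		if (pos + len > ring + sizeof(ring))
			len1 = ring + sizeof(ring) - pos;
		memcpy(out, pos, len1);
		if (len1 < len) {
			memcpy(out + len1, ring, len - len1);
			pos = ring + (len - len1);
		} else {
			pos += len1;
			if (pos >= ring + sizeof(ring))
				pos = ring;
		}
	}

	int main(void)
	{
		long long frames_sent = 0, msecs = 250;
		/* frame budget after 250 ms of wall-clock time */
		long long frames = msecs * SAMPLE_RATE / 1000;

		/* drop frames rather than burst if we fell >10 ms behind */
		if (frames - frames_sent > 10 * FRAMES_PER_MSEC)
			frames_sent = frames - FRAMES_PER_MSEC;
		printf("budget: %lld frames\n", frames - frames_sent);
		copy_wrapped(sizeof(out));
		return 0;
	}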
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 046f770..a34a7c9 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1327,6 +1327,7 @@
 		struct usb_endpoint_descriptor *desc;
 
 		switch (epfile->ffs->gadget->speed) {
+		case USB_SPEED_SUPER_PLUS:
 		case USB_SPEED_SUPER:
 			desc_idx = 2;
 			break;
@@ -3222,6 +3223,10 @@
 	func->function.os_desc_n =
 		c->cdev->use_os_string ? ffs->interfaces_count : 0;
 
+	if (likely(super)) {
+		func->function.ssp_descriptors =
+			usb_copy_descriptors(func->function.ss_descriptors);
+	}
 	/* And we're done */
 	ffs_event_add(ffs, FUNCTIONFS_BIND);
 	return 0;
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 19d9794..3085a27 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1048,6 +1048,11 @@
 		f->ss_descriptors = usb_copy_descriptors(midi_function);
 		if (!f->ss_descriptors)
 			goto fail_f_midi;
+		if (gadget_is_superspeed_plus(c->cdev->gadget)) {
+			f->ssp_descriptors = usb_copy_descriptors(midi_function);
+			if (!f->ssp_descriptors)
+				goto fail_f_midi;
+		}
 	}
 
 	kfree(midi_function);
@@ -1216,6 +1221,65 @@
 	}
 }
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+extern struct device *create_function_device(char *name);
+static ssize_t alsa_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usb_function_instance *fi_midi = dev_get_drvdata(dev);
+	struct f_midi *midi;
+
+	if (!fi_midi || !fi_midi->f)
+		dev_warn(dev, "f_midi: function not set\n");
+
+	if (fi_midi && fi_midi->f) {
+		midi = func_to_midi(fi_midi->f);
+		if (midi->rmidi && midi->card && midi->rmidi->card)
+			return sprintf(buf, "%d %d\n",
+			midi->rmidi->card->number, midi->rmidi->device);
+	}
+
+	/* fall back to "-1 -1" while no function is bound */
+	return sprintf(buf, "%d %d\n", -1, -1);
+}
+
+static DEVICE_ATTR(alsa, S_IRUGO, alsa_show, NULL);
+
+static struct device_attribute *alsa_function_attributes[] = {
+	&dev_attr_alsa,
+	NULL
+};
+
+static int create_alsa_device(struct usb_function_instance *fi)
+{
+	struct device *dev;
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+	int err = 0;
+
+	dev = create_function_device("f_midi");
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+
+	attrs = alsa_function_attributes;
+	if (attrs) {
+		while ((attr = *attrs++) && !err)
+			err = device_create_file(dev, attr);
+		if (err) {
+			device_destroy(dev->class, dev->devt);
+			return -EINVAL;
+		}
+	}
+	dev_set_drvdata(dev, fi);
+	return 0;
+}
+#else
+static int create_alsa_device(struct usb_function_instance *fi)
+{
+	return 0;
+}
+#endif
+
 static struct usb_function_instance *f_midi_alloc_inst(void)
 {
 	struct f_midi_opts *opts;
@@ -1234,6 +1298,11 @@
 	opts->out_ports = 1;
 	opts->refcnt = 1;
 
+	if (create_alsa_device(&opts->func_inst)) {
+		kfree(opts);
+		return ERR_PTR(-ENODEV);
+	}
+
 	config_group_init_type_name(&opts->func_inst.group, "",
 				    &midi_func_type);
 
@@ -1254,6 +1323,7 @@
 		kfifo_free(&midi->in_req_fifo);
 		kfree(midi);
 		free = true;
+		opts->func_inst.f = NULL;
 	}
 	mutex_unlock(&opts->lock);
 
@@ -1341,6 +1411,7 @@
 	midi->func.disable	= f_midi_disable;
 	midi->func.free_func	= f_midi_free;
 
+	fi->f = &midi->func;
 	return &midi->func;
 
 midi_free:
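The new alsa attribute reports the MIDI function's ALSA card and device numbers, or "-1 -1" before the function is bound. A minimal user-space reader (a sketch only; the sysfs path depends on the platform's create_function_device() and is a placeholder here):

	/* Sketch: read the card/device pair from the f_midi 'alsa' attribute.
	 * PATH is hypothetical; the real location is platform dependent. */
	#include <stdio.h>

	#define PATH "/sys/class/android_usb/f_midi/alsa"	/* placeholder */

	int main(void)
	{
		FILE *f = fopen(PATH, "r");
		int card, device;

		if (!f)
			return 1;
		if (fscanf(f, "%d %d", &card, &device) == 2 && card >= 0)
			printf("MIDI at ALSA card %d device %d\n", card, device);
		else
			printf("f_midi not bound yet\n");
		fclose(f);
		return 0;
	}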
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 9534c8a..0739b05 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -87,8 +87,10 @@
 /* peak (theoretical) bulk transfer rate in bits-per-second */
 static unsigned int bitrate(struct usb_gadget *g)
 {
+	if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS)
+		return 4250000000U;
 	if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER)
-		return 13 * 1024 * 8 * 1000 * 8;
+		return 3750000000U;
 	else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH)
 		return 13 * 512 * 8 * 1000 * 8;
 	else
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index 35eec70..4f6e58d 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -11,6 +11,7 @@
 #include <linux/mutex.h>
 #include <linux/property.h>
 #include <linux/slab.h>
+#include <linux/usb/pd_vdo.h>
 
 #include "bus.h"
 
@@ -18,6 +19,7 @@
 	struct device			dev;
 	enum typec_plug_index		index;
 	struct ida			mode_ids;
+	int				num_altmodes;
 };
 
 struct typec_cable {
@@ -33,6 +35,7 @@
 	struct usb_pd_identity		*identity;
 	enum typec_accessory		accessory;
 	struct ida			mode_ids;
+	int				num_altmodes;
 };
 
 struct typec_port {
@@ -81,6 +84,29 @@
 	[TYPEC_ACCESSORY_DEBUG]		= "debug",
 };
 
+/* Product types defined in USB PD Specification R3.0 V2.0 */
+static const char * const product_type_ufp[8] = {
+	[IDH_PTYPE_UNDEF]		= "undefined",
+	[IDH_PTYPE_HUB]			= "hub",
+	[IDH_PTYPE_PERIPH]		= "peripheral",
+	[IDH_PTYPE_PSD]			= "psd",
+	[IDH_PTYPE_AMA]			= "ama",
+};
+
+static const char * const product_type_dfp[8] = {
+	[IDH_PTYPE_DFP_UNDEF]		= "undefined",
+	[IDH_PTYPE_DFP_HUB]		= "hub",
+	[IDH_PTYPE_DFP_HOST]		= "host",
+	[IDH_PTYPE_DFP_PB]		= "power_brick",
+	[IDH_PTYPE_DFP_AMC]		= "amc",
+};
+
+static const char * const product_type_cable[8] = {
+	[IDH_PTYPE_UNDEF]		= "undefined",
+	[IDH_PTYPE_PCABLE]		= "passive",
+	[IDH_PTYPE_ACABLE]		= "active",
+};
+
 static struct usb_pd_identity *get_pd_identity(struct device *dev)
 {
 	if (is_typec_partner(dev)) {
@@ -95,6 +121,32 @@
 	return NULL;
 }
 
+static const char *get_pd_product_type(struct device *dev)
+{
+	struct typec_port *port = to_typec_port(dev->parent);
+	struct usb_pd_identity *id = get_pd_identity(dev);
+	const char *ptype = NULL;
+
+	if (is_typec_partner(dev)) {
+		if (!id)
+			return NULL;
+
+		if (port->data_role == TYPEC_HOST)
+			ptype = product_type_ufp[PD_IDH_PTYPE(id->id_header)];
+		else
+			ptype = product_type_dfp[PD_IDH_DFP_PTYPE(id->id_header)];
+	} else if (is_typec_cable(dev)) {
+		if (id)
+			ptype = product_type_cable[PD_IDH_PTYPE(id->id_header)];
+		else
+			ptype = to_typec_cable(dev)->active ?
+				product_type_cable[IDH_PTYPE_ACABLE] :
+				product_type_cable[IDH_PTYPE_PCABLE];
+	}
+
+	return ptype;
+}
+
 static ssize_t id_header_show(struct device *dev, struct device_attribute *attr,
 			      char *buf)
 {
@@ -122,10 +174,40 @@
 }
 static DEVICE_ATTR_RO(product);
 
+static ssize_t product_type_vdo1_show(struct device *dev, struct device_attribute *attr,
+				      char *buf)
+{
+	struct usb_pd_identity *id = get_pd_identity(dev);
+
+	return sysfs_emit(buf, "0x%08x\n", id->vdo[0]);
+}
+static DEVICE_ATTR_RO(product_type_vdo1);
+
+static ssize_t product_type_vdo2_show(struct device *dev, struct device_attribute *attr,
+				      char *buf)
+{
+	struct usb_pd_identity *id = get_pd_identity(dev);
+
+	return sysfs_emit(buf, "0x%08x\n", id->vdo[1]);
+}
+static DEVICE_ATTR_RO(product_type_vdo2);
+
+static ssize_t product_type_vdo3_show(struct device *dev, struct device_attribute *attr,
+				      char *buf)
+{
+	struct usb_pd_identity *id = get_pd_identity(dev);
+
+	return sysfs_emit(buf, "0x%08x\n", id->vdo[2]);
+}
+static DEVICE_ATTR_RO(product_type_vdo3);
+
 static struct attribute *usb_pd_id_attrs[] = {
 	&dev_attr_id_header.attr,
 	&dev_attr_cert_stat.attr,
 	&dev_attr_product.attr,
+	&dev_attr_product_type_vdo1.attr,
+	&dev_attr_product_type_vdo2.attr,
+	&dev_attr_product_type_vdo3.attr,
 	NULL
 };
 
@@ -139,13 +221,49 @@
 	NULL,
 };
 
+static void typec_product_type_notify(struct device *dev)
+{
+	char *envp[2] = { };
+	const char *ptype;
+
+	ptype = get_pd_product_type(dev);
+	if (!ptype)
+		return;
+
+	sysfs_notify(&dev->kobj, NULL, "type");
+
+	envp[0] = kasprintf(GFP_KERNEL, "PRODUCT_TYPE=%s", ptype);
+	if (!envp[0])
+		return;
+
+	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
+	kfree(envp[0]);
+}
+
 static void typec_report_identity(struct device *dev)
 {
 	sysfs_notify(&dev->kobj, "identity", "id_header");
 	sysfs_notify(&dev->kobj, "identity", "cert_stat");
 	sysfs_notify(&dev->kobj, "identity", "product");
+	sysfs_notify(&dev->kobj, "identity", "product_type_vdo1");
+	sysfs_notify(&dev->kobj, "identity", "product_type_vdo2");
+	sysfs_notify(&dev->kobj, "identity", "product_type_vdo3");
+	typec_product_type_notify(dev);
 }
 
+static ssize_t
+type_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	const char *ptype;
+
+	ptype = get_pd_product_type(dev);
+	if (!ptype)
+		return 0;
+
+	return sysfs_emit(buf, "%s\n", ptype);
+}
+static DEVICE_ATTR_RO(type);
+
 /* ------------------------------------------------------------------------- */
 /* Alternate Modes */
 
@@ -382,7 +500,7 @@
 	return attr->mode;
 }
 
-static struct attribute_group typec_altmode_group = {
+static const struct attribute_group typec_altmode_group = {
 	.is_visible = typec_altmode_attr_is_visible,
 	.attrs = typec_altmode_attrs,
 };
@@ -532,12 +650,60 @@
 }
 static DEVICE_ATTR_RO(supports_usb_power_delivery);
 
+static ssize_t number_of_alternate_modes_show(struct device *dev, struct device_attribute *attr,
+					      char *buf)
+{
+	struct typec_partner *partner;
+	struct typec_plug *plug;
+	int num_altmodes;
+
+	if (is_typec_partner(dev)) {
+		partner = to_typec_partner(dev);
+		num_altmodes = partner->num_altmodes;
+	} else if (is_typec_plug(dev)) {
+		plug = to_typec_plug(dev);
+		num_altmodes = plug->num_altmodes;
+	} else {
+		return 0;
+	}
+
+	return sysfs_emit(buf, "%d\n", num_altmodes);
+}
+static DEVICE_ATTR_RO(number_of_alternate_modes);
+
 static struct attribute *typec_partner_attrs[] = {
 	&dev_attr_accessory_mode.attr,
 	&dev_attr_supports_usb_power_delivery.attr,
+	&dev_attr_number_of_alternate_modes.attr,
+	&dev_attr_type.attr,
 	NULL
 };
-ATTRIBUTE_GROUPS(typec_partner);
+
+static umode_t typec_partner_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+	struct typec_partner *partner = to_typec_partner(kobj_to_dev(kobj));
+
+	if (attr == &dev_attr_number_of_alternate_modes.attr) {
+		if (partner->num_altmodes < 0)
+			return 0;
+	}
+
+	if (attr == &dev_attr_type.attr)
+		if (!get_pd_product_type(kobj_to_dev(kobj)))
+			return 0;
+
+	return attr->mode;
+}
+
+static const struct attribute_group typec_partner_group = {
+	.is_visible = typec_partner_attr_is_visible,
+	.attrs = typec_partner_attrs
+};
+
+static const struct attribute_group *typec_partner_groups[] = {
+	&typec_partner_group,
+	NULL
+};
 
 static void typec_partner_release(struct device *dev)
 {
@@ -571,6 +737,37 @@
 EXPORT_SYMBOL_GPL(typec_partner_set_identity);
 
 /**
+ * typec_partner_set_num_altmodes - Set the number of available partner altmodes
+ * @partner: The partner to be updated.
+ * @num_altmodes: The number of altmodes we want to specify as available.
+ *
+ * This routine is used to report the number of alternate modes supported by the
+ * partner. This value is *not* enforced in alternate mode registration routines.
+ *
+ * @partner.num_altmodes is set to -1 on partner registration, denoting that
+ * a valid value has not been set for it yet.
+ *
+ * Returns 0 on success or negative error number on failure.
+ */
+int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmodes)
+{
+	int ret;
+
+	if (num_altmodes < 0)
+		return -EINVAL;
+
+	partner->num_altmodes = num_altmodes;
+	ret = sysfs_update_group(&partner->dev.kobj, &typec_partner_group);
+	if (ret < 0)
+		return ret;
+
+	sysfs_notify(&partner->dev.kobj, NULL, "number_of_alternate_modes");
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(typec_partner_set_num_altmodes);
+
+/**
  * typec_partner_register_altmode - Register USB Type-C Partner Alternate Mode
  * @partner: USB Type-C Partner that supports the alternate mode
  * @desc: Description of the alternate mode
@@ -612,6 +809,7 @@
 	ida_init(&partner->mode_ids);
 	partner->usb_pd = desc->usb_pd;
 	partner->accessory = desc->accessory;
+	partner->num_altmodes = -1;
 
 	if (desc->identity) {
 		/*
@@ -662,12 +860,71 @@
 	kfree(plug);
 }
 
+static struct attribute *typec_plug_attrs[] = {
+	&dev_attr_number_of_alternate_modes.attr,
+	NULL
+};
+
+static umode_t typec_plug_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+	struct typec_plug *plug = to_typec_plug(kobj_to_dev(kobj));
+
+	if (attr == &dev_attr_number_of_alternate_modes.attr) {
+		if (plug->num_altmodes < 0)
+			return 0;
+	}
+
+	return attr->mode;
+}
+
+static const struct attribute_group typec_plug_group = {
+	.is_visible = typec_plug_attr_is_visible,
+	.attrs = typec_plug_attrs
+};
+
+static const struct attribute_group *typec_plug_groups[] = {
+	&typec_plug_group,
+	NULL
+};
+
 static const struct device_type typec_plug_dev_type = {
 	.name = "typec_plug",
+	.groups = typec_plug_groups,
 	.release = typec_plug_release,
 };
 
 /**
+ * typec_plug_set_num_altmodes - Set the number of available plug altmodes
+ * @plug: The plug to be updated.
+ * @num_altmodes: The number of altmodes we want to specify as available.
+ *
+ * This routine is used to report the number of alternate modes supported by the
+ * plug. This value is *not* enforced in alternate mode registration routines.
+ *
+ * @plug.num_altmodes is set to -1 on plug registration, denoting that
+ * a valid value has not been set for it yet.
+ *
+ * Returns 0 on success or negative error number on failure.
+ */
+int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes)
+{
+	int ret;
+
+	if (num_altmodes < 0)
+		return -EINVAL;
+
+	plug->num_altmodes = num_altmodes;
+	ret = sysfs_update_group(&plug->dev.kobj, &typec_plug_group);
+	if (ret < 0)
+		return ret;
+
+	sysfs_notify(&plug->dev.kobj, NULL, "number_of_alternate_modes");
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(typec_plug_set_num_altmodes);
+
+/**
  * typec_plug_register_altmode - Register USB Type-C Cable Plug Alternate Mode
  * @plug: USB Type-C Cable Plug that supports the alternate mode
  * @desc: Description of the alternate mode
@@ -712,6 +969,7 @@
 	sprintf(name, "plug%d", desc->index);
 
 	ida_init(&plug->mode_ids);
+	plug->num_altmodes = -1;
 	plug->index = desc->index;
 	plug->dev.class = typec_class;
 	plug->dev.parent = &cable->dev;
@@ -744,15 +1002,6 @@
 
 /* Type-C Cables */
 
-static ssize_t
-type_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct typec_cable *cable = to_typec_cable(dev);
-
-	return sprintf(buf, "%s\n", cable->active ? "active" : "passive");
-}
-static DEVICE_ATTR_RO(type);
-
 static const char * const typec_plug_types[] = {
 	[USB_PLUG_NONE]		= "unknown",
 	[USB_PLUG_TYPE_A]	= "type-a",
@@ -1309,7 +1558,7 @@
 	return attr->mode;
 }
 
-static struct attribute_group typec_group = {
+static const struct attribute_group typec_group = {
 	.is_visible = typec_attr_is_visible,
 	.attrs = typec_attrs,
 };
@@ -1352,6 +1601,11 @@
 /* --------------------------------------- */
 /* Driver callbacks to report role updates */
 
+static int partner_match(struct device *dev, void *data)
+{
+	return is_typec_partner(dev);
+}
+
 /**
  * typec_set_data_role - Report data role change
  * @port: The USB Type-C Port where the role was changed
@@ -1361,12 +1615,23 @@
  */
 void typec_set_data_role(struct typec_port *port, enum typec_data_role role)
 {
+	struct device *partner_dev;
+
 	if (port->data_role == role)
 		return;
 
 	port->data_role = role;
 	sysfs_notify(&port->dev.kobj, NULL, "data_role");
 	kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
+
+	partner_dev = device_find_child(&port->dev, NULL, partner_match);
+	if (!partner_dev)
+		return;
+
+	if (to_typec_partner(partner_dev)->identity)
+		typec_product_type_notify(partner_dev);
+
+	put_device(partner_dev);
 }
 EXPORT_SYMBOL_GPL(typec_set_data_role);
 
@@ -1407,11 +1672,6 @@
 }
 EXPORT_SYMBOL_GPL(typec_set_vconn_role);
 
-static int partner_match(struct device *dev, void *data)
-{
-	return is_typec_partner(dev);
-}
-
 /**
  * typec_set_pwr_opmode - Report changed power operation mode
  * @port: The USB Type-C Port where the mode was changed
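For context, a port driver would feed the new partner attributes roughly as below once PD discovery completes. This is a hedged sketch, not code from the patch; it assumes a partner that was registered with an identity:

	/* Sketch: publishing discovery results through the new partner API. */
	#include <linux/usb/typec.h>

	static int report_partner_discovery(struct typec_partner *partner,
					    int num_modes)
	{
		int ret;

		/* notifies the identity attributes and raises PRODUCT_TYPE=... */
		ret = typec_partner_set_identity(partner);
		if (ret)
			return ret;

		/* unhides number_of_alternate_modes (-1, i.e. hidden, until now) */
		return typec_partner_set_num_altmodes(partner, num_modes);
	}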
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index d7f63b7..aa3211f 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -256,6 +256,7 @@
 pmc_usb_mux_tbt(struct pmc_usb_port *port, struct typec_mux_state *state)
 {
 	struct typec_thunderbolt_data *data = state->data;
+	u8 cable_rounded = TBT_CABLE_ROUNDED_SUPPORT(data->cable_mode);
 	u8 cable_speed = TBT_CABLE_SPEED(data->cable_mode);
 	struct altmode_req req = { };
 
@@ -284,6 +285,8 @@
 
 	req.mode_data |= PMC_USB_ALTMODE_CABLE_SPD(cable_speed);
 
+	req.mode_data |= PMC_USB_ALTMODE_TBT_GEN(cable_rounded);
+
 	return pmc_usb_command(port, (void *)&req, sizeof(req));
 }
 
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index f9f0af6..12d983a 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -18,7 +18,10 @@
 
 #include "tcpci.h"
 
-#define PD_RETRY_COUNT 3
+#define	PD_RETRY_COUNT				3
+#define	AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV	3500
+#define	AUTO_DISCHARGE_PD_HEADROOM_MV		850
+#define	AUTO_DISCHARGE_PPS_HEADROOM_MV		1250
 
 struct tcpci {
 	struct device *dev;
@@ -268,6 +271,58 @@
 				enable ? TCPC_POWER_CTRL_VCONN_ENABLE : 0);
 }
 
+static int tcpci_enable_auto_vbus_discharge(struct tcpc_dev *dev, bool enable)
+{
+	struct tcpci *tcpci = tcpc_to_tcpci(dev);
+	int ret;
+
+	ret = regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL, TCPC_POWER_CTRL_AUTO_DISCHARGE,
+				 enable ? TCPC_POWER_CTRL_AUTO_DISCHARGE : 0);
+	return ret;
+}
+
+static int tcpci_set_auto_vbus_discharge_threshold(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
+						   bool pps_active, u32 requested_vbus_voltage_mv)
+{
+	struct tcpci *tcpci = tcpc_to_tcpci(dev);
+	unsigned int pwr_ctrl, threshold = 0;
+	int ret;
+
+	/*
+	 * Indicates that vbus is going to go away due to PR_SWAP, hard reset, etc.
+	 * Do not discharge vbus here.
+	 */
+	if (requested_vbus_voltage_mv == 0)
+		goto write_thresh;
+
+	ret = regmap_read(tcpci->regmap, TCPC_POWER_CTRL, &pwr_ctrl);
+	if (ret < 0)
+		return ret;
+
+	if (pwr_ctrl & TCPC_FAST_ROLE_SWAP_EN) {
+		/* Prevent disconnect when the source is fast role swap capable. */
+		threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
+	} else if (mode == TYPEC_PWR_MODE_PD) {
+		if (pps_active)
+			threshold = (95 * requested_vbus_voltage_mv / 100) -
+				AUTO_DISCHARGE_PD_HEADROOM_MV;
+		else
+			threshold = (95 * requested_vbus_voltage_mv / 100) -
+				AUTO_DISCHARGE_PPS_HEADROOM_MV;
+	} else {
+		/* 3.5V for non-pd sink */
+		threshold = AUTO_DISCHARGE_DEFAULT_THRESHOLD_MV;
+	}
+
+	threshold = threshold / TCPC_VBUS_SINK_DISCONNECT_THRESH_LSB_MV;
+
+	if (threshold > TCPC_VBUS_SINK_DISCONNECT_THRESH_MAX)
+		return -EINVAL;
+
+write_thresh:
+	return tcpci_write16(tcpci, TCPC_VBUS_SINK_DISCONNECT_THRESH, threshold);
+}
+
 static int tcpci_enable_frs(struct tcpc_dev *dev, bool enable)
 {
 	struct tcpci *tcpci = tcpc_to_tcpci(dev);
@@ -284,6 +339,14 @@
 	return ret;
 }
 
+static void tcpci_frs_sourcing_vbus(struct tcpc_dev *dev)
+{
+	struct tcpci *tcpci = tcpc_to_tcpci(dev);
+
+	if (tcpci->data->frs_sourcing_vbus)
+		tcpci->data->frs_sourcing_vbus(tcpci, tcpci->data);
+}
+
 static int tcpci_set_bist_data(struct tcpc_dev *tcpc, bool enable)
 {
 	struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
@@ -628,6 +691,13 @@
 	tcpci->tcpc.pd_transmit = tcpci_pd_transmit;
 	tcpci->tcpc.set_bist_data = tcpci_set_bist_data;
 	tcpci->tcpc.enable_frs = tcpci_enable_frs;
+	tcpci->tcpc.frs_sourcing_vbus = tcpci_frs_sourcing_vbus;
+
+	if (tcpci->data->auto_discharge_disconnect) {
+		tcpci->tcpc.enable_auto_vbus_discharge = tcpci_enable_auto_vbus_discharge;
+		tcpci->tcpc.set_auto_vbus_discharge_threshold =
+			tcpci_set_auto_vbus_discharge_threshold;
+	}
 
 	err = tcpci_parse_config(tcpci);
 	if (err < 0)
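To sanity-check the threshold arithmetic in tcpci_set_auto_vbus_discharge_threshold() above: the millivolt figure is divided by the 25 mV register LSB and must fit the 10-bit field. A small stand-alone sketch (not part of the patch) with two worked values:

	/* Sketch of the mV-to-register conversion used above. */
	#include <stdio.h>

	#define LSB_MV	25	/* TCPC_VBUS_SINK_DISCONNECT_THRESH_LSB_MV */
	#define MAX_REG	0x3ff	/* TCPC_VBUS_SINK_DISCONNECT_THRESH_MAX */

	static int to_reg(unsigned int threshold_mv)
	{
		unsigned int reg = threshold_mv / LSB_MV;

		return reg > MAX_REG ? -1 : (int)reg;
	}

	int main(void)
	{
		/* non-PD sink: fixed 3.5 V floor */
		printf("default: %d\n", to_reg(3500));			/* 140 */
		/* 9 V contract: 95%% of 9000 mV minus an 850 mV headroom */
		printf("9V PD:   %d\n", to_reg(95 * 9000 / 100 - 850));	/* 308 */
		return 0;
	}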
diff --git a/drivers/usb/typec/tcpm/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h
index 5ef07a5..3fe3136 100644
--- a/drivers/usb/typec/tcpm/tcpci.h
+++ b/drivers/usb/typec/tcpm/tcpci.h
@@ -8,6 +8,8 @@
 #ifndef __LINUX_USB_TCPCI_H
 #define __LINUX_USB_TCPCI_H
 
+#include <linux/usb/typec.h>
+
 #define TCPC_VENDOR_ID			0x0
 #define TCPC_PRODUCT_ID			0x2
 #define TCPC_BCD_DEV			0x4
@@ -67,6 +69,7 @@
 
 #define TCPC_POWER_CTRL			0x1c
 #define TCPC_POWER_CTRL_VCONN_ENABLE	BIT(0)
+#define TCPC_POWER_CTRL_AUTO_DISCHARGE	BIT(4)
 #define TCPC_FAST_ROLE_SWAP_EN		BIT(7)
 
 #define TCPC_CC_STATUS			0x1d
@@ -133,6 +136,8 @@
 
 #define TCPC_VBUS_VOLTAGE			0x70
 #define TCPC_VBUS_SINK_DISCONNECT_THRESH	0x72
+#define TCPC_VBUS_SINK_DISCONNECT_THRESH_LSB_MV	25
+#define TCPC_VBUS_SINK_DISCONNECT_THRESH_MAX	0x3ff
 #define TCPC_VBUS_STOP_DISCHARGE_THRESH		0x74
 #define TCPC_VBUS_VOLTAGE_ALARM_HI_CFG		0x76
 #define TCPC_VBUS_VOLTAGE_ALARM_LO_CFG		0x78
@@ -140,20 +145,29 @@
 /* I2C_WRITE_BYTE_COUNT + 1 when TX_BUF_BYTE_x is only accessible I2C_WRITE_BYTE_COUNT */
 #define TCPC_TRANSMIT_BUFFER_MAX_LEN		31
 
-/*
- * @TX_BUF_BYTE_x_hidden
- *		optional; Set when TX_BUF_BYTE_x can only be accessed through I2C_WRITE_BYTE_COUNT.
- */
 struct tcpci;
+
+/*
+ * @TX_BUF_BYTE_x_hidden:
+ *		optional; Set when TX_BUF_BYTE_x can only be accessed through I2C_WRITE_BYTE_COUNT.
+ * @frs_sourcing_vbus:
+ *		Optional; Callback to perform chip specific operations when FRS
+ *		is sourcing vbus.
+ * @auto_discharge_disconnect:
+ *		Optional; Enables TCPC to autonomously discharge vbus on disconnect.
+ */
 struct tcpci_data {
 	struct regmap *regmap;
 	unsigned char TX_BUF_BYTE_x_hidden:1;
+	unsigned char auto_discharge_disconnect:1;
+
 	int (*init)(struct tcpci *tcpci, struct tcpci_data *data);
 	int (*set_vconn)(struct tcpci *tcpci, struct tcpci_data *data,
 			 bool enable);
 	int (*start_drp_toggling)(struct tcpci *tcpci, struct tcpci_data *data,
 				  enum typec_cc_status cc);
 	int (*set_vbus)(struct tcpci *tcpci, struct tcpci_data *data, bool source, bool sink);
+	void (*frs_sourcing_vbus)(struct tcpci *tcpci, struct tcpci_data *data);
 };
 
 struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data);
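A glue driver opts into these hooks by filling the optional tcpci_data members before tcpci_register_port(); tcpci_maxim.c below is the in-tree user, but the shape is simply (hypothetical sketch, names invented):

	/* Hypothetical glue driver: opting in to the new optional hooks. */
	#include "tcpci.h"

	static void my_frs_sourcing_vbus(struct tcpci *tcpci,
					 struct tcpci_data *data)
	{
		/* chip-specific boost/source enable for FRS would go here */
	}

	static int my_chip_setup(struct tcpci_data *data)
	{
		data->frs_sourcing_vbus = my_frs_sourcing_vbus;
		/* makes tcpci_register_port() wire up the discharge callbacks */
		data->auto_discharge_disconnect = true;
		return 0;
	}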
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.c b/drivers/usb/typec/tcpm/tcpci_maxim.c
index 723d7dd..c179723 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.c
@@ -238,23 +238,22 @@
 	if (ret < 0)
 		return;
 
-	if (pwr_status == 0xff) {
+	if (pwr_status == 0xff)
 		max_tcpci_init_regs(chip);
-	} else if (pwr_status & TCPC_POWER_STATUS_SOURCING_VBUS) {
+	else if (pwr_status & TCPC_POWER_STATUS_SOURCING_VBUS)
 		tcpm_sourcing_vbus(chip->port);
-		/*
-		 * Alawys re-enable boost here.
-		 * In normal case, when say an headset is attached, TCPM would
-		 * have instructed to TCPC to enable boost, so the call is a
-		 * no-op.
-		 * But for Fast Role Swap case, Boost turns on autonomously without
-		 * AP intervention, but, needs AP to enable source mode explicitly
-		 * for AP to regain control.
-		 */
-		max_tcpci_set_vbus(chip->tcpci, &chip->data, true, false);
-	} else {
+	else
 		tcpm_vbus_change(chip->port);
-	}
+}
+
+static void max_tcpci_frs_sourcing_vbus(struct tcpci *tcpci, struct tcpci_data *tdata)
+{
+	/*
+	 * For Fast Role Swap case, Boost turns on autonomously without
+	 * AP intervention, but, needs AP to enable source mode explicitly
+	 * for AP to regain control.
+	 */
+	max_tcpci_set_vbus(tcpci, tdata, true, false);
 }
 
 static void process_tx(struct max_tcpci_chip *chip, u16 status)
@@ -344,7 +343,7 @@
 {
 	struct max_tcpci_chip *chip = dev_id;
 	u16 status;
-	irqreturn_t irq_return;
+	irqreturn_t irq_return = IRQ_HANDLED;
 	int ret;
 
 	if (!chip->port)
@@ -441,10 +440,12 @@
 	chip->data.start_drp_toggling = max_tcpci_start_toggling;
 	chip->data.TX_BUF_BYTE_x_hidden = true;
 	chip->data.init = tcpci_init;
+	chip->data.frs_sourcing_vbus = max_tcpci_frs_sourcing_vbus;
+	chip->data.auto_discharge_disconnect = true;
 
 	max_tcpci_init_regs(chip);
 	chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
-	if (IS_ERR_OR_NULL(chip->tcpci)) {
+	if (IS_ERR(chip->tcpci)) {
 		dev_err(&client->dev, "TCPCI port registration failed");
 		ret = PTR_ERR(chip->tcpci);
 		return PTR_ERR(chip->tcpci);
@@ -481,7 +482,7 @@
 
 #ifdef CONFIG_OF
 static const struct of_device_id max_tcpci_of_match[] = {
-	{ .compatible = "maxim,tcpc", },
+	{ .compatible = "maxim,max33359", },
 	{},
 };
 MODULE_DEVICE_TABLE(of, max_tcpci_of_match);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index a6fae1f..dd408b4 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -363,12 +363,15 @@
 	/* port belongs to a self powered device */
 	bool self_powered;
 
-	/* FRS */
-	enum frs_typec_current frs_current;
+	/* Sink FRS */
+	enum frs_typec_current new_source_frs_current;
 
 	/* Sink caps have been queried */
 	bool sink_cap_done;
 
+	/* Port is still in tCCDebounce */
+	bool debouncing;
+
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *dentry;
 	struct mutex logbuffer_lock;	/* log buffer access lock */
@@ -752,6 +755,21 @@
 	return ret;
 }
 
+bool tcpm_is_debouncing(struct tcpm_port *port)
+{
+	bool debounce;
+
+	if (!port)
+		return false;
+
+	mutex_lock(&port->lock);
+	debounce = port->debouncing;
+	mutex_unlock(&port->lock);
+
+	return debounce;
+}
+EXPORT_SYMBOL_GPL(tcpm_is_debouncing);
+
 static u32 tcpm_get_current_limit(struct tcpm_port *port)
 {
 	enum typec_cc_status cc;
@@ -1706,6 +1724,24 @@
 	}
 }
 
+static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
+						  enum typec_pwr_opmode mode, bool pps_active,
+						  u32 requested_vbus_voltage)
+{
+	int ret;
+
+	if (!port->tcpc->set_auto_vbus_discharge_threshold)
+		return 0;
+
+	ret = port->tcpc->set_auto_vbus_discharge_threshold(port->tcpc, mode, pps_active,
+							    requested_vbus_voltage);
+	tcpm_log_force(port,
+		       "set_auto_vbus_discharge_threshold mode:%d pps_active:%c vbus:%u ret:%d",
+		       mode, pps_active ? 'y' : 'n', requested_vbus_voltage, ret);
+
+	return ret;
+}
+
 static void tcpm_pd_data_request(struct tcpm_port *port,
 				 const struct pd_message *msg)
 {
@@ -1713,7 +1749,7 @@
 	unsigned int cnt = pd_header_cnt_le(msg->header);
 	unsigned int rev = pd_header_rev_le(msg->header);
 	unsigned int i;
-	enum frs_typec_current frs_current;
+	enum frs_typec_current partner_frs_current;
 	bool frs_enable;
 	int ret;
 
@@ -1786,12 +1822,13 @@
 		for (i = 0; i < cnt; i++)
 			port->sink_caps[i] = le32_to_cpu(msg->payload[i]);
 
-		frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
+		partner_frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >>
 			PDO_FIXED_FRS_CURR_SHIFT;
-		frs_enable = frs_current && (frs_current <= port->frs_current);
+		frs_enable = partner_frs_current && (partner_frs_current <=
+						     port->new_source_frs_current);
 		tcpm_log(port,
 			 "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c",
-			 frs_current, port->frs_current, frs_enable ? 'y' : 'n');
+			 partner_frs_current, port->new_source_frs_current, frs_enable ? 'y' : 'n');
 		if (frs_enable) {
 			ret  = port->tcpc->enable_frs(port->tcpc, true);
 			tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? "fail" : "success", ret);
@@ -1875,6 +1912,10 @@
 						       port->current_limit,
 						       port->supply_voltage);
 				port->explicit_contract = true;
+				tcpm_set_auto_vbus_discharge_threshold(port,
+								       TYPEC_PWR_MODE_PD,
+								       port->pps_data.active,
+								       port->supply_voltage);
 				tcpm_set_state(port, SNK_READY, 0);
 			} else {
 				/*
@@ -2789,8 +2830,12 @@
 	if (ret < 0)
 		return ret;
 
-	ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
-			     tcpm_data_role_for_source(port));
+	if (port->tcpc->enable_auto_vbus_discharge) {
+		ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
+		tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
+	}
+
+	ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
 	if (ret < 0)
 		return ret;
 
@@ -2819,6 +2864,7 @@
 	port->partner = NULL;
 
 	port->attached = true;
+	port->debouncing = false;
 	port->send_discover = true;
 
 	return 0;
@@ -2857,6 +2903,12 @@
 
 static void tcpm_reset_port(struct tcpm_port *port)
 {
+	int ret;
+
+	if (port->tcpc->enable_auto_vbus_discharge) {
+		ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, false);
+		tcpm_log_force(port, "Disable vbus discharge ret:%d", ret);
+	}
 	tcpm_unregister_altmodes(port);
 	tcpm_typec_disconnect(port);
 	port->attached = false;
@@ -2921,8 +2973,13 @@
 	if (ret < 0)
 		return ret;
 
-	ret = tcpm_set_roles(port, true, TYPEC_SINK,
-			     tcpm_data_role_for_sink(port));
+	if (port->tcpc->enable_auto_vbus_discharge) {
+		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
+		ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, true);
+		tcpm_log_force(port, "enable vbus discharge ret:%d", ret);
+	}
+
+	ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
 	if (ret < 0)
 		return ret;
 
@@ -2931,6 +2988,7 @@
 	port->partner = NULL;
 
 	port->attached = true;
+	port->debouncing = false;
 	port->send_discover = true;
 
 	return 0;
@@ -2958,6 +3016,7 @@
 	tcpm_typec_connect(port);
 
 	port->attached = true;
+	port->debouncing = false;
 
 	return 0;
 }
@@ -3041,6 +3100,15 @@
 		if (!port->non_pd_role_swap)
 			tcpm_swap_complete(port, -ENOTCONN);
 		tcpm_src_detach(port);
+		if (port->debouncing) {
+			port->debouncing = false;
+			if (port->tcpc->check_contaminant &&
+			    port->tcpc->check_contaminant(port->tcpc)) {
+				/* Contaminant detection would handle toggling */
+				tcpm_set_state(port, TOGGLING, 0);
+				break;
+			}
+		}
 		if (tcpm_start_toggling(port, tcpm_rp_cc(port))) {
 			tcpm_set_state(port, TOGGLING, 0);
 			break;
@@ -3050,6 +3118,7 @@
 			tcpm_set_state(port, SNK_UNATTACHED, PD_T_DRP_SNK);
 		break;
 	case SRC_ATTACH_WAIT:
+		port->debouncing = true;
 		if (tcpm_port_is_debug(port))
 			tcpm_set_state(port, DEBUG_ACC_ATTACHED,
 				       PD_T_CC_DEBOUNCE);
@@ -3064,6 +3133,7 @@
 		break;
 
 	case SNK_TRY:
+		port->debouncing = false;
 		port->try_snk_count++;
 		/*
 		 * Requirements:
@@ -3086,15 +3156,13 @@
 		break;
 	case SNK_TRY_WAIT_DEBOUNCE:
 		tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS,
-			       PD_T_PD_DEBOUNCE);
+			       PD_T_TRY_CC_DEBOUNCE);
 		break;
 	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
-		if (port->vbus_present && tcpm_port_is_sink(port)) {
+		if (port->vbus_present && tcpm_port_is_sink(port))
 			tcpm_set_state(port, SNK_ATTACHED, 0);
-		} else {
-			tcpm_set_state(port, SRC_TRYWAIT, 0);
+		else
 			port->max_wait = 0;
-		}
 		break;
 	case SRC_TRYWAIT:
 		tcpm_set_cc(port, tcpm_rp_cc(port));
@@ -3240,6 +3308,15 @@
 			tcpm_swap_complete(port, -ENOTCONN);
 		tcpm_pps_complete(port, -ENOTCONN);
 		tcpm_snk_detach(port);
+		if (port->debouncing) {
+			port->debouncing = false;
+			if (port->tcpc->check_contaminant &&
+			    port->tcpc->check_contaminant(port->tcpc)) {
+				/* Contaminant detection would handle toggling */
+				tcpm_set_state(port, TOGGLING, 0);
+				break;
+			}
+		}
 		if (tcpm_start_toggling(port, TYPEC_CC_RD)) {
 			tcpm_set_state(port, TOGGLING, 0);
 			break;
@@ -3249,6 +3326,7 @@
 			tcpm_set_state(port, SRC_UNATTACHED, PD_T_DRP_SRC);
 		break;
 	case SNK_ATTACH_WAIT:
+		port->debouncing = true;
 		if ((port->cc1 == TYPEC_CC_OPEN &&
 		     port->cc2 != TYPEC_CC_OPEN) ||
 		    (port->cc1 != TYPEC_CC_OPEN &&
@@ -3260,17 +3338,20 @@
 				       PD_T_PD_DEBOUNCE);
 		break;
 	case SNK_DEBOUNCED:
-		if (tcpm_port_is_disconnected(port))
+		if (tcpm_port_is_disconnected(port)) {
 			tcpm_set_state(port, SNK_UNATTACHED,
 				       PD_T_PD_DEBOUNCE);
-		else if (port->vbus_present)
+		} else if (port->vbus_present) {
 			tcpm_set_state(port,
 				       tcpm_try_src(port) ? SRC_TRY
 							  : SNK_ATTACHED,
 				       0);
-		else
+			port->debouncing = false;
+		} else {
 			/* Wait for VBUS, but not forever */
 			tcpm_set_state(port, PORT_RESET, PD_T_PS_SOURCE_ON);
+			port->debouncing = false;
+		}
 		break;
 
 	case SRC_TRY:
@@ -3506,6 +3587,8 @@
 		tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
 		break;
 	case SNK_HARD_RESET_SINK_OFF:
+		/* Do not discharge/disconnect during hard reset */
+		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, 0);
 		memset(&port->pps_data, 0, sizeof(port->pps_data));
 		tcpm_set_vconn(port, false);
 		if (port->pd_capable)
@@ -3548,6 +3631,7 @@
 			tcpm_set_charge(port, true);
 		}
 		tcpm_set_attached_state(port, true);
+		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
 		tcpm_set_state(port, SNK_STARTUP, 0);
 		break;
 
@@ -3649,6 +3733,10 @@
 			tcpm_set_state(port, PR_SWAP_SNK_SRC_SINK_OFF, 0);
 		break;
 	case PR_SWAP_SRC_SNK_TRANSITION_OFF:
+		/*
+		 * Prevent vbus discharge circuit from turning on during PR_SWAP
+		 * as this is not a disconnect.
+		 */
 		tcpm_set_vbus(port, false);
 		port->explicit_contract = false;
 		/* allow time for Vbus discharge, must be < tSrcSwapStdby */
@@ -3677,9 +3765,17 @@
 		tcpm_set_state_cond(port, SNK_UNATTACHED, PD_T_PS_SOURCE_ON);
 		break;
 	case PR_SWAP_SRC_SNK_SINK_ON:
+		/* Set the vbus disconnect threshold for implicit contract */
+		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
 		tcpm_set_state(port, SNK_STARTUP, 0);
 		break;
 	case PR_SWAP_SNK_SRC_SINK_OFF:
+		/*
+		 * Prevent vbus discharge circuit from turning on during PR_SWAP
+		 * as this is not a disconnect.
+		 */
+		tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB,
+						       port->pps_data.active, 0);
 		tcpm_set_charge(port, false);
 		tcpm_set_state(port, hard_reset_state(port),
 			       PD_T_PS_SOURCE_OFF);
@@ -4000,6 +4096,12 @@
 		if (!tcpm_port_is_sink(port))
 			tcpm_set_state(port, SNK_TRYWAIT_DEBOUNCE, 0);
 		break;
+	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
+		if (!tcpm_port_is_sink(port))
+			tcpm_set_state(port, SRC_TRYWAIT, PD_T_TRY_CC_DEBOUNCE);
+		else
+			tcpm_set_state(port, SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS, 0);
+		break;
 	case SNK_TRYWAIT:
 		/* Do nothing, waiting for tCCDebounce */
 		break;
@@ -4086,11 +4188,24 @@
 	case SNK_TRYWAIT_DEBOUNCE:
 		/* Do nothing, waiting for Rp */
 		break;
+	case SNK_TRY_WAIT_DEBOUNCE_CHECK_VBUS:
+		if (port->vbus_present && tcpm_port_is_sink(port))
+			tcpm_set_state(port, SNK_ATTACHED, 0);
+		break;
 	case SRC_TRY_WAIT:
 	case SRC_TRY_DEBOUNCE:
 		/* Do nothing, waiting for sink detection */
 		break;
+	case FR_SWAP_SEND:
+	case FR_SWAP_SEND_TIMEOUT:
+	case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF:
+	case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED:
+		if (port->tcpc->frs_sourcing_vbus)
+			port->tcpc->frs_sourcing_vbus(port->tcpc);
+		break;
 	case FR_SWAP_SNK_SRC_NEW_SINK_READY:
+		if (port->tcpc->frs_sourcing_vbus)
+			port->tcpc->frs_sourcing_vbus(port->tcpc);
 		tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0);
 		break;
 
@@ -4142,6 +4257,7 @@
 	case SNK_TRYWAIT_DEBOUNCE:
 		break;
 	case SNK_ATTACH_WAIT:
+		port->debouncing = false;
 		tcpm_set_state(port, SNK_UNATTACHED, 0);
 		break;
 
@@ -4156,6 +4272,14 @@
 		/* Do nothing, expected */
 		break;
 
+	case PR_SWAP_SNK_SRC_SOURCE_ON:
+		/*
+		 * Do nothing when vbus off notification is received.
+		 * TCPM can wait for PD_T_NEWSRC in PR_SWAP_SNK_SRC_SOURCE_ON
+		 * for the vbus source to ramp up.
+		 */
+		break;
+
 	case PORT_RESET_WAIT_OFF:
 		tcpm_set_state(port, tcpm_default_state(port), 0);
 		break;
@@ -4808,14 +4932,58 @@
 
 	/* FRS can only be supported by DRP ports */
 	if (port->port_type == TYPEC_PORT_DRP) {
-		ret = fwnode_property_read_u32(fwnode, "frs-typec-current", &frs_current);
+		ret = fwnode_property_read_u32(fwnode, "new-source-frs-typec-current",
+					       &frs_current);
 		if (ret >= 0 && frs_current <= FRS_5V_3A)
-			port->frs_current = frs_current;
+			port->new_source_frs_current = frs_current;
 	}
 
 	return 0;
 }
 
+static int tcpm_copy_pdos(u32 *dest_pdo, const u32 *src_pdo, unsigned int nr_pdo)
+{
+	unsigned int i;
+
+	if (nr_pdo > PDO_MAX_OBJECTS)
+		nr_pdo = PDO_MAX_OBJECTS;
+
+	for (i = 0; i < nr_pdo; i++)
+		dest_pdo[i] = src_pdo[i];
+
+	return nr_pdo;
+}
+
+int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo, unsigned int nr_pdo,
+				  unsigned int operating_snk_mw)
+{
+	if (tcpm_validate_caps(port, pdo, nr_pdo))
+		return -EINVAL;
+
+	mutex_lock(&port->lock);
+	port->nr_snk_pdo = tcpm_copy_pdos(port->snk_pdo, pdo, nr_pdo);
+	port->operating_snk_mw = operating_snk_mw;
+	port->update_sink_caps = true;
+
+	switch (port->state) {
+	case SNK_NEGOTIATE_CAPABILITIES:
+	case SNK_NEGOTIATE_PPS_CAPABILITIES:
+	case SNK_READY:
+	case SNK_TRANSITION_SINK:
+	case SNK_TRANSITION_SINK_VBUS:
+		if (port->pps_data.active)
+			tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
+		else
+			tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&port->lock);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tcpm_update_sink_capabilities);
+
 /* Power Supply access to expose source power information */
 enum tcpm_psy_online_states {
 	TCPM_PSY_OFFLINE = 0,
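The new tcpm_update_sink_capabilities() export lets a low-level driver swap in a different sink PDO set at runtime (for example, thermal throttling) and forces renegotiation. A minimal sketch — the PDO values are illustrative only, and the declaration is assumed to land in <linux/usb/tcpm.h> elsewhere in this series:

	/* Sketch: runtime sink-capability update through the new export. */
	#include <linux/kernel.h>
	#include <linux/usb/pd.h>
	#include <linux/usb/tcpm.h>

	static int throttle_sink(struct tcpm_port *port)
	{
		/* illustrative PDOs: 5 V/3 A fixed plus a 9 V/2 A fixed */
		static const u32 pdo[] = {
			PDO_FIXED(5000, 3000,
				  PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP),
			PDO_FIXED(9000, 2000, 0),
		};

		/* 15 W operating budget; TCPM re-enters capability negotiation */
		return tcpm_update_sink_capabilities(port, pdo,
						     ARRAY_SIZE(pdo), 15000);
	}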
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 51a570d..f029589 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -53,7 +53,7 @@
 	ctrl = UCSI_ACK_CC_CI;
 	ctrl |= UCSI_ACK_CONNECTOR_CHANGE;
 
-	return ucsi->ops->async_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
+	return ucsi->ops->sync_write(ucsi, UCSI_CONTROL, &ctrl, sizeof(ctrl));
 }
 
 static int ucsi_exec_command(struct ucsi *ucsi, u64 command);
@@ -625,20 +625,112 @@
 	struct ucsi_connector *con = container_of(work, struct ucsi_connector,
 						  work);
 	struct ucsi *ucsi = con->ucsi;
+	struct ucsi_connector_status pre_ack_status;
+	struct ucsi_connector_status post_ack_status;
 	enum typec_role role;
+	u16 inferred_changes;
+	u16 changed_flags;
 	u64 command;
 	int ret;
 
 	mutex_lock(&con->lock);
 
+	/*
+	 * Some/many PPMs have an issue where all fields in the change bitfield
+	 * are cleared when an ACK is sent. This causes any change between
+	 * GET_CONNECTOR_STATUS and ACK to be lost.
+	 *
+	 * We work around this by re-fetching the connector status afterwards.
+	 * We then infer any changes that we see have happened but that may not
+	 * be represented in the change bitfield.
+	 *
+	 * Also, even though we don't need to know the currently supported alt
+	 * modes, we run the GET_CAM_SUPPORTED command to ensure the PPM does
+	 * not get stuck in case it assumes we do.
+	 * Always do this, rather than relying on UCSI_CONSTAT_CAM_CHANGE to be
+	 * set in the change bitfield.
+	 *
+	 * We end up with the following actions:
+	 *  1. UCSI_GET_CONNECTOR_STATUS, store result, update unprocessed_changes
+	 *  2. UCSI_GET_CAM_SUPPORTED, discard result
+	 *  3. ACK connector change
+	 *  4. UCSI_GET_CONNECTOR_STATUS, store result
+	 *  5. Infer lost changes by comparing UCSI_GET_CONNECTOR_STATUS results
+	 *  6. If PPM reported a new change, then restart in order to ACK
+	 *  7. Process everything as usual.
+	 *
+	 * We may end up seeing a change twice, but we can only miss extremely
+	 * short transitional changes.
+	 */
+
+	/* 1. First UCSI_GET_CONNECTOR_STATUS */
 	command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
-	ret = ucsi_send_command(ucsi, command, &con->status,
-				sizeof(con->status));
+	ret = ucsi_send_command(ucsi, command, &pre_ack_status,
+				sizeof(pre_ack_status));
 	if (ret < 0) {
 		dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
 			__func__, ret);
 		goto out_unlock;
 	}
+	con->unprocessed_changes |= pre_ack_status.change;
+
+	/* 2. Run UCSI_GET_CAM_SUPPORTED and discard the result. */
+	command = UCSI_GET_CAM_SUPPORTED;
+	command |= UCSI_CONNECTOR_NUMBER(con->num);
+	ucsi_send_command(con->ucsi, command, NULL, 0);
+
+	/* 3. ACK connector change */
+	clear_bit(EVENT_PENDING, &ucsi->flags);
+	ret = ucsi_acknowledge_connector_change(ucsi);
+	if (ret) {
+		dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
+		goto out_unlock;
+	}
+
+	/* 4. Second UCSI_GET_CONNECTOR_STATUS */
+	command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
+	ret = ucsi_send_command(ucsi, command, &post_ack_status,
+				sizeof(post_ack_status));
+	if (ret < 0) {
+		dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
+			__func__, ret);
+		goto out_unlock;
+	}
+
+	/* 5. Infer any missing changes */
+	changed_flags = pre_ack_status.flags ^ post_ack_status.flags;
+	inferred_changes = 0;
+	if (UCSI_CONSTAT_PWR_OPMODE(changed_flags) != 0)
+		inferred_changes |= UCSI_CONSTAT_POWER_OPMODE_CHANGE;
+
+	if (changed_flags & UCSI_CONSTAT_CONNECTED)
+		inferred_changes |= UCSI_CONSTAT_CONNECT_CHANGE;
+
+	if (changed_flags & UCSI_CONSTAT_PWR_DIR)
+		inferred_changes |= UCSI_CONSTAT_POWER_DIR_CHANGE;
+
+	if (UCSI_CONSTAT_PARTNER_FLAGS(changed_flags) != 0)
+		inferred_changes |= UCSI_CONSTAT_PARTNER_CHANGE;
+
+	if (UCSI_CONSTAT_PARTNER_TYPE(changed_flags) != 0)
+		inferred_changes |= UCSI_CONSTAT_PARTNER_CHANGE;
+
+	/* Mask out anything that was correctly notified in the later call. */
+	inferred_changes &= ~post_ack_status.change;
+	if (inferred_changes)
+		dev_dbg(ucsi->dev, "%s: Inferred changes that would have been lost: 0x%04x\n",
+			__func__, inferred_changes);
+
+	con->unprocessed_changes |= inferred_changes;
+
+	/* 6. If PPM reported a new change, then restart in order to ACK */
+	if (post_ack_status.change)
+		goto out_unlock;
+
+	/* 7. Continue as if nothing happened */
+	con->status = post_ack_status;
+	con->status.change = con->unprocessed_changes;
+	con->unprocessed_changes = 0;
 
 	role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
 
@@ -680,28 +772,19 @@
 		ucsi_port_psy_changed(con);
 	}
 
-	if (con->status.change & UCSI_CONSTAT_CAM_CHANGE) {
-		/*
-		 * We don't need to know the currently supported alt modes here.
-		 * Running GET_CAM_SUPPORTED command just to make sure the PPM
-		 * does not get stuck in case it assumes we do so.
-		 */
-		command = UCSI_GET_CAM_SUPPORTED;
-		command |= UCSI_CONNECTOR_NUMBER(con->num);
-		ucsi_send_command(con->ucsi, command, NULL, 0);
-	}
-
 	if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE)
 		ucsi_partner_change(con);
 
-	ret = ucsi_acknowledge_connector_change(ucsi);
-	if (ret)
-		dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
-
 	trace_ucsi_connector_change(con->num, &con->status);
 
 out_unlock:
-	clear_bit(EVENT_PENDING, &ucsi->flags);
+	if (test_and_clear_bit(EVENT_PENDING, &ucsi->flags)) {
+		schedule_work(&con->work);
+		mutex_unlock(&con->lock);
+		return;
+	}
+
+	clear_bit(EVENT_PROCESSING, &ucsi->flags);
 	mutex_unlock(&con->lock);
 }
 
@@ -719,7 +802,9 @@
 		return;
 	}
 
-	if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
+	set_bit(EVENT_PENDING, &ucsi->flags);
+
+	if (!test_and_set_bit(EVENT_PROCESSING, &ucsi->flags))
 		schedule_work(&con->work);
 }
 EXPORT_SYMBOL_GPL(ucsi_connector_change);
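
To make step 5 of the workaround concrete, a worked example with invented
values. Suppose the partner is unplugged after the first
GET_CONNECTOR_STATUS but before the ACK, and the PPM wipes the change
bitfield on ACK:

	/*
	 * pre_ack_status.flags:   UCSI_CONSTAT_CONNECTED set
	 * post_ack_status.flags:  UCSI_CONSTAT_CONNECTED clear
	 * post_ack_status.change: 0 (wiped by the ACK)
	 */
	changed_flags = pre_ack_status.flags ^ post_ack_status.flags;
	/* UCSI_CONSTAT_CONNECTED is set in changed_flags, so: */
	inferred_changes |= UCSI_CONSTAT_CONNECT_CHANGE;

The same example shows the stated limitation: a complete unplug-replug
inside that window XORs to zero and is still missed, which is the
"extremely short transitional changes" caveat in the comment above.
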
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index b7a92f2..dd9ba60 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -296,6 +296,7 @@
 #define EVENT_PENDING	0
 #define COMMAND_PENDING	1
 #define ACK_PENDING	2
+#define EVENT_PROCESSING	3
 };
 
 #define UCSI_MAX_SVID		5
@@ -322,6 +323,7 @@
 
 	struct typec_capability typec_cap;
 
+	u16 unprocessed_changes;
 	struct ucsi_connector_status status;
 	struct ucsi_connector_capability cap;
 	struct power_supply *psy;
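
With the hunks above, EVENT_PENDING becomes a latch set by the notifier and
the new EVENT_PROCESSING bit guarantees a single worker per UCSI instance.
A generic sketch of the handshake, stripped of the surrounding driver code,
showing why the worker re-checks EVENT_PENDING before clearing
EVENT_PROCESSING:

	/* Notifier side; may run concurrently with the work item: */
	set_bit(EVENT_PENDING, &ucsi->flags);		/* publish the event */
	if (!test_and_set_bit(EVENT_PROCESSING, &ucsi->flags))
		schedule_work(&con->work);	/* no worker running: start one */

	/* Worker side, after handling one connector change: */
	if (test_and_clear_bit(EVENT_PENDING, &ucsi->flags)) {
		schedule_work(&con->work);	/* an event raced in: go again */
		return;
	}
	clear_bit(EVENT_PROCESSING, &ucsi->flags);	/* truly idle now */

Either the notifier sees EVENT_PROCESSING clear and schedules the work, or
the worker sees EVENT_PENDING set and reschedules itself; nothing is lost
in the gap that the old single-flag scheme had.
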
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index fbfe8f5..0497643 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -103,11 +103,12 @@
 	if (ret)
 		return;
 
+	if (UCSI_CCI_CONNECTOR(cci))
+		ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
+
 	if (test_bit(COMMAND_PENDING, &ua->flags) &&
 	    cci & (UCSI_CCI_ACK_COMPLETE | UCSI_CCI_COMMAND_COMPLETE))
 		complete(&ua->complete);
-	else if (UCSI_CCI_CONNECTOR(cci))
-		ucsi_connector_change(ua->ucsi, UCSI_CCI_CONNECTOR(cci));
 }
 
 static int ucsi_acpi_probe(struct platform_device *pdev)
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index f1f6208..427d58c 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -4,6 +4,7 @@
 #include <linux/virtio_config.h>
 #include <linux/input.h>
 #include <linux/slab.h>
+#include <linux/input/mt.h>
 
 #include <uapi/linux/virtio_ids.h>
 #include <uapi/linux/virtio_input.h>
@@ -165,6 +166,15 @@
 	virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.flat, &fl);
 	input_set_abs_params(vi->idev, abs, mi, ma, fu, fl);
 	input_abs_set_res(vi->idev, abs, re);
+	if (abs == ABS_MT_TRACKING_ID) {
+		unsigned int slot_flags =
+			test_bit(INPUT_PROP_DIRECT, vi->idev->propbit) ?
+				INPUT_MT_DIRECT : 0;
+
+		input_mt_init_slots(vi->idev,
+				    ma, /* max number of contacts (slots) */
+				    slot_flags);
+	}
 }
 
 static int virtinput_init_vqs(struct virtio_input *vi)
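
Once input_mt_init_slots() has run, the input core tracks per-slot contact
state for the device. virtio-input itself only forwards the host's events
through input_event(), but for context, a hedged sketch of what a typical
multitouch producer does with a slotted device (slot number and coordinates
invented):

	#include <linux/input/mt.h>

	static void example_report_contact(struct input_dev *idev, int x, int y)
	{
		input_mt_slot(idev, 0);		/* contact lives in slot 0 */
		input_mt_report_slot_state(idev, MT_TOOL_FINGER, true);
		input_report_abs(idev, ABS_MT_POSITION_X, x);
		input_report_abs(idev, ABS_MT_POSITION_Y, y);
		input_mt_sync_frame(idev);	/* finalize the MT frame */
		input_sync(idev);
	}
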
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 6261719..cb14e8b 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -214,7 +214,8 @@
 
 static int v9fs_xattr_get_acl(const struct xattr_handler *handler,
 			      struct dentry *dentry, struct inode *inode,
-			      const char *name, void *buffer, size_t size)
+			      const char *name, void *buffer, size_t size,
+			      int flags)
 {
 	struct v9fs_session_info *v9ses;
 	struct posix_acl *acl;
diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c
index ac8ff8c..5cfa772 100644
--- a/fs/9p/xattr.c
+++ b/fs/9p/xattr.c
@@ -139,7 +139,8 @@
 
 static int v9fs_xattr_handler_get(const struct xattr_handler *handler,
 				  struct dentry *dentry, struct inode *inode,
-				  const char *name, void *buffer, size_t size)
+				  const char *name, void *buffer, size_t size,
+				  int flags)
 {
 	const char *full_name = xattr_full_name(handler, name);
 
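
Every xattr ->get() handler below gains the same trailing "int flags"
argument, so the conversions are mechanical. A sketch of the new shape;
example_xattr_get()/example_getxattr() are hypothetical, and the only flag
this series actually passes is XATTR_NOSECURITY (see the __vfs_getxattr()
callers in the ecryptfs hunks further down), which lets a stacking
filesystem skip security-module re-checks on its lower inode:

	static int example_xattr_get(const struct xattr_handler *handler,
				     struct dentry *dentry, struct inode *inode,
				     const char *name, void *buffer, size_t size,
				     int flags)
	{
		/* Handlers that do not care about flags simply ignore it. */
		return example_getxattr(inode, xattr_full_name(handler, name),
					buffer, size);
	}
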
diff --git a/fs/Kconfig b/fs/Kconfig
index aa4c122..cd4c52d 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -122,6 +122,7 @@
 source "fs/autofs/Kconfig"
 source "fs/fuse/Kconfig"
 source "fs/overlayfs/Kconfig"
+source "fs/incfs/Kconfig"
 
 menu "Caches"
 
diff --git a/fs/Makefile b/fs/Makefile
index 999d1a2..5dca348 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -113,6 +113,7 @@
 obj-$(CONFIG_FUSE_FS)		+= fuse/
 obj-$(CONFIG_OVERLAY_FS)	+= overlayfs/
 obj-$(CONFIG_ORANGEFS_FS)       += orangefs/
+obj-$(CONFIG_INCREMENTAL_FS)	+= incfs/
 obj-$(CONFIG_UDF_FS)		+= udf/
 obj-$(CONFIG_SUN_OPENPROMFS)	+= openpromfs/
 obj-$(CONFIG_OMFS_FS)		+= omfs/
diff --git a/fs/afs/xattr.c b/fs/afs/xattr.c
index 95c573d..6992918 100644
--- a/fs/afs/xattr.c
+++ b/fs/afs/xattr.c
@@ -59,7 +59,7 @@
 static int afs_xattr_get_acl(const struct xattr_handler *handler,
 			     struct dentry *dentry,
 			     struct inode *inode, const char *name,
-			     void *buffer, size_t size)
+			     void *buffer, size_t size, int flags)
 {
 	struct afs_operation *op;
 	struct afs_vnode *vnode = AFS_FS_I(inode);
@@ -160,7 +160,7 @@
 static int afs_xattr_get_yfs(const struct xattr_handler *handler,
 			     struct dentry *dentry,
 			     struct inode *inode, const char *name,
-			     void *buffer, size_t size)
+			     void *buffer, size_t size, int flags)
 {
 	struct afs_operation *op;
 	struct afs_vnode *vnode = AFS_FS_I(inode);
@@ -283,7 +283,7 @@
 static int afs_xattr_get_cell(const struct xattr_handler *handler,
 			      struct dentry *dentry,
 			      struct inode *inode, const char *name,
-			      void *buffer, size_t size)
+			      void *buffer, size_t size, int flags)
 {
 	struct afs_vnode *vnode = AFS_FS_I(inode);
 	struct afs_cell *cell = vnode->volume->cell;
@@ -310,7 +310,7 @@
 static int afs_xattr_get_fid(const struct xattr_handler *handler,
 			     struct dentry *dentry,
 			     struct inode *inode, const char *name,
-			     void *buffer, size_t size)
+			     void *buffer, size_t size, int flags)
 {
 	struct afs_vnode *vnode = AFS_FS_I(inode);
 	char text[16 + 1 + 24 + 1 + 8 + 1];
@@ -348,7 +348,7 @@
 static int afs_xattr_get_volume(const struct xattr_handler *handler,
 			      struct dentry *dentry,
 			      struct inode *inode, const char *name,
-			      void *buffer, size_t size)
+			      void *buffer, size_t size, int flags)
 {
 	struct afs_vnode *vnode = AFS_FS_I(inode);
 	const char *volname = vnode->volume->name;
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index 95d9aeb..1e522e1 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -353,7 +353,8 @@
 
 static int btrfs_xattr_handler_get(const struct xattr_handler *handler,
 				   struct dentry *unused, struct inode *inode,
-				   const char *name, void *buffer, size_t size)
+				   const char *name, void *buffer, size_t size,
+				   int flags)
 {
 	name = xattr_full_name(handler, name);
 	return btrfs_getxattr(inode, name, buffer, size);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 197cb12..2a39813 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -1155,7 +1155,8 @@
 
 static int ceph_get_xattr_handler(const struct xattr_handler *handler,
 				  struct dentry *dentry, struct inode *inode,
-				  const char *name, void *value, size_t size)
+				  const char *name, void *value, size_t size,
+				  int flags)
 {
 	if (!ceph_is_valid_xattr(name))
 		return -EOPNOTSUPP;
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index b829917..032df60 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -281,7 +281,7 @@
 
 static int cifs_xattr_get(const struct xattr_handler *handler,
 			  struct dentry *dentry, struct inode *inode,
-			  const char *name, void *value, size_t size)
+			  const char *name, void *value, size_t size, int flags)
 {
 	ssize_t rc = -EOPNOTSUPP;
 	unsigned int xid;
diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
index 4ef3f71..171500f 100644
--- a/fs/crypto/crypto.c
+++ b/fs/crypto/crypto.c
@@ -69,6 +69,14 @@
 }
 EXPORT_SYMBOL(fscrypt_free_bounce_page);
 
+/*
+ * Generate the IV for the given logical block number within the given file.
+ * For filenames encryption, lblk_num == 0.
+ *
+ * Keep this in sync with fscrypt_limit_dio_pages().  fscrypt_limit_dio_pages()
+ * needs to know about any IV generation methods where the low bits of IV don't
+ * simply contain the lblk_num (e.g., IV_INO_LBLK_32).
+ */
 void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num,
 			 const struct fscrypt_info *ci)
 {
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
index 1fbe6c2..cb3cfa6 100644
--- a/fs/crypto/fname.c
+++ b/fs/crypto/fname.c
@@ -570,7 +570,3 @@
 	return valid;
 }
 EXPORT_SYMBOL_GPL(fscrypt_d_revalidate);
-
-const struct dentry_operations fscrypt_d_ops = {
-	.d_revalidate = fscrypt_d_revalidate,
-};
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 4f5806a..b0cd957 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -22,6 +22,8 @@
 
 #define FSCRYPT_MIN_KEY_SIZE	16
 
+#define FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE	128
+
 #define FSCRYPT_CONTEXT_V1	1
 #define FSCRYPT_CONTEXT_V2	2
 
@@ -294,7 +296,6 @@
 bool fscrypt_fname_encrypted_size(const union fscrypt_policy *policy,
 				  u32 orig_len, u32 max_len,
 				  u32 *encrypted_len_ret);
-extern const struct dentry_operations fscrypt_d_ops;
 
 /* hkdf.c */
 
@@ -328,7 +329,8 @@
 
 /* inline_crypt.c */
 #ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT
-int fscrypt_select_encryption_impl(struct fscrypt_info *ci);
+int fscrypt_select_encryption_impl(struct fscrypt_info *ci,
+				   bool is_hw_wrapped_key);
 
 static inline bool
 fscrypt_using_inline_encryption(const struct fscrypt_info *ci)
@@ -338,10 +340,18 @@
 
 int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
 				     const u8 *raw_key,
+				     unsigned int raw_key_size,
+				     bool is_hw_wrapped,
 				     const struct fscrypt_info *ci);
 
 void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key);
 
+extern int fscrypt_derive_raw_secret(struct super_block *sb,
+				     const u8 *wrapped_key,
+				     unsigned int wrapped_key_size,
+				     u8 *raw_secret,
+				     unsigned int raw_secret_size);
+
 /*
  * Check whether the crypto transform or blk-crypto key has been allocated in
  * @prep_key, depending on which encryption implementation the file will use.
@@ -365,7 +375,8 @@
 
 #else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
 
-static inline int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
+static inline int fscrypt_select_encryption_impl(struct fscrypt_info *ci,
+						 bool is_hw_wrapped_key)
 {
 	return 0;
 }
@@ -378,7 +389,8 @@
 
 static inline int
 fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
-				 const u8 *raw_key,
+				 const u8 *raw_key, unsigned int raw_key_size,
+				 bool is_hw_wrapped,
 				 const struct fscrypt_info *ci)
 {
 	WARN_ON(1);
@@ -390,6 +402,17 @@
 {
 }
 
+static inline int fscrypt_derive_raw_secret(struct super_block *sb,
+					    const u8 *wrapped_key,
+					    unsigned int wrapped_key_size,
+					    u8 *raw_secret,
+					    unsigned int raw_secret_size)
+{
+	fscrypt_warn(NULL,
+		     "kernel built without support for hardware-wrapped keys");
+	return -EOPNOTSUPP;
+}
+
 static inline bool
 fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key,
 			const struct fscrypt_info *ci)
@@ -414,8 +437,15 @@
 	/* Size of the raw key in bytes.  Set even if ->raw isn't set. */
 	u32			size;
 
-	/* For v1 policy keys: the raw key.  Wiped for v2 policy keys. */
-	u8			raw[FSCRYPT_MAX_KEY_SIZE];
+	/* True if the key in ->raw is a hardware-wrapped key. */
+	bool			is_hw_wrapped;
+
+	/*
+	 * For v1 policy keys: the raw key.  Wiped for v2 policy keys, unless
+	 * ->is_hw_wrapped is true, in which case this contains the wrapped key
+	 * rather than the key with which 'hkdf' was keyed.
+	 */
+	u8			raw[FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE];
 
 } __randomize_layout;
 
@@ -563,7 +593,8 @@
 extern struct fscrypt_mode fscrypt_modes[];
 
 int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
-			const u8 *raw_key, const struct fscrypt_info *ci);
+			const u8 *raw_key, unsigned int raw_key_size,
+			bool is_hw_wrapped, const struct fscrypt_info *ci);
 
 void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key);
 
diff --git a/fs/crypto/hooks.c b/fs/crypto/hooks.c
index 20b0df4..9006fa9 100644
--- a/fs/crypto/hooks.c
+++ b/fs/crypto/hooks.c
@@ -117,7 +117,6 @@
 		spin_lock(&dentry->d_lock);
 		dentry->d_flags |= DCACHE_NOKEY_NAME;
 		spin_unlock(&dentry->d_lock);
-		d_set_d_op(dentry, &fscrypt_d_ops);
 	}
 	return err;
 }
diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
index c57bebf..bf2c46c 100644
--- a/fs/crypto/inline_crypt.c
+++ b/fs/crypto/inline_crypt.c
@@ -15,8 +15,10 @@
 #include <linux/blk-crypto.h>
 #include <linux/blkdev.h>
 #include <linux/buffer_head.h>
+#include <linux/keyslot-manager.h>
 #include <linux/sched/mm.h>
 #include <linux/slab.h>
+#include <linux/uio.h>
 
 #include "fscrypt_private.h"
 
@@ -64,7 +66,8 @@
 }
 
 /* Enable inline encryption for this file if supported. */
-int fscrypt_select_encryption_impl(struct fscrypt_info *ci)
+int fscrypt_select_encryption_impl(struct fscrypt_info *ci,
+				   bool is_hw_wrapped_key)
 {
 	const struct inode *inode = ci->ci_inode;
 	struct super_block *sb = inode->i_sb;
@@ -105,6 +108,7 @@
 	crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode;
 	crypto_cfg.data_unit_size = sb->s_blocksize;
 	crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci);
+	crypto_cfg.is_hw_wrapped = is_hw_wrapped_key;
 	num_devs = fscrypt_get_num_devices(sb);
 	devs = kmalloc_array(num_devs, sizeof(*devs), GFP_KERNEL);
 	if (!devs)
@@ -125,6 +129,8 @@
 
 int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key,
 				     const u8 *raw_key,
+				     unsigned int raw_key_size,
+				     bool is_hw_wrapped,
 				     const struct fscrypt_info *ci)
 {
 	const struct inode *inode = ci->ci_inode;
@@ -143,7 +149,11 @@
 	blk_key->num_devs = num_devs;
 	fscrypt_get_devices(sb, num_devs, blk_key->devs);
 
-	err = blk_crypto_init_key(&blk_key->base, raw_key, crypto_mode,
+	BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE >
+		     BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE);
+
+	err = blk_crypto_init_key(&blk_key->base, raw_key, raw_key_size,
+				  is_hw_wrapped, crypto_mode,
 				  fscrypt_get_dun_bytes(ci), sb->s_blocksize);
 	if (err) {
 		fscrypt_err(inode, "error %d initializing blk-crypto key", err);
@@ -203,6 +213,21 @@
 	}
 }
 
+int fscrypt_derive_raw_secret(struct super_block *sb,
+			      const u8 *wrapped_key,
+			      unsigned int wrapped_key_size,
+			      u8 *raw_secret, unsigned int raw_secret_size)
+{
+	struct request_queue *q;
+
+	q = bdev_get_queue(sb->s_bdev);
+	if (!q->ksm)
+		return -EOPNOTSUPP;
+
+	return blk_ksm_derive_raw_secret(q->ksm, wrapped_key, wrapped_key_size,
+					 raw_secret, raw_secret_size);
+}
+
 bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
 {
 	return inode->i_crypt_info->ci_inlinecrypt;
@@ -238,6 +263,8 @@
  * otherwise fscrypt_mergeable_bio() won't work as intended.
  *
  * The encryption context will be freed automatically when the bio is freed.
+ *
+ * This function also handles setting bi_skip_dm_default_key when needed.
  */
 void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
 			       u64 first_lblk, gfp_t gfp_mask)
@@ -245,6 +272,9 @@
 	const struct fscrypt_info *ci;
 	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
 
+	if (fscrypt_inode_should_skip_dm_default_key(inode))
+		bio_set_skip_dm_default_key(bio);
+
 	if (!fscrypt_inode_uses_inline_crypto(inode))
 		return;
 	ci = inode->i_crypt_info;
@@ -315,6 +345,9 @@
  *
  * fscrypt_set_bio_crypt_ctx() must have already been called on the bio.
  *
+ * This function also returns false if the next part of the I/O would need to
+ * have a different value for the bi_skip_dm_default_key flag.
+ *
  * Return: true iff the I/O is mergeable
  */
 bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
@@ -325,6 +358,9 @@
 
 	if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
 		return false;
+	if (bio_should_skip_dm_default_key(bio) !=
+	    fscrypt_inode_should_skip_dm_default_key(inode))
+		return false;
 	if (!bc)
 		return true;
 
@@ -358,8 +394,80 @@
 	u64 next_lblk;
 
 	if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
-		return !bio->bi_crypt_context;
+		return !bio->bi_crypt_context &&
+		       !bio_should_skip_dm_default_key(bio);
 
 	return fscrypt_mergeable_bio(bio, inode, next_lblk);
 }
 EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh);
+
+/**
+ * fscrypt_dio_supported() - check whether a direct I/O request is allowed
+ *			     under fscrypt's encryption constraints
+ * @iocb: the file and position the I/O is targeting
+ * @iter: the I/O data segment(s)
+ *
+ * Return: true if direct I/O is supported
+ */
+bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter)
+{
+	const struct inode *inode = file_inode(iocb->ki_filp);
+	const unsigned int blocksize = i_blocksize(inode);
+
+	/* If the file is unencrypted, no veto from us. */
+	if (!fscrypt_needs_contents_encryption(inode))
+		return true;
+
+	/* We only support direct I/O with inline crypto, not fs-layer crypto */
+	if (!fscrypt_inode_uses_inline_crypto(inode))
+		return false;
+
+	/*
+	 * Since the granularity of encryption is filesystem blocks, the I/O
+	 * must be block aligned -- not just disk sector aligned.
+	 */
+	if (!IS_ALIGNED(iocb->ki_pos | iov_iter_alignment(iter), blocksize))
+		return false;
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(fscrypt_dio_supported);
+
+/**
+ * fscrypt_limit_dio_pages() - limit I/O pages to avoid discontiguous DUNs
+ * @inode: the file on which I/O is being done
+ * @pos: the file position (in bytes) at which the I/O is being done
+ * @nr_pages: the number of pages we want to submit starting at @pos
+ *
+ * For direct I/O: limit the number of pages that will be submitted in the bio
+ * targeting @pos, in order to avoid crossing a data unit number (DUN)
+ * discontinuity.  This is only needed for certain IV generation methods.
+ *
+ * This assumes block_size == PAGE_SIZE; see fscrypt_dio_supported().
+ *
+ * Return: the actual number of pages that can be submitted
+ */
+int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos, int nr_pages)
+{
+	const struct fscrypt_info *ci = inode->i_crypt_info;
+	u32 dun;
+
+	if (!fscrypt_inode_uses_inline_crypto(inode))
+		return nr_pages;
+
+	if (nr_pages <= 1)
+		return nr_pages;
+
+	if (!(fscrypt_policy_flags(&ci->ci_policy) &
+	      FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))
+		return nr_pages;
+
+	if (WARN_ON_ONCE(i_blocksize(inode) != PAGE_SIZE))
+		return 1;
+
+	/* With IV_INO_LBLK_32, the DUN can wrap around from U32_MAX to 0. */
+
+	dun = ci->ci_hashed_ino + (pos >> inode->i_blkbits);
+
+	return min_t(u64, nr_pages, (u64)U32_MAX + 1 - dun);
+}
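
A worked example of the final clamp, with made-up numbers. Under
IV_INO_LBLK_32 the DUN is 32-bit and blk-crypto cannot express a wrap from
U32_MAX to 0 inside a single bio:

	/*
	 * ci_hashed_ino    = 0xfffffff0
	 * pos >> i_blkbits = 0x0c	->  dun = 0xfffffffc
	 * nr_pages         = 8
	 *
	 * (u64)U32_MAX + 1 - dun = 4, so min_t() returns 4: this bio may
	 * carry four pages (DUNs 0xfffffffc..0xffffffff) and the caller must
	 * start a new bio at DUN 0 for the remaining four.
	 */
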
diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
index 53cc552..04a5896 100644
--- a/fs/crypto/keyring.c
+++ b/fs/crypto/keyring.c
@@ -476,6 +476,9 @@
 	return err;
 }
 
+/* Size of software "secret" derived from hardware-wrapped key */
+#define RAW_SECRET_SIZE 32
+
 static int add_master_key(struct super_block *sb,
 			  struct fscrypt_master_key_secret *secret,
 			  struct fscrypt_key_specifier *key_spec)
@@ -483,17 +486,28 @@
 	int err;
 
 	if (key_spec->type == FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER) {
-		err = fscrypt_init_hkdf(&secret->hkdf, secret->raw,
-					secret->size);
+		u8 _kdf_key[RAW_SECRET_SIZE];
+		u8 *kdf_key = secret->raw;
+		unsigned int kdf_key_size = secret->size;
+
+		if (secret->is_hw_wrapped) {
+			kdf_key = _kdf_key;
+			kdf_key_size = RAW_SECRET_SIZE;
+			err = fscrypt_derive_raw_secret(sb, secret->raw,
+							secret->size,
+							kdf_key, kdf_key_size);
+			if (err)
+				return err;
+		}
+		err = fscrypt_init_hkdf(&secret->hkdf, kdf_key, kdf_key_size);
+		/*
+		 * Now that the HKDF context is initialized, the raw HKDF key is
+		 * no longer needed.
+		 */
+		memzero_explicit(kdf_key, kdf_key_size);
 		if (err)
 			return err;
 
-		/*
-		 * Now that the HKDF context is initialized, the raw key is no
-		 * longer needed.
-		 */
-		memzero_explicit(secret->raw, secret->size);
-
 		/* Calculate the key identifier */
 		err = fscrypt_hkdf_expand(&secret->hkdf,
 					  HKDF_CONTEXT_KEY_IDENTIFIER, NULL, 0,
@@ -509,8 +523,10 @@
 {
 	const struct fscrypt_provisioning_key_payload *payload = prep->data;
 
+	BUILD_BUG_ON(FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE < FSCRYPT_MAX_KEY_SIZE);
+
 	if (prep->datalen < sizeof(*payload) + FSCRYPT_MIN_KEY_SIZE ||
-	    prep->datalen > sizeof(*payload) + FSCRYPT_MAX_KEY_SIZE)
+	    prep->datalen > sizeof(*payload) + FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE)
 		return -EINVAL;
 
 	if (payload->type != FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR &&
@@ -659,15 +675,29 @@
 		return -EACCES;
 
 	memset(&secret, 0, sizeof(secret));
+
+	if (arg.__flags) {
+		if (arg.__flags & ~__FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED)
+			return -EINVAL;
+		if (arg.key_spec.type != FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER)
+			return -EINVAL;
+		secret.is_hw_wrapped = true;
+	}
+
 	if (arg.key_id) {
 		if (arg.raw_size != 0)
 			return -EINVAL;
 		err = get_keyring_key(arg.key_id, arg.key_spec.type, &secret);
 		if (err)
 			goto out_wipe_secret;
+		err = -EINVAL;
+		if (secret.size > FSCRYPT_MAX_KEY_SIZE && !secret.is_hw_wrapped)
+			goto out_wipe_secret;
 	} else {
 		if (arg.raw_size < FSCRYPT_MIN_KEY_SIZE ||
-		    arg.raw_size > FSCRYPT_MAX_KEY_SIZE)
+		    arg.raw_size > (secret.is_hw_wrapped ?
+				    FSCRYPT_MAX_HW_WRAPPED_KEY_SIZE :
+				    FSCRYPT_MAX_KEY_SIZE))
 			return -EINVAL;
 		secret.size = arg.raw_size;
 		err = -EFAULT;
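
For context, a sketch of how userspace might hand the kernel a
hardware-wrapped key through this extended ioctl. It assumes this tree's
UAPI, where __flags and __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED are exposed in
<linux/fscrypt.h>; the blob contents and size are illustrative:

	#include <linux/fscrypt.h>
	#include <sys/ioctl.h>
	#include <stdlib.h>
	#include <string.h>

	static int example_add_wrapped_key(int fs_fd, const void *blob,
					   size_t blob_size)
	{
		struct fscrypt_add_key_arg *arg;
		int ret;

		arg = calloc(1, sizeof(*arg) + blob_size);
		if (!arg)
			return -1;
		arg->key_spec.type = FSCRYPT_KEY_SPEC_TYPE_IDENTIFIER;
		arg->__flags = __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED;
		arg->raw_size = blob_size;  /* may exceed FSCRYPT_MAX_KEY_SIZE */
		memcpy(arg->raw, blob, blob_size);
		ret = ioctl(fs_fd, FS_IOC_ADD_ENCRYPTION_KEY, arg);
		free(arg);
		return ret;
	}

The kernel then derives a 32-byte software secret from the blob via
fscrypt_derive_raw_secret() and keys HKDF with that secret, never with the
blob itself.
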
diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
index d595abb..549de7cf 100644
--- a/fs/crypto/keysetup.c
+++ b/fs/crypto/keysetup.c
@@ -119,12 +119,17 @@
  * (fs-layer or blk-crypto) will be used.
  */
 int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key,
-			const u8 *raw_key, const struct fscrypt_info *ci)
+			const u8 *raw_key, unsigned int raw_key_size,
+			bool is_hw_wrapped, const struct fscrypt_info *ci)
 {
 	struct crypto_skcipher *tfm;
 
 	if (fscrypt_using_inline_encryption(ci))
-		return fscrypt_prepare_inline_crypt_key(prep_key, raw_key, ci);
+		return fscrypt_prepare_inline_crypt_key(prep_key,
+				raw_key, raw_key_size, is_hw_wrapped, ci);
+
+	if (WARN_ON(is_hw_wrapped || raw_key_size != ci->ci_mode->keysize))
+		return -EINVAL;
 
 	tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode);
 	if (IS_ERR(tfm))
@@ -150,7 +155,9 @@
 int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key)
 {
 	ci->ci_owns_key = true;
-	return fscrypt_prepare_key(&ci->ci_enc_key, raw_key, ci);
+	return fscrypt_prepare_key(&ci->ci_enc_key, raw_key,
+				   ci->ci_mode->keysize,
+				   false /*is_hw_wrapped*/, ci);
 }
 
 static int setup_per_mode_enc_key(struct fscrypt_info *ci,
@@ -182,24 +189,48 @@
 	if (fscrypt_is_key_prepared(prep_key, ci))
 		goto done_unlock;
 
-	BUILD_BUG_ON(sizeof(mode_num) != 1);
-	BUILD_BUG_ON(sizeof(sb->s_uuid) != 16);
-	BUILD_BUG_ON(sizeof(hkdf_info) != 17);
-	hkdf_info[hkdf_infolen++] = mode_num;
-	if (include_fs_uuid) {
-		memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid,
-		       sizeof(sb->s_uuid));
-		hkdf_infolen += sizeof(sb->s_uuid);
+	if (mk->mk_secret.is_hw_wrapped && S_ISREG(inode->i_mode)) {
+		int i;
+
+		if (!fscrypt_using_inline_encryption(ci)) {
+			fscrypt_warn(ci->ci_inode,
+				     "Hardware-wrapped keys require inline encryption (-o inlinecrypt)");
+			err = -EINVAL;
+			goto out_unlock;
+		}
+		for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) {
+			if (fscrypt_is_key_prepared(&keys[i], ci)) {
+				fscrypt_warn(ci->ci_inode,
+					     "Each hardware-wrapped key can only be used with one encryption mode");
+				err = -EINVAL;
+				goto out_unlock;
+			}
+		}
+		err = fscrypt_prepare_key(prep_key, mk->mk_secret.raw,
+					  mk->mk_secret.size, true, ci);
+		if (err)
+			goto out_unlock;
+	} else {
+		BUILD_BUG_ON(sizeof(mode_num) != 1);
+		BUILD_BUG_ON(sizeof(sb->s_uuid) != 16);
+		BUILD_BUG_ON(sizeof(hkdf_info) != 17);
+		hkdf_info[hkdf_infolen++] = mode_num;
+		if (include_fs_uuid) {
+			memcpy(&hkdf_info[hkdf_infolen], &sb->s_uuid,
+				   sizeof(sb->s_uuid));
+			hkdf_infolen += sizeof(sb->s_uuid);
+		}
+		err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
+					  hkdf_context, hkdf_info, hkdf_infolen,
+					  mode_key, mode->keysize);
+		if (err)
+			goto out_unlock;
+		err = fscrypt_prepare_key(prep_key, mode_key, mode->keysize,
+					  false /*is_hw_wrapped*/, ci);
+		memzero_explicit(mode_key, mode->keysize);
+		if (err)
+			goto out_unlock;
 	}
-	err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf,
-				  hkdf_context, hkdf_info, hkdf_infolen,
-				  mode_key, mode->keysize);
-	if (err)
-		goto out_unlock;
-	err = fscrypt_prepare_key(prep_key, mode_key, ci);
-	memzero_explicit(mode_key, mode->keysize);
-	if (err)
-		goto out_unlock;
 done_unlock:
 	ci->ci_enc_key = *prep_key;
 	err = 0;
@@ -280,6 +311,14 @@
 {
 	int err;
 
+	if (mk->mk_secret.is_hw_wrapped &&
+	    !(ci->ci_policy.v2.flags & (FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 |
+					FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32))) {
+		fscrypt_warn(ci->ci_inode,
+			     "Hardware-wrapped keys are only supported with IV_INO_LBLK policies");
+		return -EINVAL;
+	}
+
 	if (ci->ci_policy.v2.flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) {
 		/*
 		 * DIRECT_KEY: instead of deriving per-file encryption keys, the
@@ -350,10 +389,6 @@
 	struct fscrypt_key_specifier mk_spec;
 	int err;
 
-	err = fscrypt_select_encryption_impl(ci);
-	if (err)
-		return err;
-
 	switch (ci->ci_policy.version) {
 	case FSCRYPT_POLICY_V1:
 		mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR;
@@ -378,6 +413,10 @@
 		    ci->ci_policy.version != FSCRYPT_POLICY_V1)
 			return PTR_ERR(key);
 
+		err = fscrypt_select_encryption_impl(ci, false);
+		if (err)
+			return err;
+
 		/*
 		 * As a legacy fallback for v1 policies, search for the key in
 		 * the current task's subscribed keyrings too.  Don't move this
@@ -412,6 +451,10 @@
 		goto out_release_key;
 	}
 
+	err = fscrypt_select_encryption_impl(ci, mk->mk_secret.is_hw_wrapped);
+	if (err)
+		goto out_release_key;
+
 	switch (ci->ci_policy.version) {
 	case FSCRYPT_POLICY_V1:
 		err = fscrypt_setup_v1_file_key(ci, mk->mk_secret.raw);
diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c
index 2762c53..f0c22b5 100644
--- a/fs/crypto/keysetup_v1.c
+++ b/fs/crypto/keysetup_v1.c
@@ -233,7 +233,8 @@
 		return ERR_PTR(-ENOMEM);
 	refcount_set(&dk->dk_refcount, 1);
 	dk->dk_mode = ci->ci_mode;
-	err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci);
+	err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci->ci_mode->keysize,
+				  false /*is_hw_wrapped*/, ci);
 	if (err)
 		goto err_free_dk;
 	memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor,
diff --git a/fs/direct-io.c b/fs/direct-io.c
index d53fa92..f6672c4 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/fs.h>
+#include <linux/fscrypt.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/highmem.h>
@@ -392,6 +393,7 @@
 	      sector_t first_sector, int nr_vecs)
 {
 	struct bio *bio;
+	struct inode *inode = dio->inode;
 
 	/*
 	 * bio_alloc() is guaranteed to return a bio when allowed to sleep and
@@ -399,6 +401,9 @@
 	 */
 	bio = bio_alloc(GFP_KERNEL, nr_vecs);
 
+	fscrypt_set_bio_crypt_ctx(bio, inode,
+				  sdio->cur_page_fs_offset >> inode->i_blkbits,
+				  GFP_KERNEL);
 	bio_set_dev(bio, bdev);
 	bio->bi_iter.bi_sector = first_sector;
 	bio_set_op_attrs(bio, dio->op, dio->op_flags);
@@ -763,9 +768,17 @@
 		 * current logical offset in the file does not equal what would
 		 * be the next logical offset in the bio, submit the bio we
 		 * have.
+		 *
+		 * When fscrypt inline encryption is used, data unit number
+		 * (DUN) contiguity is also required.  Normally that's implied
+		 * by logical contiguity.  However, certain IV generation
+		 * methods (e.g. IV_INO_LBLK_32) don't guarantee it.  So, we
+		 * must explicitly check fscrypt_mergeable_bio() too.
 		 */
 		if (sdio->final_block_in_bio != sdio->cur_page_block ||
-		    cur_offset != bio_next_offset)
+		    cur_offset != bio_next_offset ||
+		    !fscrypt_mergeable_bio(sdio->bio, dio->inode,
+					   cur_offset >> dio->inode->i_blkbits))
 			dio_bio_submit(dio, sdio);
 	}
 
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index e23752d..12616d5 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -1040,7 +1040,8 @@
 		goto out;
 	}
 	inode_lock(lower_inode);
-	rc = __vfs_getxattr(lower_dentry, lower_inode, name, value, size);
+	rc = __vfs_getxattr(lower_dentry, lower_inode, name, value, size,
+			    XATTR_NOSECURITY);
 	inode_unlock(lower_inode);
 out:
 	return rc;
@@ -1125,7 +1126,8 @@
 
 static int ecryptfs_xattr_get(const struct xattr_handler *handler,
 			      struct dentry *dentry, struct inode *inode,
-			      const char *name, void *buffer, size_t size)
+			      const char *name, void *buffer, size_t size,
+			      int flags)
 {
 	return ecryptfs_getxattr(dentry, inode, name, buffer, size);
 }
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c
index 019572c..bc1ca4d 100644
--- a/fs/ecryptfs/mmap.c
+++ b/fs/ecryptfs/mmap.c
@@ -422,7 +422,7 @@
 	}
 	inode_lock(lower_inode);
 	size = __vfs_getxattr(lower_dentry, lower_inode, ECRYPTFS_XATTR_NAME,
-			      xattr_virt, PAGE_SIZE);
+			      xattr_virt, PAGE_SIZE, XATTR_NOSECURITY);
 	if (size < 0)
 		size = 8;
 	put_unaligned_be64(i_size_read(ecryptfs_inode), xattr_virt);
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
index 5bde77d..58cf0cf 100644
--- a/fs/erofs/xattr.c
+++ b/fs/erofs/xattr.c
@@ -463,7 +463,8 @@
 
 static int erofs_xattr_generic_get(const struct xattr_handler *handler,
 				   struct dentry *unused, struct inode *inode,
-				   const char *name, void *buffer, size_t size)
+				   const char *name, void *buffer, size_t size,
+				   int flags)
 {
 	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
 
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 4df6112..185252d 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -29,6 +29,7 @@
 #include <linux/mutex.h>
 #include <linux/anon_inodes.h>
 #include <linux/device.h>
+#include <linux/freezer.h>
 #include <linux/uaccess.h>
 #include <asm/io.h>
 #include <asm/mman.h>
@@ -1905,7 +1906,8 @@
 		if (eavail || res)
 			break;
 
-		if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) {
+		if (!freezable_schedule_hrtimeout_range(to, slack,
+						        HRTIMER_MODE_ABS)) {
 			timed_out = 1;
 			break;
 		}
diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c
index 9a682e4..d5f6eb0 100644
--- a/fs/ext2/xattr_security.c
+++ b/fs/ext2/xattr_security.c
@@ -11,7 +11,7 @@
 static int
 ext2_xattr_security_get(const struct xattr_handler *handler,
 			struct dentry *unused, struct inode *inode,
-			const char *name, void *buffer, size_t size)
+			const char *name, void *buffer, size_t size, int flags)
 {
 	return ext2_xattr_get(inode, EXT2_XATTR_INDEX_SECURITY, name,
 			      buffer, size);
diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c
index 49add11..8d31366 100644
--- a/fs/ext2/xattr_trusted.c
+++ b/fs/ext2/xattr_trusted.c
@@ -18,7 +18,7 @@
 static int
 ext2_xattr_trusted_get(const struct xattr_handler *handler,
 		       struct dentry *unused, struct inode *inode,
-		       const char *name, void *buffer, size_t size)
+		       const char *name, void *buffer, size_t size, int flags)
 {
 	return ext2_xattr_get(inode, EXT2_XATTR_INDEX_TRUSTED, name,
 			      buffer, size);
diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c
index c243a3b..712b7c9 100644
--- a/fs/ext2/xattr_user.c
+++ b/fs/ext2/xattr_user.c
@@ -20,7 +20,7 @@
 static int
 ext2_xattr_user_get(const struct xattr_handler *handler,
 		    struct dentry *unused, struct inode *inode,
-		    const char *name, void *buffer, size_t size)
+		    const char *name, void *buffer, size_t size, int flags)
 {
 	if (!test_opt(inode->i_sb, XATTR_USER))
 		return -EOPNOTSUPP;
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index ca50c90..430d479 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -30,6 +30,8 @@
 #include "ext4.h"
 #include "xattr.h"
 
+#define DOTDOT_OFFSET 12
+
 static int ext4_dx_readdir(struct file *, struct dir_context *);
 
 /**
@@ -55,6 +57,19 @@
 	return 0;
 }
 
+static bool is_fake_entry(struct inode *dir, ext4_lblk_t lblk,
+			  unsigned int offset, unsigned int blocksize)
+{
+	/* Offsets at or before this value in the first block are . or .. */
+	if (lblk == 0 && offset <= DOTDOT_OFFSET)
+		return true;
+	/* Check if this is likely the csum entry */
+	if (ext4_has_metadata_csum(dir->i_sb) && offset % blocksize ==
+				blocksize - sizeof(struct ext4_dir_entry_tail))
+		return true;
+	return false;
+}
+
 /*
  * Return 0 if the directory entry is OK, and 1 if there is a problem
  *
@@ -67,22 +82,28 @@
 			   struct inode *dir, struct file *filp,
 			   struct ext4_dir_entry_2 *de,
 			   struct buffer_head *bh, char *buf, int size,
+			   ext4_lblk_t lblk,
 			   unsigned int offset)
 {
 	const char *error_msg = NULL;
 	const int rlen = ext4_rec_len_from_disk(de->rec_len,
 						dir->i_sb->s_blocksize);
 	const int next_offset = ((char *) de - buf) + rlen;
+	unsigned int blocksize = dir->i_sb->s_blocksize;
+	bool fake = is_fake_entry(dir, lblk, offset, blocksize);
+	bool next_fake = is_fake_entry(dir, lblk, next_offset, blocksize);
 
-	if (unlikely(rlen < EXT4_DIR_REC_LEN(1)))
+	if (unlikely(rlen < ext4_dir_rec_len(1, fake ? NULL : dir)))
 		error_msg = "rec_len is smaller than minimal";
 	else if (unlikely(rlen % 4 != 0))
 		error_msg = "rec_len % 4 != 0";
-	else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
+	else if (unlikely(rlen < ext4_dir_rec_len(de->name_len,
+							fake ? NULL : dir)))
 		error_msg = "rec_len is too small for name_len";
 	else if (unlikely(next_offset > size))
 		error_msg = "directory entry overrun";
-	else if (unlikely(next_offset > size - EXT4_DIR_REC_LEN(1) &&
+	else if (unlikely(next_offset > size - ext4_dir_rec_len(1,
+						next_fake ? NULL : dir) &&
 			  next_offset != size))
 		error_msg = "directory entry too close to block end";
 	else if (unlikely(le32_to_cpu(de->inode) >
@@ -94,15 +115,15 @@
 	if (filp)
 		ext4_error_file(filp, function, line, bh->b_blocknr,
 				"bad entry in directory: %s - offset=%u, "
-				"inode=%u, rec_len=%d, name_len=%d, size=%d",
+				"inode=%u, rec_len=%d, lblk=%d, size=%d, fake=%d",
 				error_msg, offset, le32_to_cpu(de->inode),
-				rlen, de->name_len, size);
+				rlen, lblk, size, fake);
 	else
 		ext4_error_inode(dir, function, line, bh->b_blocknr,
 				"bad entry in directory: %s - offset=%u, "
-				"inode=%u, rec_len=%d, name_len=%d, size=%d",
+				"inode=%u, rec_len=%d, lblk=%d, size=%d, fake=%d",
 				 error_msg, offset, le32_to_cpu(de->inode),
-				 rlen, de->name_len, size);
+				 rlen, lblk, size, fake);
 
 	return 1;
 }
@@ -226,7 +247,8 @@
 				 * failure will be detected in the
 				 * dirent test below. */
 				if (ext4_rec_len_from_disk(de->rec_len,
-					sb->s_blocksize) < EXT4_DIR_REC_LEN(1))
+					sb->s_blocksize) < ext4_dir_rec_len(1,
+									inode))
 					break;
 				i += ext4_rec_len_from_disk(de->rec_len,
 							    sb->s_blocksize);
@@ -242,7 +264,7 @@
 			de = (struct ext4_dir_entry_2 *) (bh->b_data + offset);
 			if (ext4_check_dir_entry(inode, file, de, bh,
 						 bh->b_data, bh->b_size,
-						 offset)) {
+						 map.m_lblk, offset)) {
 				/*
 				 * On error, skip to the next block
 				 */
@@ -267,7 +289,9 @@
 
 					/* Directory is encrypted */
 					err = fscrypt_fname_disk_to_usr(inode,
-						0, 0, &de_name, &fstr);
+						EXT4_DIRENT_HASH(de),
+						EXT4_DIRENT_MINOR_HASH(de),
+						&de_name, &fstr);
 					de_name = fstr;
 					fstr.len = save_len;
 					if (err)
@@ -643,7 +667,7 @@
 	top = buf + buf_size;
 	while ((char *) de < top) {
 		if (ext4_check_dir_entry(dir, NULL, de, bh,
-					 buf, buf_size, offset))
+					 buf, buf_size, 0, offset))
 			return -EFSCORRUPTED;
 		rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
 		de = (struct ext4_dir_entry_2 *)((char *)de + rlen);
@@ -667,10 +691,3 @@
 	.open		= ext4_dir_open,
 	.release	= ext4_release_dir,
 };
-
-#ifdef CONFIG_UNICODE
-const struct dentry_operations ext4_dentry_ops = {
-	.d_hash = generic_ci_d_hash,
-	.d_compare = generic_ci_d_compare,
-};
-#endif
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 65ecaf9..c21deb2 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2165,6 +2165,17 @@
 	char	name[EXT4_NAME_LEN];	/* File name */
 };
 
+
+/*
+ * Encrypted casefolded entries require saving the hash on disk. This structure
+ * follows ext4_dir_entry_2's name[name_len] at the next 4-byte-aligned
+ * boundary.
+ */
+struct ext4_dir_entry_hash {
+	__le32 hash;
+	__le32 minor_hash;
+};
+
 /*
  * The new version of the directory entry.  Since EXT4 structures are
  * stored in intel byte order, and the name_len field could never be
@@ -2180,6 +2191,22 @@
 };
 
 /*
+ * Access the hashes at the end of ext4_dir_entry_2
+ */
+#define EXT4_DIRENT_HASHES(entry) \
+	((struct ext4_dir_entry_hash *) \
+		(((void *)(entry)) + \
+		((8 + (entry)->name_len + EXT4_DIR_ROUND) & ~EXT4_DIR_ROUND)))
+#define EXT4_DIRENT_HASH(entry) le32_to_cpu(EXT4_DIRENT_HASHES(de)->hash)
+#define EXT4_DIRENT_MINOR_HASH(entry) \
+		le32_to_cpu(EXT4_DIRENT_HASHES(de)->minor_hash)
+
+static inline bool ext4_hash_in_dirent(const struct inode *inode)
+{
+	return IS_CASEFOLDED(inode) && IS_ENCRYPTED(inode);
+}
+
+/*
  * This is a bogus directory entry at the end of each leaf block that
  * records checksums.
  */
@@ -2220,11 +2247,25 @@
  */
 #define EXT4_DIR_PAD			4
 #define EXT4_DIR_ROUND			(EXT4_DIR_PAD - 1)
-#define EXT4_DIR_REC_LEN(name_len)	(((name_len) + 8 + EXT4_DIR_ROUND) & \
-					 ~EXT4_DIR_ROUND)
 #define EXT4_MAX_REC_LEN		((1<<16)-1)
 
 /*
+ * The rec_len depends on the type of directory. Directories that are
+ * casefolded and encrypted also need to store the hash, so we add room for
+ * struct ext4_dir_entry_hash. For entries relating to '.' or '..', pass NULL
+ * for dir, as those entries do not use the extra fields.
+ */
+static inline unsigned int ext4_dir_rec_len(__u8 name_len,
+						const struct inode *dir)
+{
+	int rec_len = (name_len + 8 + EXT4_DIR_ROUND);
+
+	if (dir && ext4_hash_in_dirent(dir))
+		rec_len += sizeof(struct ext4_dir_entry_hash);
+	return (rec_len & ~EXT4_DIR_ROUND);
+}
+
+/*
  * If we ever get support for fs block sizes > page_size, we'll need
  * to remove the #if statements in the next two functions...
  */
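
A quick arithmetic check of ext4_dir_rec_len(), since the rounding plus the
optional hash tail is easy to misread (name lengths chosen for
illustration):

	/*
	 * name_len = 1, dir == NULL (the "." entry):
	 *	(1 + 8 + 3) & ~3 = 12	matching DOTDOT_OFFSET in dir.c:
	 *				".." always begins at byte 12.
	 *
	 * name_len = 7, ordinary directory:
	 *	(7 + 8 + 3) & ~3 = 16
	 *
	 * name_len = 7, casefolded+encrypted directory, where
	 * sizeof(struct ext4_dir_entry_hash) == 8 is added before rounding:
	 *	(7 + 8 + 8 + 3) & ~3 = 24
	 */
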
@@ -2280,6 +2321,7 @@
 #define DX_HASH_LEGACY_UNSIGNED		3
 #define DX_HASH_HALF_MD4_UNSIGNED	4
 #define DX_HASH_TEA_UNSIGNED		5
+#define DX_HASH_SIPHASH			6
 
 static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
 			      const void *address, unsigned int length)
@@ -2334,6 +2376,7 @@
 };
 
 #define fname_name(p) ((p)->disk_name.name)
+#define fname_usr_name(p) ((p)->usr_fname->name)
 #define fname_len(p)  ((p)->disk_name.len)
 
 /*
@@ -2564,9 +2607,9 @@
 ext4_fsblk_t ext4_inode_to_goal_block(struct inode *);
 
 #ifdef CONFIG_UNICODE
-extern void ext4_fname_setup_ci_filename(struct inode *dir,
+extern int ext4_fname_setup_ci_filename(struct inode *dir,
 					 const struct qstr *iname,
-					 struct fscrypt_str *fname);
+					 struct ext4_filename *fname);
 #endif
 
 #ifdef CONFIG_FS_ENCRYPTION
@@ -2597,9 +2640,9 @@
 	ext4_fname_from_fscrypt_name(fname, &name);
 
 #ifdef CONFIG_UNICODE
-	ext4_fname_setup_ci_filename(dir, iname, &fname->cf_name);
+	err = ext4_fname_setup_ci_filename(dir, iname, fname);
 #endif
-	return 0;
+	return err;
 }
 
 static inline int ext4_fname_prepare_lookup(struct inode *dir,
@@ -2616,9 +2659,9 @@
 	ext4_fname_from_fscrypt_name(fname, &name);
 
 #ifdef CONFIG_UNICODE
-	ext4_fname_setup_ci_filename(dir, &dentry->d_name, &fname->cf_name);
+	err = ext4_fname_setup_ci_filename(dir, &dentry->d_name, fname);
 #endif
-	return 0;
+	return err;
 }
 
 static inline void ext4_fname_free_filename(struct ext4_filename *fname)
@@ -2643,15 +2686,16 @@
 					    int lookup,
 					    struct ext4_filename *fname)
 {
+	int err = 0;
 	fname->usr_fname = iname;
 	fname->disk_name.name = (unsigned char *) iname->name;
 	fname->disk_name.len = iname->len;
 
 #ifdef CONFIG_UNICODE
-	ext4_fname_setup_ci_filename(dir, iname, &fname->cf_name);
+	err = ext4_fname_setup_ci_filename(dir, iname, fname);
 #endif
 
-	return 0;
+	return err;
 }
 
 static inline int ext4_fname_prepare_lookup(struct inode *dir,
@@ -2675,21 +2719,22 @@
 				  struct file *,
 				  struct ext4_dir_entry_2 *,
 				  struct buffer_head *, char *, int,
-				  unsigned int);
-#define ext4_check_dir_entry(dir, filp, de, bh, buf, size, offset)	\
+				  ext4_lblk_t, unsigned int);
+#define ext4_check_dir_entry(dir, filp, de, bh, buf, size, lblk, offset) \
 	unlikely(__ext4_check_dir_entry(__func__, __LINE__, (dir), (filp), \
-					(de), (bh), (buf), (size), (offset)))
+				(de), (bh), (buf), (size), (lblk), (offset)))
 extern int ext4_htree_store_dirent(struct file *dir_file, __u32 hash,
 				__u32 minor_hash,
 				struct ext4_dir_entry_2 *dirent,
 				struct fscrypt_str *ent_name);
 extern void ext4_htree_free_dir_info(struct dir_private_info *p);
 extern int ext4_find_dest_de(struct inode *dir, struct inode *inode,
+			     ext4_lblk_t lblk,
 			     struct buffer_head *bh,
 			     void *buf, int buf_size,
 			     struct ext4_filename *fname,
 			     struct ext4_dir_entry_2 **dest_de);
-void ext4_insert_dentry(struct inode *inode,
+void ext4_insert_dentry(struct inode *dir, struct inode *inode,
 			struct ext4_dir_entry_2 *de,
 			int buf_size,
 			struct ext4_filename *fname);
@@ -2911,10 +2956,11 @@
 			   int buf_size,
 			   struct inode *dir,
 			   struct ext4_filename *fname,
-			   unsigned int offset,
+			   ext4_lblk_t lblk, unsigned int offset,
 			   struct ext4_dir_entry_2 **res_dir);
 extern int ext4_generic_delete_entry(struct inode *dir,
 				     struct ext4_dir_entry_2 *de_del,
+				     ext4_lblk_t lblk,
 				     struct buffer_head *bh,
 				     void *entry_buf,
 				     int buf_size,
@@ -3381,10 +3427,6 @@
 /* dir.c */
 extern const struct file_operations ext4_dir_operations;
 
-#ifdef CONFIG_UNICODE
-extern const struct dentry_operations ext4_dentry_ops;
-#endif
-
 /* file.c */
 extern const struct inode_operations ext4_file_inode_operations;
 extern const struct file_operations ext4_file_operations;
@@ -3476,9 +3518,6 @@
 					unsigned int blocksize);
 extern int ext4_handle_dirty_dirblock(handle_t *handle, struct inode *inode,
 				      struct buffer_head *bh);
-extern int ext4_ci_compare(const struct inode *parent,
-			   const struct qstr *fname,
-			   const struct qstr *entry, bool quick);
 extern int __ext4_unlink(handle_t *handle, struct inode *dir, const struct qstr *d_name,
 			 struct inode *inode);
 extern int __ext4_link(struct inode *dir, struct inode *inode,
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 3ed8c04..be77b77 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -36,9 +36,11 @@
 #include "acl.h"
 #include "truncate.h"
 
-static bool ext4_dio_supported(struct inode *inode)
+static bool ext4_dio_supported(struct kiocb *iocb, struct iov_iter *iter)
 {
-	if (IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENCRYPTED(inode))
+	struct inode *inode = file_inode(iocb->ki_filp);
+
+	if (!fscrypt_dio_supported(iocb, iter))
 		return false;
 	if (fsverity_active(inode))
 		return false;
@@ -61,7 +63,7 @@
 		inode_lock_shared(inode);
 	}
 
-	if (!ext4_dio_supported(inode)) {
+	if (!ext4_dio_supported(iocb, to)) {
 		inode_unlock_shared(inode);
 		/*
 		 * Fallback to buffered I/O if the operation being performed on
@@ -495,7 +497,7 @@
 	}
 
 	/* Fallback to buffered I/O if the inode does not support direct I/O. */
-	if (!ext4_dio_supported(inode)) {
+	if (!ext4_dio_supported(iocb, from)) {
 		if (ilock_shared)
 			inode_unlock_shared(inode);
 		else
diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c
index a92eb79..aeec7a2 100644
--- a/fs/ext4/hash.c
+++ b/fs/ext4/hash.c
@@ -197,7 +197,7 @@
  * represented, and whether or not the returned hash is 32 bits or 64
  * bits.  32 bit hashes will return 0 for the minor hash.
  */
-static int __ext4fs_dirhash(const char *name, int len,
+static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len,
 			    struct dx_hash_info *hinfo)
 {
 	__u32	hash;
@@ -259,6 +259,22 @@
 		hash = buf[0];
 		minor_hash = buf[1];
 		break;
+	case DX_HASH_SIPHASH:
+	{
+		struct qstr qname = QSTR_INIT(name, len);
+		__u64	combined_hash;
+
+		if (fscrypt_has_encryption_key(dir)) {
+			combined_hash = fscrypt_fname_siphash(dir, &qname);
+		} else {
+			ext4_warning_inode(dir, "Siphash requires key");
+			return -1;
+		}
+
+		hash = (__u32)(combined_hash >> 32);
+		minor_hash = (__u32)combined_hash;
+		break;
+	}
 	default:
 		hinfo->hash = 0;
 		return -1;
@@ -280,7 +296,8 @@
 	unsigned char *buff;
 	struct qstr qstr = {.name = name, .len = len };
 
-	if (len && IS_CASEFOLDED(dir) && um) {
+	if (len && IS_CASEFOLDED(dir) && um &&
+	    (!IS_ENCRYPTED(dir) || fscrypt_has_encryption_key(dir))) {
 		buff = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL);
 		if (!buff)
 			return -ENOMEM;
@@ -291,12 +308,12 @@
 			goto opaque_seq;
 		}
 
-		r = __ext4fs_dirhash(buff, dlen, hinfo);
+		r = __ext4fs_dirhash(dir, buff, dlen, hinfo);
 
 		kfree(buff);
 		return r;
 	}
 opaque_seq:
 #endif
-	return __ext4fs_dirhash(name, len, hinfo);
+	return __ext4fs_dirhash(dir, name, len, hinfo);
 }
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index b215c56..14aef14 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -457,7 +457,10 @@
 		int ret = -1;
 
 		if (qstr) {
-			hinfo.hash_version = DX_HASH_HALF_MD4;
+			if (ext4_hash_in_dirent(parent))
+				hinfo.hash_version = DX_HASH_SIPHASH;
+			else
+				hinfo.hash_version = DX_HASH_HALF_MD4;
 			hinfo.seed = sbi->s_hash_seed;
 			ext4fs_dirhash(parent, qstr->name, qstr->len, &hinfo);
 			grp = hinfo.hash;
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index b41512d..f3c2a89 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -12,6 +12,7 @@
 #include "ext4.h"
 #include "xattr.h"
 #include "truncate.h"
+#include <trace/events/android_fs.h>
 
 #define EXT4_XATTR_SYSTEM_DATA	"data"
 #define EXT4_MIN_INLINE_DATA_SIZE	((sizeof(__le32) * EXT4_N_BLOCKS))
@@ -505,6 +506,17 @@
 		return -EAGAIN;
 	}
 
+	if (trace_android_fs_dataread_start_enabled()) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_dataread_start(inode, page_offset(page),
+						PAGE_SIZE, current->pid,
+						path, current->comm);
+	}
+
 	/*
 	 * Current inline data can only exist in the 1st page,
 	 * So for all the other pages, just set them uptodate.
@@ -516,6 +528,8 @@
 		SetPageUptodate(page);
 	}
 
+	trace_android_fs_dataread_end(inode, page_offset(page), PAGE_SIZE);
+
 	up_read(&EXT4_I(inode)->xattr_sem);
 
 	unlock_page(page);
@@ -996,7 +1010,7 @@
 			     offset, de_len, de->name_len, de->name,
 			     de->name_len, le32_to_cpu(de->inode));
 		if (ext4_check_dir_entry(dir, NULL, de, bh,
-					 inline_start, inline_size, offset))
+					 inline_start, inline_size, 0, offset))
 			BUG();
 
 		offset += de_len;
@@ -1022,7 +1036,7 @@
 	int		err;
 	struct ext4_dir_entry_2 *de;
 
-	err = ext4_find_dest_de(dir, inode, iloc->bh, inline_start,
+	err = ext4_find_dest_de(dir, inode, 0, iloc->bh, inline_start,
 				inline_size, fname, &de);
 	if (err)
 		return err;
@@ -1031,7 +1045,7 @@
 	err = ext4_journal_get_write_access(handle, iloc->bh);
 	if (err)
 		return err;
-	ext4_insert_dentry(inode, de, inline_size, fname);
+	ext4_insert_dentry(dir, inode, de, inline_size, fname);
 
 	ext4_show_inline_dir(dir, iloc->bh, inline_start, inline_size);
 
@@ -1100,7 +1114,7 @@
 	int old_size = EXT4_I(dir)->i_inline_size - EXT4_MIN_INLINE_DATA_SIZE;
 	int new_size = get_max_inline_xattr_value_size(dir, iloc);
 
-	if (new_size - old_size <= EXT4_DIR_REC_LEN(1))
+	if (new_size - old_size <= ext4_dir_rec_len(1, NULL))
 		return -ENOSPC;
 
 	ret = ext4_update_inline_data(handle, dir,
@@ -1380,8 +1394,8 @@
 			fake.name_len = 1;
 			strcpy(fake.name, ".");
 			fake.rec_len = ext4_rec_len_to_disk(
-						EXT4_DIR_REC_LEN(fake.name_len),
-						inline_size);
+					  ext4_dir_rec_len(fake.name_len, NULL),
+					  inline_size);
 			ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
 			de = &fake;
 			pos = EXT4_INLINE_DOTDOT_OFFSET;
@@ -1390,8 +1404,8 @@
 			fake.name_len = 2;
 			strcpy(fake.name, "..");
 			fake.rec_len = ext4_rec_len_to_disk(
-						EXT4_DIR_REC_LEN(fake.name_len),
-						inline_size);
+					  ext4_dir_rec_len(fake.name_len, NULL),
+					  inline_size);
 			ext4_set_de_type(inode->i_sb, &fake, S_IFDIR);
 			de = &fake;
 			pos = EXT4_INLINE_DOTDOT_SIZE;
@@ -1400,13 +1414,18 @@
 			pos += ext4_rec_len_from_disk(de->rec_len, inline_size);
 			if (ext4_check_dir_entry(inode, dir_file, de,
 					 iloc.bh, dir_buf,
-					 inline_size, pos)) {
+					 inline_size, block, pos)) {
 				ret = count;
 				goto out;
 			}
 		}
 
-		ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
+		if (ext4_hash_in_dirent(dir)) {
+			hinfo->hash = EXT4_DIRENT_HASH(de);
+			hinfo->minor_hash = EXT4_DIRENT_MINOR_HASH(de);
+		} else {
+			ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
+		}
 		if ((hinfo->hash < start_hash) ||
 		    ((hinfo->hash == start_hash) &&
 		     (hinfo->minor_hash < start_minor_hash)))
@@ -1488,8 +1507,8 @@
 	 * So we will use extra_offset and extra_size to indicate them
 	 * during the inline dir iteration.
 	 */
-	dotdot_offset = EXT4_DIR_REC_LEN(1);
-	dotdot_size = dotdot_offset + EXT4_DIR_REC_LEN(2);
+	dotdot_offset = ext4_dir_rec_len(1, NULL);
+	dotdot_size = dotdot_offset + ext4_dir_rec_len(2, NULL);
 	extra_offset = dotdot_size - EXT4_INLINE_DOTDOT_SIZE;
 	extra_size = extra_offset + inline_size;
 
@@ -1524,7 +1543,7 @@
 			 * failure will be detected in the
 			 * dirent test below. */
 			if (ext4_rec_len_from_disk(de->rec_len, extra_size)
-				< EXT4_DIR_REC_LEN(1))
+				< ext4_dir_rec_len(1, NULL))
 				break;
 			i += ext4_rec_len_from_disk(de->rec_len,
 						    extra_size);
@@ -1552,7 +1571,7 @@
 		de = (struct ext4_dir_entry_2 *)
 			(dir_buf + ctx->pos - extra_offset);
 		if (ext4_check_dir_entry(inode, file, de, iloc.bh, dir_buf,
-					 extra_size, ctx->pos))
+					 extra_size, 0, ctx->pos))
 			goto out;
 		if (le32_to_cpu(de->inode)) {
 			if (!dir_emit(ctx, de->name, de->name_len,
@@ -1644,7 +1663,7 @@
 						EXT4_INLINE_DOTDOT_SIZE;
 	inline_size = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DOTDOT_SIZE;
 	ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
-			      dir, fname, 0, res_dir);
+			      dir, fname, 0, 0, res_dir);
 	if (ret == 1)
 		goto out_find;
 	if (ret < 0)
@@ -1657,7 +1676,7 @@
 	inline_size = ext4_get_inline_size(dir) - EXT4_MIN_INLINE_DATA_SIZE;
 
 	ret = ext4_search_dir(iloc.bh, inline_start, inline_size,
-			      dir, fname, 0, res_dir);
+			      dir, fname, 0, 0, res_dir);
 	if (ret == 1)
 		goto out_find;
 
@@ -1706,7 +1725,7 @@
 	if (err)
 		goto out;
 
-	err = ext4_generic_delete_entry(dir, de_del, bh,
+	err = ext4_generic_delete_entry(dir, de_del, 0, bh,
 					inline_start, inline_size, 0);
 	if (err)
 		goto out;
@@ -1791,7 +1810,7 @@
 					   &inline_pos, &inline_size);
 		if (ext4_check_dir_entry(dir, NULL, de,
 					 iloc.bh, inline_pos,
-					 inline_size, offset)) {
+					 inline_size, 0, offset)) {
 			ext4_warning(dir->i_sb,
 				     "bad inline directory (dir #%lu) - "
 				     "inode %u, rec_len %u, name_len %d"
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0d8385a..5c70567 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -47,6 +47,7 @@
 #include "truncate.h"
 
 #include <trace/events/ext4.h>
+#include <trace/events/android_fs.h>
 
 static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
 			      struct ext4_inode_info *ei)
@@ -1135,6 +1136,16 @@
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
 		return -EIO;
 
+	if (trace_android_fs_datawrite_start_enabled()) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_datawrite_start(inode, pos, len,
+						 current->pid, path,
+						 current->comm);
+	}
 	trace_ext4_write_begin(inode, pos, len, flags);
 	/*
 	 * Reserve one block more for addition to orphan list in case
@@ -1277,6 +1288,7 @@
 	int inline_data = ext4_has_inline_data(inode);
 	bool verity = ext4_verity_in_progress(inode);
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_ext4_write_end(inode, pos, len, copied);
 	if (inline_data) {
 		ret = ext4_write_inline_data_end(inode, pos, len,
@@ -1387,6 +1399,7 @@
 	int inline_data = ext4_has_inline_data(inode);
 	bool verity = ext4_verity_in_progress(inode);
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_ext4_journalled_write_end(inode, pos, len, copied);
 	from = pos & (PAGE_SIZE - 1);
 	to = from + len;
@@ -2953,6 +2966,16 @@
 					len, flags, pagep, fsdata);
 	}
 	*fsdata = (void *)0;
+	if (trace_android_fs_datawrite_start_enabled()) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_datawrite_start(inode, pos, len,
+						 current->pid,
+						 path, current->comm);
+	}
 	trace_ext4_da_write_begin(inode, pos, len, flags);
 
 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -3071,6 +3094,7 @@
 		return ext4_write_end(file, mapping, pos,
 				      len, copied, page, fsdata);
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_ext4_da_write_end(inode, pos, len, copied);
 	start = pos & (PAGE_SIZE - 1);
 	end = start + copied - 1;
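
The begin/end tracing added to each write path above follows a single idiom; factored out, it would look like the sketch below (the helper name is invented, everything it calls appears verbatim in the hunks). The matching trace_android_fs_datawrite_end() calls are unconditional, since an end event must pair with any start that fired.

/*
 * Illustrative only: building the pathname is comparatively expensive,
 * so do it only when the tracepoint is actually enabled.
 */
static inline void sketch_trace_datawrite_start(struct inode *inode,
						loff_t pos, unsigned int len)
{
	if (trace_android_fs_datawrite_start_enabled()) {
		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];

		path = android_fstrace_get_pathname(pathbuf,
						    MAX_TRACE_PATHBUF_LEN,
						    inode);
		trace_android_fs_datawrite_start(inode, pos, len,
						 current->pid, path,
						 current->comm);
	}
}
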
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 3350926..0e3982f 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -284,9 +284,11 @@
 		       unsigned blocksize, struct dx_hash_info *hinfo,
 		       struct dx_map_entry map[]);
 static void dx_sort_map(struct dx_map_entry *map, unsigned count);
-static struct ext4_dir_entry_2 *dx_move_dirents(char *from, char *to,
-		struct dx_map_entry *offsets, int count, unsigned blocksize);
-static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize);
+static struct ext4_dir_entry_2 *dx_move_dirents(struct inode *dir, char *from,
+					char *to, struct dx_map_entry *offsets,
+					int count, unsigned int blocksize);
+static struct ext4_dir_entry_2 *dx_pack_dirents(struct inode *dir, char *base,
+						unsigned int blocksize);
 static void dx_insert_block(struct dx_frame *frame,
 					u32 hash, ext4_lblk_t block);
 static int ext4_htree_next_block(struct inode *dir, __u32 hash,
@@ -295,7 +297,7 @@
 				 __u32 *start_hash);
 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
 		struct ext4_filename *fname,
-		struct ext4_dir_entry_2 **res_dir);
+		struct ext4_dir_entry_2 **res_dir, ext4_lblk_t *lblk);
 static int ext4_dx_add_entry(handle_t *handle, struct ext4_filename *fname,
 			     struct inode *dir, struct inode *inode);
 
@@ -578,8 +580,9 @@
 
 static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
 {
-	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
-		EXT4_DIR_REC_LEN(2) - infosize;
+	unsigned int entry_space = dir->i_sb->s_blocksize -
+			ext4_dir_rec_len(1, NULL) -
+			ext4_dir_rec_len(2, NULL) - infosize;
 
 	if (ext4_has_metadata_csum(dir->i_sb))
 		entry_space -= sizeof(struct dx_tail);
@@ -588,7 +591,8 @@
 
 static inline unsigned dx_node_limit(struct inode *dir)
 {
-	unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
+	unsigned int entry_space = dir->i_sb->s_blocksize -
+			ext4_dir_rec_len(0, dir);
 
 	if (ext4_has_metadata_csum(dir->i_sb))
 		entry_space -= sizeof(struct dx_tail);
@@ -683,7 +687,10 @@
 						name = fname_crypto_str.name;
 						len = fname_crypto_str.len;
 					}
-					ext4fs_dirhash(dir, de->name,
+					if (IS_CASEFOLDED(dir))
+						h.hash = EXT4_DIRENT_HASH(de);
+					else
+						ext4fs_dirhash(dir, de->name,
 						       de->name_len, &h);
 					printk("%*.s:(E)%x.%u ", len, name,
 					       h.hash, (unsigned) ((char *) de
@@ -699,7 +706,7 @@
 				       (unsigned) ((char *) de - base));
 #endif
 			}
-			space += EXT4_DIR_REC_LEN(de->name_len);
+			space += ext4_dir_rec_len(de->name_len, dir);
 			names++;
 		}
 		de = ext4_next_entry(de, size);
@@ -771,18 +778,34 @@
 	root = (struct dx_root *) frame->bh->b_data;
 	if (root->info.hash_version != DX_HASH_TEA &&
 	    root->info.hash_version != DX_HASH_HALF_MD4 &&
-	    root->info.hash_version != DX_HASH_LEGACY) {
+	    root->info.hash_version != DX_HASH_LEGACY &&
+	    root->info.hash_version != DX_HASH_SIPHASH) {
 		ext4_warning_inode(dir, "Unrecognised inode hash code %u",
 				   root->info.hash_version);
 		goto fail;
 	}
+	if (ext4_hash_in_dirent(dir)) {
+		if (root->info.hash_version != DX_HASH_SIPHASH) {
+			ext4_warning_inode(dir,
+				"Hash in dirent, but hash is not SIPHASH");
+			goto fail;
+		}
+	} else {
+		if (root->info.hash_version == DX_HASH_SIPHASH) {
+			ext4_warning_inode(dir,
+				"Hash code is SIPHASH, but hash not in dirent");
+			goto fail;
+		}
+	}
 	if (fname)
 		hinfo = &fname->hinfo;
 	hinfo->hash_version = root->info.hash_version;
 	if (hinfo->hash_version <= DX_HASH_TEA)
 		hinfo->hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
 	hinfo->seed = EXT4_SB(dir->i_sb)->s_hash_seed;
-	if (fname && fname_name(fname))
+	/* hash is already computed for encrypted casefolded directory */
+	if (fname && fname_name(fname) &&
+				!(IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)))
 		ext4fs_dirhash(dir, fname_name(fname), fname_len(fname), hinfo);
 	hash = hinfo->hash;
 
@@ -997,6 +1020,7 @@
 	struct ext4_dir_entry_2 *de, *top;
 	int err = 0, count = 0;
 	struct fscrypt_str fname_crypto_str = FSTR_INIT(NULL, 0), tmp_str;
+	int csum = ext4_has_metadata_csum(dir->i_sb);
 
 	dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
 							(unsigned long)block));
@@ -1005,9 +1029,11 @@
 		return PTR_ERR(bh);
 
 	de = (struct ext4_dir_entry_2 *) bh->b_data;
+	/* csum entries are not larger in the casefolded encrypted case */
 	top = (struct ext4_dir_entry_2 *) ((char *) de +
 					   dir->i_sb->s_blocksize -
-					   EXT4_DIR_REC_LEN(0));
+					   ext4_dir_rec_len(0,
+							   csum ? NULL : dir));
 	/* Check if the directory is encrypted */
 	if (IS_ENCRYPTED(dir)) {
 		err = fscrypt_get_encryption_info(dir);
@@ -1025,13 +1051,23 @@
 
 	for (; de < top; de = ext4_next_entry(de, dir->i_sb->s_blocksize)) {
 		if (ext4_check_dir_entry(dir, NULL, de, bh,
-				bh->b_data, bh->b_size,
+				bh->b_data, bh->b_size, block,
 				(block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb))
 					 + ((char *)de - bh->b_data))) {
 			/* silently ignore the rest of the block */
 			break;
 		}
-		ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
+		if (ext4_hash_in_dirent(dir)) {
+			if (de->name_len && de->inode) {
+				hinfo->hash = EXT4_DIRENT_HASH(de);
+				hinfo->minor_hash = EXT4_DIRENT_MINOR_HASH(de);
+			} else {
+				hinfo->hash = 0;
+				hinfo->minor_hash = 0;
+			}
+		} else {
+			ext4fs_dirhash(dir, de->name, de->name_len, hinfo);
+		}
 		if ((hinfo->hash < start_hash) ||
 		    ((hinfo->hash == start_hash) &&
 		     (hinfo->minor_hash < start_minor_hash)))
@@ -1100,7 +1136,11 @@
 		       start_hash, start_minor_hash));
 	dir = file_inode(dir_file);
 	if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) {
-		hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
+		if (ext4_hash_in_dirent(dir))
+			hinfo.hash_version = DX_HASH_SIPHASH;
+		else
+			hinfo.hash_version =
+					EXT4_SB(dir->i_sb)->s_def_hash_version;
 		if (hinfo.hash_version <= DX_HASH_TEA)
 			hinfo.hash_version +=
 				EXT4_SB(dir->i_sb)->s_hash_unsigned;
@@ -1193,11 +1233,12 @@
 static inline int search_dirblock(struct buffer_head *bh,
 				  struct inode *dir,
 				  struct ext4_filename *fname,
+				  ext4_lblk_t lblk,
 				  unsigned int offset,
 				  struct ext4_dir_entry_2 **res_dir)
 {
 	return ext4_search_dir(bh, bh->b_data, dir->i_sb->s_blocksize, dir,
-			       fname, offset, res_dir);
+			       fname, lblk, offset, res_dir);
 }
 
 /*
@@ -1218,7 +1259,10 @@
 
 	while ((char *) de < base + blocksize) {
 		if (de->name_len && de->inode) {
-			ext4fs_dirhash(dir, de->name, de->name_len, &h);
+			if (ext4_hash_in_dirent(dir))
+				h.hash = EXT4_DIRENT_HASH(de);
+			else
+				ext4fs_dirhash(dir, de->name, de->name_len, &h);
 			map_tail--;
 			map_tail->hash = h.hash;
 			map_tail->offs = ((char *) de - base)>>2;
@@ -1282,47 +1326,66 @@
  * Returns: 0 if the directory entry matches, more than 0 if it
  * doesn't match or less than zero on error.
  */
-int ext4_ci_compare(const struct inode *parent, const struct qstr *name,
-		    const struct qstr *entry, bool quick)
+static int ext4_ci_compare(const struct inode *parent, const struct qstr *name,
+			   u8 *de_name, size_t de_name_len, bool quick)
 {
 	const struct super_block *sb = parent->i_sb;
 	const struct unicode_map *um = sb->s_encoding;
+	struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len);
+	struct qstr entry = QSTR_INIT(de_name, de_name_len);
 	int ret;
 
-	if (quick)
-		ret = utf8_strncasecmp_folded(um, name, entry);
-	else
-		ret = utf8_strncasecmp(um, name, entry);
+	if (IS_ENCRYPTED(parent)) {
+		const struct fscrypt_str encrypted_name =
+				FSTR_INIT(de_name, de_name_len);
 
+		decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL);
+		if (!decrypted_name.name)
+			return -ENOMEM;
+		ret = fscrypt_fname_disk_to_usr(parent, 0, 0, &encrypted_name,
+						&decrypted_name);
+		if (ret < 0)
+			goto out;
+		entry.name = decrypted_name.name;
+		entry.len = decrypted_name.len;
+	}
+
+	if (quick)
+		ret = utf8_strncasecmp_folded(um, name, &entry);
+	else
+		ret = utf8_strncasecmp(um, name, &entry);
 	if (ret < 0) {
 		/* Handle invalid character sequence as either an error
 		 * or as an opaque byte sequence.
 		 */
 		if (sb_has_strict_encoding(sb))
-			return -EINVAL;
-
-		if (name->len != entry->len)
-			return 1;
-
-		return !!memcmp(name->name, entry->name, name->len);
+			ret = -EINVAL;
+		else if (name->len != entry.len)
+			ret = 1;
+		else
+			ret = !!memcmp(name->name, entry.name, entry.len);
 	}
-
+out:
+	kfree(decrypted_name.name);
 	return ret;
 }
 
-void ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname,
-				  struct fscrypt_str *cf_name)
+int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname,
+				  struct ext4_filename *name)
 {
+	struct fscrypt_str *cf_name = &name->cf_name;
+	struct dx_hash_info *hinfo = &name->hinfo;
 	int len;
 
-	if (!IS_CASEFOLDED(dir) || !dir->i_sb->s_encoding) {
+	if (!IS_CASEFOLDED(dir) || !dir->i_sb->s_encoding ||
+	    (IS_ENCRYPTED(dir) && !fscrypt_has_encryption_key(dir))) {
 		cf_name->name = NULL;
-		return;
+		return 0;
 	}
 
 	cf_name->name = kmalloc(EXT4_NAME_LEN, GFP_NOFS);
 	if (!cf_name->name)
-		return;
+		return -ENOMEM;
 
 	len = utf8_casefold(dir->i_sb->s_encoding,
 			    iname, cf_name->name,
@@ -1330,10 +1393,18 @@
 	if (len <= 0) {
 		kfree(cf_name->name);
 		cf_name->name = NULL;
-		return;
 	}
 	cf_name->len = (unsigned) len;
+	if (!IS_ENCRYPTED(dir))
+		return 0;
 
+	hinfo->hash_version = DX_HASH_SIPHASH;
+	hinfo->seed = NULL;
+	if (cf_name->name)
+		ext4fs_dirhash(dir, cf_name->name, cf_name->len, hinfo);
+	else
+		ext4fs_dirhash(dir, iname->name, iname->len, hinfo);
+	return 0;
 }
 #endif
 
@@ -1342,14 +1413,11 @@
  *
  * Return: %true if the directory entry matches, otherwise %false.
  */
-static inline bool ext4_match(const struct inode *parent,
+static bool ext4_match(struct inode *parent,
 			      const struct ext4_filename *fname,
-			      const struct ext4_dir_entry_2 *de)
+			      struct ext4_dir_entry_2 *de)
 {
 	struct fscrypt_name f;
-#ifdef CONFIG_UNICODE
-	const struct qstr entry = {.name = de->name, .len = de->name_len};
-#endif
 
 	if (!de->inode)
 		return false;
@@ -1361,14 +1429,24 @@
 #endif
 
 #ifdef CONFIG_UNICODE
-	if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent)) {
+	if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent) &&
+	    (!IS_ENCRYPTED(parent) || fscrypt_has_encryption_key(parent))) {
 		if (fname->cf_name.name) {
 			struct qstr cf = {.name = fname->cf_name.name,
 					  .len = fname->cf_name.len};
-			return !ext4_ci_compare(parent, &cf, &entry, true);
+			if (IS_ENCRYPTED(parent)) {
+				if (fname->hinfo.hash != EXT4_DIRENT_HASH(de) ||
+					fname->hinfo.minor_hash !=
+						EXT4_DIRENT_MINOR_HASH(de)) {
+
+					return 0;
+				}
+			}
+			return !ext4_ci_compare(parent, &cf, de->name,
+							de->name_len, true);
 		}
-		return !ext4_ci_compare(parent, fname->usr_fname, &entry,
-					false);
+		return !ext4_ci_compare(parent, fname->usr_fname, de->name,
+						de->name_len, false);
 	}
 #endif
 
@@ -1380,7 +1458,8 @@
  */
 int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
 		    struct inode *dir, struct ext4_filename *fname,
-		    unsigned int offset, struct ext4_dir_entry_2 **res_dir)
+		    ext4_lblk_t lblk, unsigned int offset,
+		    struct ext4_dir_entry_2 **res_dir)
 {
 	struct ext4_dir_entry_2 * de;
 	char * dlimit;
@@ -1396,7 +1475,7 @@
 			/* found a match - just to be sure, do
 			 * a full check */
 			if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
-						 buf_size, offset))
+						 buf_size, lblk, offset))
 				return -1;
 			*res_dir = de;
 			return 1;
@@ -1442,7 +1521,7 @@
 static struct buffer_head *__ext4_find_entry(struct inode *dir,
 					     struct ext4_filename *fname,
 					     struct ext4_dir_entry_2 **res_dir,
-					     int *inlined)
+					     int *inlined, ext4_lblk_t *lblk)
 {
 	struct super_block *sb;
 	struct buffer_head *bh_use[NAMEI_RA_SIZE];
@@ -1466,6 +1545,8 @@
 		int has_inline_data = 1;
 		ret = ext4_find_inline_entry(dir, fname, res_dir,
 					     &has_inline_data);
+		if (lblk)
+			*lblk = 0;
 		if (has_inline_data) {
 			if (inlined)
 				*inlined = 1;
@@ -1484,7 +1565,7 @@
 		goto restart;
 	}
 	if (is_dx(dir)) {
-		ret = ext4_dx_find_entry(dir, fname, res_dir);
+		ret = ext4_dx_find_entry(dir, fname, res_dir, lblk);
 		/*
 		 * On success, or if the error was file not found,
 		 * return.  Otherwise, fall back to doing a search the
@@ -1550,9 +1631,11 @@
 			goto cleanup_and_exit;
 		}
 		set_buffer_verified(bh);
-		i = search_dirblock(bh, dir, fname,
+		i = search_dirblock(bh, dir, fname, block,
 			    block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
 		if (i == 1) {
+			if (lblk)
+				*lblk = block;
 			EXT4_I(dir)->i_dir_start_lookup = block;
 			ret = bh;
 			goto cleanup_and_exit;
@@ -1587,7 +1670,7 @@
 static struct buffer_head *ext4_find_entry(struct inode *dir,
 					   const struct qstr *d_name,
 					   struct ext4_dir_entry_2 **res_dir,
-					   int *inlined)
+					   int *inlined, ext4_lblk_t *lblk)
 {
 	int err;
 	struct ext4_filename fname;
@@ -1599,7 +1682,7 @@
 	if (err)
 		return ERR_PTR(err);
 
-	bh = __ext4_find_entry(dir, &fname, res_dir, inlined);
+	bh = __ext4_find_entry(dir, &fname, res_dir, inlined, lblk);
 
 	ext4_fname_free_filename(&fname);
 	return bh;
@@ -1614,12 +1697,13 @@
 	struct buffer_head *bh;
 
 	err = ext4_fname_prepare_lookup(dir, dentry, &fname);
+	generic_set_encrypted_ci_d_ops(dentry);
 	if (err == -ENOENT)
 		return NULL;
 	if (err)
 		return ERR_PTR(err);
 
-	bh = __ext4_find_entry(dir, &fname, res_dir, NULL);
+	bh = __ext4_find_entry(dir, &fname, res_dir, NULL, NULL);
 
 	ext4_fname_free_filename(&fname);
 	return bh;
@@ -1627,7 +1711,7 @@
 
 static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
 			struct ext4_filename *fname,
-			struct ext4_dir_entry_2 **res_dir)
+			struct ext4_dir_entry_2 **res_dir, ext4_lblk_t *lblk)
 {
 	struct super_block * sb = dir->i_sb;
 	struct dx_frame frames[EXT4_HTREE_LEVEL], *frame;
@@ -1643,11 +1727,13 @@
 		return (struct buffer_head *) frame;
 	do {
 		block = dx_get_block(frame->at);
+		if (lblk)
+			*lblk = block;
 		bh = ext4_read_dirblock(dir, block, DIRENT_HTREE);
 		if (IS_ERR(bh))
 			goto errout;
 
-		retval = search_dirblock(bh, dir, fname,
+		retval = search_dirblock(bh, dir, fname, block,
 					 block << EXT4_BLOCK_SIZE_BITS(sb),
 					 res_dir);
 		if (retval == 1)
@@ -1742,7 +1828,7 @@
 	struct ext4_dir_entry_2 * de;
 	struct buffer_head *bh;
 
-	bh = ext4_find_entry(d_inode(child), &dotdot, &de, NULL);
+	bh = ext4_find_entry(d_inode(child), &dotdot, &de, NULL, NULL);
 	if (IS_ERR(bh))
 		return ERR_CAST(bh);
 	if (!bh)
@@ -1764,7 +1850,8 @@
  * Returns pointer to last entry moved.
  */
 static struct ext4_dir_entry_2 *
-dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count,
+dx_move_dirents(struct inode *dir, char *from, char *to,
+		struct dx_map_entry *map, int count,
 		unsigned blocksize)
 {
 	unsigned rec_len = 0;
@@ -1772,7 +1859,8 @@
 	while (count--) {
 		struct ext4_dir_entry_2 *de = (struct ext4_dir_entry_2 *)
 						(from + (map->offs<<2));
-		rec_len = EXT4_DIR_REC_LEN(de->name_len);
+		rec_len = ext4_dir_rec_len(de->name_len, dir);
+
 		memcpy (to, de, rec_len);
 		((struct ext4_dir_entry_2 *) to)->rec_len =
 				ext4_rec_len_to_disk(rec_len, blocksize);
@@ -1787,7 +1875,8 @@
  * Compact each dir entry in the range to the minimal rec_len.
  * Returns pointer to last entry in range.
  */
-static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
+static struct ext4_dir_entry_2 *dx_pack_dirents(struct inode *dir, char *base,
+							unsigned int blocksize)
 {
 	struct ext4_dir_entry_2 *next, *to, *prev, *de = (struct ext4_dir_entry_2 *) base;
 	unsigned rec_len = 0;
@@ -1796,7 +1885,7 @@
 	while ((char*)de < base + blocksize) {
 		next = ext4_next_entry(de, blocksize);
 		if (de->inode && de->name_len) {
-			rec_len = EXT4_DIR_REC_LEN(de->name_len);
+			rec_len = ext4_dir_rec_len(de->name_len, dir);
 			if (de > to)
 				memmove(to, de, rec_len);
 			to->rec_len = ext4_rec_len_to_disk(rec_len, blocksize);
@@ -1814,13 +1903,12 @@
  * Returns pointer to de in block into which the new entry will be inserted.
  */
 static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
-			struct buffer_head **bh,struct dx_frame *frame,
-			struct dx_hash_info *hinfo)
+			struct buffer_head **bh, struct dx_frame *frame,
+			struct dx_hash_info *hinfo, ext4_lblk_t *newblock)
 {
 	unsigned blocksize = dir->i_sb->s_blocksize;
 	unsigned count, continued;
 	struct buffer_head *bh2;
-	ext4_lblk_t newblock;
 	u32 hash2;
 	struct dx_map_entry *map;
 	char *data1 = (*bh)->b_data, *data2;
@@ -1832,7 +1920,7 @@
 	if (ext4_has_metadata_csum(dir->i_sb))
 		csum_size = sizeof(struct ext4_dir_entry_tail);
 
-	bh2 = ext4_append(handle, dir, &newblock);
+	bh2 = ext4_append(handle, dir, newblock);
 	if (IS_ERR(bh2)) {
 		brelse(*bh);
 		*bh = NULL;
@@ -1886,9 +1974,9 @@
 					hash2, split, count-split));
 
 	/* Fancy dance to stay within two buffers */
-	de2 = dx_move_dirents(data1, data2, map + split, count - split,
+	de2 = dx_move_dirents(dir, data1, data2, map + split, count - split,
 			      blocksize);
-	de = dx_pack_dirents(data1, blocksize);
+	de = dx_pack_dirents(dir, data1, blocksize);
 	de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
 					   (char *) de,
 					   blocksize);
@@ -1910,7 +1998,7 @@
 		swap(*bh, bh2);
 		de = de2;
 	}
-	dx_insert_block(frame, hash2 + continued, newblock);
+	dx_insert_block(frame, hash2 + continued, *newblock);
 	err = ext4_handle_dirty_dirblock(handle, dir, bh2);
 	if (err)
 		goto journal_error;
@@ -1930,13 +2018,14 @@
 }
 
 int ext4_find_dest_de(struct inode *dir, struct inode *inode,
+		      ext4_lblk_t lblk,
 		      struct buffer_head *bh,
 		      void *buf, int buf_size,
 		      struct ext4_filename *fname,
 		      struct ext4_dir_entry_2 **dest_de)
 {
 	struct ext4_dir_entry_2 *de;
-	unsigned short reclen = EXT4_DIR_REC_LEN(fname_len(fname));
+	unsigned short reclen = ext4_dir_rec_len(fname_len(fname), dir);
 	int nlen, rlen;
 	unsigned int offset = 0;
 	char *top;
@@ -1945,11 +2034,11 @@
 	top = buf + buf_size - reclen;
 	while ((char *) de <= top) {
 		if (ext4_check_dir_entry(dir, NULL, de, bh,
-					 buf, buf_size, offset))
+					 buf, buf_size, lblk, offset))
 			return -EFSCORRUPTED;
 		if (ext4_match(dir, fname, de))
 			return -EEXIST;
-		nlen = EXT4_DIR_REC_LEN(de->name_len);
+		nlen = ext4_dir_rec_len(de->name_len, dir);
 		rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
 		if ((de->inode ? rlen - nlen : rlen) >= reclen)
 			break;
@@ -1963,7 +2052,8 @@
 	return 0;
 }
 
-void ext4_insert_dentry(struct inode *inode,
+void ext4_insert_dentry(struct inode *dir,
+			struct inode *inode,
 			struct ext4_dir_entry_2 *de,
 			int buf_size,
 			struct ext4_filename *fname)
@@ -1971,7 +2061,7 @@
 
 	int nlen, rlen;
 
-	nlen = EXT4_DIR_REC_LEN(de->name_len);
+	nlen = ext4_dir_rec_len(de->name_len, dir);
 	rlen = ext4_rec_len_from_disk(de->rec_len, buf_size);
 	if (de->inode) {
 		struct ext4_dir_entry_2 *de1 =
@@ -1985,6 +2075,13 @@
 	ext4_set_de_type(inode->i_sb, de, inode->i_mode);
 	de->name_len = fname_len(fname);
 	memcpy(de->name, fname_name(fname), fname_len(fname));
+	if (ext4_hash_in_dirent(dir)) {
+		struct dx_hash_info *hinfo = &fname->hinfo;
+
+		EXT4_DIRENT_HASHES(de)->hash = cpu_to_le32(hinfo->hash);
+		EXT4_DIRENT_HASHES(de)->minor_hash =
+						cpu_to_le32(hinfo->minor_hash);
+	}
 }
 
 /*
@@ -1998,6 +2095,7 @@
 static int add_dirent_to_buf(handle_t *handle, struct ext4_filename *fname,
 			     struct inode *dir,
 			     struct inode *inode, struct ext4_dir_entry_2 *de,
+			     ext4_lblk_t blk,
 			     struct buffer_head *bh)
 {
 	unsigned int	blocksize = dir->i_sb->s_blocksize;
@@ -2008,7 +2106,7 @@
 		csum_size = sizeof(struct ext4_dir_entry_tail);
 
 	if (!de) {
-		err = ext4_find_dest_de(dir, inode, bh, bh->b_data,
+		err = ext4_find_dest_de(dir, inode, blk, bh, bh->b_data,
 					blocksize - csum_size, fname, &de);
 		if (err)
 			return err;
@@ -2021,7 +2119,7 @@
 	}
 
 	/* By now the buffer is marked for journaling */
-	ext4_insert_dentry(inode, de, blocksize, fname);
+	ext4_insert_dentry(dir, inode, de, blocksize, fname);
 
 	/*
 	 * XXX shouldn't update any times until successful
@@ -2113,11 +2211,16 @@
 
 	/* Initialize the root; the dot dirents already exist */
 	de = (struct ext4_dir_entry_2 *) (&root->dotdot);
-	de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
-					   blocksize);
+	de->rec_len = ext4_rec_len_to_disk(
+			blocksize - ext4_dir_rec_len(2, NULL), blocksize);
 	memset (&root->info, 0, sizeof(root->info));
 	root->info.info_length = sizeof(root->info);
-	root->info.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
+	if (ext4_hash_in_dirent(dir))
+		root->info.hash_version = DX_HASH_SIPHASH;
+	else
+		root->info.hash_version =
+				EXT4_SB(dir->i_sb)->s_def_hash_version;
+
 	entries = root->entries;
 	dx_set_block(entries, 1);
 	dx_set_count(entries, 1);
@@ -2128,7 +2231,11 @@
 	if (fname->hinfo.hash_version <= DX_HASH_TEA)
 		fname->hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
 	fname->hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
-	ext4fs_dirhash(dir, fname_name(fname), fname_len(fname), &fname->hinfo);
+
+	/* casefolded encrypted hashes are computed on fname setup */
+	if (!ext4_hash_in_dirent(dir))
+		ext4fs_dirhash(dir, fname_name(fname),
+				fname_len(fname), &fname->hinfo);
 
 	memset(frames, 0, sizeof(frames));
 	frame = frames;
@@ -2143,13 +2250,13 @@
 	if (retval)
 		goto out_frames;	
 
-	de = do_split(handle,dir, &bh2, frame, &fname->hinfo);
+	de = do_split(handle, dir, &bh2, frame, &fname->hinfo, &block);
 	if (IS_ERR(de)) {
 		retval = PTR_ERR(de);
 		goto out_frames;
 	}
 
-	retval = add_dirent_to_buf(handle, fname, dir, inode, de, bh2);
+	retval = add_dirent_to_buf(handle, fname, dir, inode, de, block, bh2);
 out_frames:
 	/*
 	 * Even if the block split failed, we have to properly write
@@ -2246,7 +2353,7 @@
 			goto out;
 		}
 		retval = add_dirent_to_buf(handle, &fname, dir, inode,
-					   NULL, bh);
+					   NULL, block, bh);
 		if (retval != -ENOSPC)
 			goto out;
 
@@ -2273,7 +2380,7 @@
 	if (csum_size)
 		ext4_initialize_dirent_tail(bh, blocksize);
 
-	retval = add_dirent_to_buf(handle, &fname, dir, inode, de, bh);
+	retval = add_dirent_to_buf(handle, &fname, dir, inode, de, block, bh);
 out:
 	ext4_fname_free_filename(&fname);
 	brelse(bh);
@@ -2295,6 +2402,7 @@
 	struct ext4_dir_entry_2 *de;
 	int restart;
 	int err;
+	ext4_lblk_t lblk;
 
 again:
 	restart = 0;
@@ -2303,7 +2411,8 @@
 		return PTR_ERR(frame);
 	entries = frame->entries;
 	at = frame->at;
-	bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT_HTREE);
+	lblk = dx_get_block(frame->at);
+	bh = ext4_read_dirblock(dir, lblk, DIRENT_HTREE);
 	if (IS_ERR(bh)) {
 		err = PTR_ERR(bh);
 		bh = NULL;
@@ -2315,7 +2424,7 @@
 	if (err)
 		goto journal_error;
 
-	err = add_dirent_to_buf(handle, fname, dir, inode, NULL, bh);
+	err = add_dirent_to_buf(handle, fname, dir, inode, NULL, lblk, bh);
 	if (err != -ENOSPC)
 		goto cleanup;
 
@@ -2435,12 +2544,12 @@
 			goto journal_error;
 		}
 	}
-	de = do_split(handle, dir, &bh, frame, &fname->hinfo);
+	de = do_split(handle, dir, &bh, frame, &fname->hinfo, &lblk);
 	if (IS_ERR(de)) {
 		err = PTR_ERR(de);
 		goto cleanup;
 	}
-	err = add_dirent_to_buf(handle, fname, dir, inode, de, bh);
+	err = add_dirent_to_buf(handle, fname, dir, inode, de, lblk, bh);
 	goto cleanup;
 
 journal_error:
@@ -2462,6 +2571,7 @@
  */
 int ext4_generic_delete_entry(struct inode *dir,
 			      struct ext4_dir_entry_2 *de_del,
+			      ext4_lblk_t lblk,
 			      struct buffer_head *bh,
 			      void *entry_buf,
 			      int buf_size,
@@ -2476,7 +2586,7 @@
 	de = (struct ext4_dir_entry_2 *)entry_buf;
 	while (i < buf_size - csum_size) {
 		if (ext4_check_dir_entry(dir, NULL, de, bh,
-					 entry_buf, buf_size, i))
+					 entry_buf, buf_size, lblk, i))
 			return -EFSCORRUPTED;
 		if (de == de_del)  {
 			if (pde)
@@ -2501,6 +2611,7 @@
 static int ext4_delete_entry(handle_t *handle,
 			     struct inode *dir,
 			     struct ext4_dir_entry_2 *de_del,
+			     ext4_lblk_t lblk,
 			     struct buffer_head *bh)
 {
 	int err, csum_size = 0;
@@ -2521,7 +2632,7 @@
 	if (unlikely(err))
 		goto out;
 
-	err = ext4_generic_delete_entry(dir, de_del, bh, bh->b_data,
+	err = ext4_generic_delete_entry(dir, de_del, lblk, bh, bh->b_data,
 					dir->i_sb->s_blocksize, csum_size);
 	if (err)
 		goto out;
@@ -2718,7 +2829,7 @@
 {
 	de->inode = cpu_to_le32(inode->i_ino);
 	de->name_len = 1;
-	de->rec_len = ext4_rec_len_to_disk(EXT4_DIR_REC_LEN(de->name_len),
+	de->rec_len = ext4_rec_len_to_disk(ext4_dir_rec_len(de->name_len, NULL),
 					   blocksize);
 	strcpy(de->name, ".");
 	ext4_set_de_type(inode->i_sb, de, S_IFDIR);
@@ -2728,11 +2839,12 @@
 	de->name_len = 2;
 	if (!dotdot_real_len)
 		de->rec_len = ext4_rec_len_to_disk(blocksize -
-					(csum_size + EXT4_DIR_REC_LEN(1)),
+					(csum_size + ext4_dir_rec_len(1, NULL)),
 					blocksize);
 	else
 		de->rec_len = ext4_rec_len_to_disk(
-				EXT4_DIR_REC_LEN(de->name_len), blocksize);
+					ext4_dir_rec_len(de->name_len, NULL),
+					blocksize);
 	strcpy(de->name, "..");
 	ext4_set_de_type(inode->i_sb, de, S_IFDIR);
 
@@ -2864,7 +2976,8 @@
 	}
 
 	sb = inode->i_sb;
-	if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2)) {
+	if (inode->i_size < ext4_dir_rec_len(1, NULL) +
+					ext4_dir_rec_len(2, NULL)) {
 		EXT4_ERROR_INODE(inode, "invalid size");
 		return true;
 	}
@@ -2876,7 +2989,7 @@
 		return true;
 
 	de = (struct ext4_dir_entry_2 *) bh->b_data;
-	if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
+	if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, 0,
 				 0) ||
 	    le32_to_cpu(de->inode) != inode->i_ino || strcmp(".", de->name)) {
 		ext4_warning_inode(inode, "directory missing '.'");
@@ -2885,7 +2998,7 @@
 	}
 	offset = ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize);
 	de = ext4_next_entry(de, sb->s_blocksize);
-	if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size,
+	if (ext4_check_dir_entry(inode, NULL, de, bh, bh->b_data, bh->b_size, 0,
 				 offset) ||
 	    le32_to_cpu(de->inode) == 0 || strcmp("..", de->name)) {
 		ext4_warning_inode(inode, "directory missing '..'");
@@ -2909,7 +3022,7 @@
 		de = (struct ext4_dir_entry_2 *) (bh->b_data +
 					(offset & (sb->s_blocksize - 1)));
 		if (ext4_check_dir_entry(inode, NULL, de, bh,
-					 bh->b_data, bh->b_size, offset)) {
+					 bh->b_data, bh->b_size, 0, offset)) {
 			offset = (offset | (sb->s_blocksize - 1)) + 1;
 			continue;
 		}
@@ -3104,6 +3217,8 @@
 	struct buffer_head *bh;
 	struct ext4_dir_entry_2 *de;
 	handle_t *handle = NULL;
+	ext4_lblk_t lblk;
+
 
 	if (unlikely(ext4_forced_shutdown(EXT4_SB(dir->i_sb))))
 		return -EIO;
@@ -3118,7 +3233,7 @@
 		return retval;
 
 	retval = -ENOENT;
-	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL);
+	bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL, &lblk);
 	if (IS_ERR(bh))
 		return PTR_ERR(bh);
 	if (!bh)
@@ -3145,7 +3260,7 @@
 	if (IS_DIRSYNC(dir))
 		ext4_handle_sync(handle);
 
-	retval = ext4_delete_entry(handle, dir, de, bh);
+	retval = ext4_delete_entry(handle, dir, de, lblk, bh);
 	if (retval)
 		goto end_rmdir;
 	if (!EXT4_DIR_LINK_EMPTY(inode))
@@ -3194,8 +3309,9 @@
 	struct buffer_head *bh;
 	struct ext4_dir_entry_2 *de;
 	int skip_remove_dentry = 0;
+	ext4_lblk_t lblk;
 
-	bh = ext4_find_entry(dir, d_name, &de, NULL);
+	bh = ext4_find_entry(dir, d_name, &de, NULL, &lblk);
 	if (IS_ERR(bh))
 		return PTR_ERR(bh);
 
@@ -3218,7 +3334,7 @@
 		ext4_handle_sync(handle);
 
 	if (!skip_remove_dentry) {
-		retval = ext4_delete_entry(handle, dir, de, bh);
+		retval = ext4_delete_entry(handle, dir, de, lblk, bh);
 		if (retval)
 			goto out;
 		dir->i_ctime = dir->i_mtime = current_time(dir);
@@ -3524,6 +3640,7 @@
 	int dir_nlink_delta;
 
 	/* entry for "dentry" */
+	ext4_lblk_t lblk;
 	struct buffer_head *bh;
 	struct ext4_dir_entry_2 *de;
 	int inlined;
@@ -3611,12 +3728,13 @@
 	int retval = -ENOENT;
 	struct buffer_head *bh;
 	struct ext4_dir_entry_2 *de;
+	ext4_lblk_t lblk;
 
-	bh = ext4_find_entry(dir, d_name, &de, NULL);
+	bh = ext4_find_entry(dir, d_name, &de, NULL, &lblk);
 	if (IS_ERR(bh))
 		return PTR_ERR(bh);
 	if (bh) {
-		retval = ext4_delete_entry(handle, dir, de, bh);
+		retval = ext4_delete_entry(handle, dir, de, lblk, bh);
 		brelse(bh);
 	}
 	return retval;
@@ -3640,7 +3758,8 @@
 		retval = ext4_find_delete_entry(handle, ent->dir,
 						&ent->dentry->d_name);
 	} else {
-		retval = ext4_delete_entry(handle, ent->dir, ent->de, ent->bh);
+		retval = ext4_delete_entry(handle, ent->dir, ent->de,
+						ent->lblk, ent->bh);
 		if (retval == -ENOENT) {
 			retval = ext4_find_delete_entry(handle, ent->dir,
 							&ent->dentry->d_name);
@@ -3753,7 +3872,8 @@
 			return retval;
 	}
 
-	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL);
+	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name, &old.de, NULL,
+				&old.lblk);
 	if (IS_ERR(old.bh))
 		return PTR_ERR(old.bh);
 	/*
@@ -3767,7 +3887,7 @@
 		goto end_rename;
 
 	new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
-				 &new.de, &new.inlined);
+				 &new.de, &new.inlined, NULL);
 	if (IS_ERR(new.bh)) {
 		retval = PTR_ERR(new.bh);
 		new.bh = NULL;
@@ -3973,7 +4093,7 @@
 		return retval;
 
 	old.bh = ext4_find_entry(old.dir, &old.dentry->d_name,
-				 &old.de, &old.inlined);
+				 &old.de, &old.inlined, NULL);
 	if (IS_ERR(old.bh))
 		return PTR_ERR(old.bh);
 	/*
@@ -3987,7 +4107,7 @@
 		goto end_rename;
 
 	new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
-				 &new.de, &new.inlined);
+				 &new.de, &new.inlined, NULL);
 	if (IS_ERR(new.bh)) {
 		retval = PTR_ERR(new.bh);
 		new.bh = NULL;
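
Every EXT4_DIR_REC_LEN() site above becomes ext4_dir_rec_len(), which takes the directory so it can account for the hash pair stored inside each entry of an encrypted+casefolded directory. The helpers are defined in ext4.h, outside this section; their approximate shape, per the mainline casefolding-with-encryption work, is:

/* Approximate ext4.h shape (not part of this hunk). Entries in an
 * encrypted+casefolded directory carry their SipHash value on disk, so
 * the record length grows by one extra structure. */
#define EXT4_DIR_PAD		4
#define EXT4_DIR_ROUND		(EXT4_DIR_PAD - 1)

struct ext4_dir_entry_hash {
	__le32 hash;
	__le32 minor_hash;
};

/* The hash pair sits right after the (padded) name within the entry. */
#define EXT4_DIRENT_HASHES(entry) \
	((struct ext4_dir_entry_hash *) \
		((void *)(entry) + \
		 ((8 + (entry)->name_len + EXT4_DIR_ROUND) & ~EXT4_DIR_ROUND)))
#define EXT4_DIRENT_HASH(entry) \
	le32_to_cpu(EXT4_DIRENT_HASHES(entry)->hash)

static inline bool ext4_hash_in_dirent(const struct inode *inode)
{
	return IS_CASEFOLDED(inode) && IS_ENCRYPTED(inode);
}

static inline unsigned int ext4_dir_rec_len(__u8 name_len,
					    const struct inode *dir)
{
	int rec_len = (name_len + 8 + EXT4_DIR_ROUND);

	if (dir && ext4_hash_in_dirent(dir))
		rec_len += sizeof(struct ext4_dir_entry_hash);
	return rec_len & ~EXT4_DIR_ROUND;
}

Callers that know an entry can never carry the hash pair ("." and "..", the dx root, the checksum tail) pass dir == NULL, which is why the hunks above mix NULL and dir arguments.
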
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index f014c5e4..0deb256 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -46,6 +46,7 @@
 #include <linux/cleancache.h>
 
 #include "ext4.h"
+#include <trace/events/android_fs.h>
 
 #define NUM_PREALLOC_POST_READ_CTXS	128
 
@@ -159,6 +160,17 @@
 	return bio->bi_private && !bio->bi_status;
 }
 
+static void
+ext4_trace_read_completion(struct bio *bio)
+{
+	struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+	if (first_page != NULL)
+		trace_android_fs_dataread_end(first_page->mapping->host,
+					      page_offset(first_page),
+					      bio->bi_iter.bi_size);
+}
+
 /*
  * I/O completion handler for multipage BIOs.
  *
@@ -173,6 +185,9 @@
  */
 static void mpage_end_io(struct bio *bio)
 {
+	if (trace_android_fs_dataread_start_enabled())
+		ext4_trace_read_completion(bio);
+
 	if (bio_post_read_required(bio)) {
 		struct bio_post_read_ctx *ctx = bio->bi_private;
 
@@ -221,6 +236,30 @@
 	return i_size_read(inode);
 }
 
+static void
+ext4_submit_bio_read(struct bio *bio)
+{
+	if (trace_android_fs_dataread_start_enabled()) {
+		struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+		if (first_page != NULL) {
+			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+			path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    first_page->mapping->host);
+			trace_android_fs_dataread_start(
+				first_page->mapping->host,
+				page_offset(first_page),
+				bio->bi_iter.bi_size,
+				current->pid,
+				path,
+				current->comm);
+		}
+	}
+	submit_bio(bio);
+}
+
 int ext4_mpage_readpages(struct inode *inode,
 		struct readahead_control *rac, struct page *page)
 {
@@ -363,7 +402,7 @@
 		if (bio && (last_block_in_bio != blocks[0] - 1 ||
 			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
 		submit_and_realloc:
-			submit_bio(bio);
+			ext4_submit_bio_read(bio);
 			bio = NULL;
 		}
 		if (bio == NULL) {
@@ -390,14 +429,14 @@
 		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
 		     (relative_block == map.m_len)) ||
 		    (first_hole != blocks_per_page)) {
-			submit_bio(bio);
+			ext4_submit_bio_read(bio);
 			bio = NULL;
 		} else
 			last_block_in_bio = blocks[blocks_per_page - 1];
 		goto next_page;
 	confused:
 		if (bio) {
-			submit_bio(bio);
+			ext4_submit_bio_read(bio);
 			bio = NULL;
 		}
 		if (!PageUptodate(page))
@@ -409,7 +448,7 @@
 			put_page(page);
 	}
 	if (bio)
-		submit_bio(bio);
+		ext4_submit_bio_read(bio);
 	return 0;
 }
 
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 9447204..95e8557 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4289,12 +4289,6 @@
 		struct unicode_map *encoding;
 		__u16 encoding_flags;
 
-		if (ext4_has_feature_encrypt(sb)) {
-			ext4_msg(sb, KERN_ERR,
-				 "Can't mount with encoding and encryption");
-			goto failed_mount;
-		}
-
 		if (ext4_sb_read_encoding(es, &encoding_info,
 					  &encoding_flags)) {
 			ext4_msg(sb, KERN_ERR,
@@ -4964,11 +4958,6 @@
 		goto failed_mount4;
 	}
 
-#ifdef CONFIG_UNICODE
-	if (sb->s_encoding)
-		sb->s_d_op = &ext4_dentry_ops;
-#endif
-
 	sb->s_root = d_make_root(root);
 	if (!sb->s_root) {
 		ext4_msg(sb, KERN_ERR, "get root dentry failed");
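
Dropping the unconditional sb->s_d_op assignment works because ext4_lookup() now calls generic_set_encrypted_ci_d_ops() per dentry (see the namei.c hunk). A simplified sketch of that libfs helper as introduced alongside these changes (the *_dentry_ops tables live in fs/libfs.c; this is context, not part of the diff):

void generic_set_encrypted_ci_d_ops(struct dentry *dentry)
{
#ifdef CONFIG_FS_ENCRYPTION
	/* No-key names need fscrypt revalidation, with or without CI. */
	if (dentry->d_flags & DCACHE_NOKEY_NAME) {
#ifdef CONFIG_UNICODE
		if (dentry->d_sb->s_encoding) {
			d_set_d_op(dentry, &generic_encrypted_ci_dentry_ops);
			return;
		}
#endif
		d_set_d_op(dentry, &generic_encrypted_dentry_ops);
		return;
	}
#endif
#ifdef CONFIG_UNICODE
	/* Casefold-only: hash and compare via the superblock's encoding. */
	if (dentry->d_sb->s_encoding) {
		d_set_d_op(dentry, &generic_ci_dentry_ops);
		return;
	}
#endif
}
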
diff --git a/fs/ext4/xattr_hurd.c b/fs/ext4/xattr_hurd.c
index 8cfa74a..b96df3b 100644
--- a/fs/ext4/xattr_hurd.c
+++ b/fs/ext4/xattr_hurd.c
@@ -21,7 +21,8 @@
 static int
 ext4_xattr_hurd_get(const struct xattr_handler *handler,
 		    struct dentry *unused, struct inode *inode,
-		    const char *name, void *buffer, size_t size)
+		    const char *name, void *buffer, size_t size,
+		    int flags)
 {
 	if (!test_opt(inode->i_sb, XATTR_USER))
 		return -EOPNOTSUPP;
diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c
index 197a9d8..50fb713 100644
--- a/fs/ext4/xattr_security.c
+++ b/fs/ext4/xattr_security.c
@@ -15,7 +15,7 @@
 static int
 ext4_xattr_security_get(const struct xattr_handler *handler,
 			struct dentry *unused, struct inode *inode,
-			const char *name, void *buffer, size_t size)
+			const char *name, void *buffer, size_t size, int flags)
 {
 	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_SECURITY,
 			      name, buffer, size);
diff --git a/fs/ext4/xattr_trusted.c b/fs/ext4/xattr_trusted.c
index e9389e5..64bd8f8 100644
--- a/fs/ext4/xattr_trusted.c
+++ b/fs/ext4/xattr_trusted.c
@@ -22,7 +22,7 @@
 static int
 ext4_xattr_trusted_get(const struct xattr_handler *handler,
 		       struct dentry *unused, struct inode *inode,
-		       const char *name, void *buffer, size_t size)
+		       const char *name, void *buffer, size_t size, int flags)
 {
 	return ext4_xattr_get(inode, EXT4_XATTR_INDEX_TRUSTED,
 			      name, buffer, size);
diff --git a/fs/ext4/xattr_user.c b/fs/ext4/xattr_user.c
index d454618..b730137 100644
--- a/fs/ext4/xattr_user.c
+++ b/fs/ext4/xattr_user.c
@@ -21,7 +21,7 @@
 static int
 ext4_xattr_user_get(const struct xattr_handler *handler,
 		    struct dentry *unused, struct inode *inode,
-		    const char *name, void *buffer, size_t size)
+		    const char *name, void *buffer, size_t size, int flags)
 {
 	if (!test_opt(inode->i_sb, XATTR_USER))
 		return -EOPNOTSUPP;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index be4da52..2f0ce2a 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -27,6 +27,7 @@
 #include "segment.h"
 #include "trace.h"
 #include <trace/events/f2fs.h>
+#include <trace/events/android_fs.h>
 
 #define NUM_PREALLOC_POST_READ_CTXS	128
 
@@ -471,6 +472,8 @@
 	 */
 	if (!fio || !fio->encrypted_page)
 		fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
+	else if (fscrypt_inode_should_skip_dm_default_key(inode))
+		bio_set_skip_dm_default_key(bio);
 }
 
 static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
@@ -482,7 +485,9 @@
 	 * read/write raw data without encryption.
 	 */
 	if (fio && fio->encrypted_page)
-		return !bio_has_crypt_ctx(bio);
+		return !bio_has_crypt_ctx(bio) &&
+			(bio_should_skip_dm_default_key(bio) ==
+			 fscrypt_inode_should_skip_dm_default_key(inode));
 
 	return fscrypt_mergeable_bio(bio, inode, next_idx);
 }
@@ -3371,6 +3376,16 @@
 	block_t blkaddr = NULL_ADDR;
 	int err = 0;
 
+	if (trace_android_fs_datawrite_start_enabled()) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_datawrite_start(inode, pos, len,
+						 current->pid, path,
+						 current->comm);
+	}
 	trace_f2fs_write_begin(inode, pos, len, flags);
 
 	if (!f2fs_is_checkpoint_ready(sbi)) {
@@ -3498,6 +3513,7 @@
 {
 	struct inode *inode = page->mapping->host;
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_f2fs_write_end(inode, pos, len, copied);
 
 	/*
@@ -3631,6 +3647,29 @@
 
 	trace_f2fs_direct_IO_enter(inode, offset, count, rw);
 
+	if (trace_android_fs_dataread_start_enabled() &&
+	    (rw == READ)) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_dataread_start(inode, offset,
+						count, current->pid, path,
+						current->comm);
+	}
+	if (trace_android_fs_datawrite_start_enabled() &&
+	    (rw == WRITE)) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_datawrite_start(inode, offset, count,
+						 current->pid, path,
+						 current->comm);
+	}
+
 	if (rw == WRITE && whint_mode == WHINT_MODE_OFF)
 		iocb->ki_hint = WRITE_LIFE_NOT_SET;
 
@@ -3686,6 +3725,13 @@
 	}
 
 out:
+	if (trace_android_fs_dataread_start_enabled() &&
+	    (rw == READ))
+		trace_android_fs_dataread_end(inode, offset, count);
+	if (trace_android_fs_datawrite_start_enabled() &&
+	    (rw == WRITE))
+		trace_android_fs_datawrite_end(inode, offset, count);
+
 	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
 
 	return err;
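
bio_set_skip_dm_default_key() and its companions are Android-tree additions whose definitions fall outside this section; the semantics assumed by the merge check above are:

/* Android-specific declarations (assumed semantics): pages that fscrypt
 * already encrypted must bypass the dm-default-key device-mapper target,
 * or they would be encrypted twice; bios may only merge when they agree
 * on this property. */
void bio_set_skip_dm_default_key(struct bio *bio);
bool bio_should_skip_dm_default_key(const struct bio *bio);
bool fscrypt_inode_should_skip_dm_default_key(const struct inode *inode);
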
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 4b9ef8b..82b58d1 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -5,6 +5,7 @@
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  */
+#include <asm/unaligned.h>
 #include <linux/fs.h>
 #include <linux/f2fs_fs.h>
 #include <linux/sched/signal.h>
@@ -206,30 +207,55 @@
 /*
  * Test whether a case-insensitive directory entry matches the filename
  * being searched for.
+ *
+ * Returns 1 for a match, 0 for no match, and -errno on an error.
  */
-static bool f2fs_match_ci_name(const struct inode *dir, const struct qstr *name,
+static int f2fs_match_ci_name(const struct inode *dir, const struct qstr *name,
 			       const u8 *de_name, u32 de_name_len)
 {
 	const struct super_block *sb = dir->i_sb;
 	const struct unicode_map *um = sb->s_encoding;
+	struct fscrypt_str decrypted_name = FSTR_INIT(NULL, de_name_len);
 	struct qstr entry = QSTR_INIT(de_name, de_name_len);
 	int res;
 
-	res = utf8_strncasecmp_folded(um, name, &entry);
-	if (res < 0) {
-		/*
-		 * In strict mode, ignore invalid names.  In non-strict mode,
-		 * fall back to treating them as opaque byte sequences.
-		 */
-		if (sb_has_strict_encoding(sb) || name->len != entry.len)
-			return false;
-		return !memcmp(name->name, entry.name, name->len);
+	if (IS_ENCRYPTED(dir)) {
+		const struct fscrypt_str encrypted_name =
+			FSTR_INIT((u8 *)de_name, de_name_len);
+
+		if (WARN_ON_ONCE(!fscrypt_has_encryption_key(dir)))
+			return -EINVAL;
+
+		decrypted_name.name = kmalloc(de_name_len, GFP_KERNEL);
+		if (!decrypted_name.name)
+			return -ENOMEM;
+		res = fscrypt_fname_disk_to_usr(dir, 0, 0, &encrypted_name,
+						&decrypted_name);
+		if (res < 0)
+			goto out;
+		entry.name = decrypted_name.name;
+		entry.len = decrypted_name.len;
 	}
-	return res == 0;
+
+	res = utf8_strncasecmp_folded(um, name, &entry);
+	/*
+	 * In strict mode, ignore invalid names.  In non-strict mode,
+	 * fall back to treating them as opaque byte sequences.
+	 */
+	if (res < 0 && !sb_has_strict_encoding(sb)) {
+		res = name->len == entry.len &&
+				memcmp(name->name, entry.name, name->len) == 0;
+	} else {
+		/* utf8_strncasecmp_folded returns 0 on match */
+		res = (res == 0);
+	}
+out:
+	kfree(decrypted_name.name);
+	return res;
 }
 #endif /* CONFIG_UNICODE */
 
-static inline bool f2fs_match_name(const struct inode *dir,
+static inline int f2fs_match_name(const struct inode *dir,
 				   const struct f2fs_filename *fname,
 				   const u8 *de_name, u32 de_name_len)
 {
@@ -256,6 +282,7 @@
 	struct f2fs_dir_entry *de;
 	unsigned long bit_pos = 0;
 	int max_len = 0;
+	int res = 0;
 
 	if (max_slots)
 		*max_slots = 0;
@@ -273,10 +300,15 @@
 			continue;
 		}
 
-		if (de->hash_code == fname->hash &&
-		    f2fs_match_name(d->inode, fname, d->filename[bit_pos],
-				    le16_to_cpu(de->name_len)))
-			goto found;
+		if (de->hash_code == fname->hash) {
+			res = f2fs_match_name(d->inode, fname,
+					      d->filename[bit_pos],
+					      le16_to_cpu(de->name_len));
+			if (res < 0)
+				return ERR_PTR(res);
+			if (res)
+				goto found;
+		}
 
 		if (max_slots && max_len > *max_slots)
 			*max_slots = max_len;
@@ -326,7 +358,11 @@
 		}
 
 		de = find_in_block(dir, dentry_page, fname, &max_slots);
-		if (de) {
+		if (IS_ERR(de)) {
+			*res_page = ERR_CAST(de);
+			de = NULL;
+			break;
+		} else if (de) {
 			*res_page = dentry_page;
 			break;
 		}
@@ -448,17 +484,39 @@
 	f2fs_put_page(page, 1);
 }
 
-static void init_dent_inode(const struct f2fs_filename *fname,
+static void init_dent_inode(struct inode *dir, struct inode *inode,
+			    const struct f2fs_filename *fname,
 			    struct page *ipage)
 {
 	struct f2fs_inode *ri;
 
+	if (!fname) /* tmpfile case? */
+		return;
+
 	f2fs_wait_on_page_writeback(ipage, NODE, true, true);
 
 	/* copy name info. to this inode page */
 	ri = F2FS_INODE(ipage);
 	ri->i_namelen = cpu_to_le32(fname->disk_name.len);
 	memcpy(ri->i_name, fname->disk_name.name, fname->disk_name.len);
+	if (IS_ENCRYPTED(dir)) {
+		file_set_enc_name(inode);
+		/*
+		 * Roll-forward recovery doesn't have encryption keys available,
+		 * so it can't compute the dirhash for encrypted+casefolded
+		 * filenames.  Append it to i_name if possible.  Else, disable
+		 * roll-forward recovery of the dentry (i.e., make fsync'ing the
+		 * file force a checkpoint) by setting LOST_PINO.
+		 */
+		if (IS_CASEFOLDED(dir)) {
+			if (fname->disk_name.len + sizeof(f2fs_hash_t) <=
+			    F2FS_NAME_LEN)
+				put_unaligned(fname->hash, (f2fs_hash_t *)
+					&ri->i_name[fname->disk_name.len]);
+			else
+				file_lost_pino(inode);
+		}
+	}
 	set_page_dirty(ipage);
 }
 
@@ -541,11 +599,7 @@
 			return page;
 	}
 
-	if (fname) {
-		init_dent_inode(fname, page);
-		if (IS_ENCRYPTED(dir))
-			file_set_enc_name(inode);
-	}
+	init_dent_inode(dir, inode, fname, page);
 
 	/*
 	 * This file should be checkpointed during fsync.
@@ -1099,10 +1153,3 @@
 	.compat_ioctl   = f2fs_compat_ioctl,
 #endif
 };
-
-#ifdef CONFIG_UNICODE
-const struct dentry_operations f2fs_dentry_ops = {
-	.d_hash = generic_ci_d_hash,
-	.d_compare = generic_ci_d_compare,
-};
-#endif
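
f2fs_match_name() itself only changes return type here (bool to int) so that errors from the case-insensitive comparator can propagate. Its dispatch, approximated under that convention (FSTR_TO_QSTR is the existing fscrypt helper macro):

static inline int f2fs_match_name(const struct inode *dir,
				  const struct f2fs_filename *fname,
				  const u8 *de_name, u32 de_name_len)
{
	struct fscrypt_name f;

#ifdef CONFIG_UNICODE
	if (fname->cf_name.name) {
		struct qstr cf = FSTR_TO_QSTR(&fname->cf_name);

		return f2fs_match_ci_name(dir, &cf, de_name, de_name_len);
	}
#endif
	f.usr_fname = fname->usr_fname;
	f.disk_name = fname->disk_name;
#ifdef CONFIG_FS_ENCRYPTION
	f.crypto_buf = fname->crypto_buf;
#endif
	/* Plain and encrypted non-casefolded names: 1 on match, 0 otherwise. */
	return fscrypt_match_name(&f, de_name, de_name_len);
}
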
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index cb700d7..66cf00a 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -533,9 +533,11 @@
 #ifdef CONFIG_UNICODE
 	/*
 	 * For casefolded directories: the casefolded name, but it's left NULL
-	 * if the original name is not valid Unicode or if the filesystem is
-	 * doing an internal operation where usr_fname is also NULL.  In these
-	 * cases we fall back to treating the name as an opaque byte sequence.
+	 * if the original name is not valid Unicode, if the directory is both
+	 * casefolded and encrypted and its encryption key is unavailable, or if
+	 * the filesystem is doing an internal operation where usr_fname is also
+	 * NULL.  In all these cases we fall back to treating the name as an
+	 * opaque byte sequence.
 	 */
 	struct fscrypt_str cf_name;
 #endif
@@ -3767,9 +3769,6 @@
 #endif
 
 extern const struct file_operations f2fs_dir_operations;
-#ifdef CONFIG_UNICODE
-extern const struct dentry_operations f2fs_dentry_ops;
-#endif
 extern const struct file_operations f2fs_file_operations;
 extern const struct inode_operations f2fs_file_inode_operations;
 extern const struct address_space_operations f2fs_dblock_aops;
@@ -4120,7 +4119,11 @@
 	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
 	int rw = iov_iter_rw(iter);
 
-	if (f2fs_post_read_required(inode))
+	if (!fscrypt_dio_supported(iocb, iter))
+		return true;
+	if (fsverity_active(inode))
+		return true;
+	if (f2fs_compressed_file(inode))
 		return true;
 	if (f2fs_is_multi_device(sbi))
 		return true;
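
The direct-I/O gate above replaces the blanket f2fs_post_read_required() test with finer-grained checks. fscrypt_dio_supported() is declared on the fscrypt side of this tree; the assumed prototype and meaning:

/* Assumed from the fscrypt headers (not in this section): true when the
 * file permits direct I/O, i.e. it is unencrypted or its contents go
 * through inline/blk-crypto rather than the filesystem-level fallback. */
bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter);
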
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index de841aa..e3beac5 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -111,7 +111,9 @@
 		 * If the casefolded name is provided, hash it instead of the
 		 * on-disk name.  If the casefolded name is *not* provided, that
 		 * should only be because the name wasn't valid Unicode, so fall
-		 * back to treating the name as an opaque byte sequence.
+		 * back to treating the name as an opaque byte sequence.  Note
+		 * that to handle encrypted directories, the fallback must use
+		 * usr_fname (plaintext) rather than disk_name (ciphertext).
 		 */
 		WARN_ON_ONCE(!fname->usr_fname->name);
 		if (fname->cf_name.name) {
@@ -121,6 +123,13 @@
 			name = fname->usr_fname->name;
 			len = fname->usr_fname->len;
 		}
+		if (IS_ENCRYPTED(dir)) {
+			struct qstr tmp = QSTR_INIT(name, len);
+
+			fname->hash =
+				cpu_to_le32(fscrypt_fname_siphash(dir, &tmp));
+			return;
+		}
 	}
 #endif
 	fname->hash = cpu_to_le32(TEA_hash_name(name, len));
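
For an encrypted+casefolded directory, the TEA hash of the ciphertext is useless for case-insensitive lookup, so the name is hashed in plaintext with a per-directory SipHash key instead. The fscrypt helper used above, in prototype (defined in fs/crypto, outside this section):

/* SipHash of the given plaintext (here: casefolded) name using the
 * directory's dirhash key, so lookups hash consistently without
 * decrypting every on-disk name first. */
u32 fscrypt_fname_siphash(const struct inode *dir, const struct qstr *name);
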
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 70384e3..b93d0cc 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -13,6 +13,7 @@
 #include "f2fs.h"
 #include "node.h"
 #include <trace/events/f2fs.h>
+#include <trace/events/android_fs.h>
 
 bool f2fs_may_inline_data(struct inode *inode)
 {
@@ -86,14 +87,29 @@
 {
 	struct page *ipage;
 
+	if (trace_android_fs_dataread_start_enabled()) {
+		char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+		path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    inode);
+		trace_android_fs_dataread_start(inode, page_offset(page),
+						PAGE_SIZE, current->pid,
+						path, current->comm);
+	}
+
 	ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
 	if (IS_ERR(ipage)) {
+		trace_android_fs_dataread_end(inode, page_offset(page),
+					      PAGE_SIZE);
 		unlock_page(page);
 		return PTR_ERR(ipage);
 	}
 
 	if (!f2fs_has_inline_data(inode)) {
 		f2fs_put_page(ipage, 1);
+		trace_android_fs_dataread_end(inode, page_offset(page),
+					      PAGE_SIZE);
 		return -EAGAIN;
 	}
 
@@ -105,6 +121,8 @@
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
 	f2fs_put_page(ipage, 1);
+	trace_android_fs_dataread_end(inode, page_offset(page),
+				      PAGE_SIZE);
 	unlock_page(page);
 	return 0;
 }
@@ -332,6 +350,10 @@
 	make_dentry_ptr_inline(dir, &d, inline_dentry);
 	de = f2fs_find_target_dentry(&d, fname, NULL);
 	unlock_page(ipage);
+	if (IS_ERR(de)) {
+		*res_page = ERR_CAST(de);
+		de = NULL;
+	}
 	if (de)
 		*res_page = ipage;
 	else
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 8fa37d1..6edb1ab 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -497,6 +497,7 @@
 	}
 
 	err = f2fs_prepare_lookup(dir, dentry, &fname);
+	generic_set_encrypted_ci_d_ops(dentry);
 	if (err == -ENOENT)
 		goto out_splice;
 	if (err)
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 4f12ade..0947d36 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -5,6 +5,7 @@
  * Copyright (c) 2012 Samsung Electronics Co., Ltd.
  *             http://www.samsung.com/
  */
+#include <asm/unaligned.h>
 #include <linux/fs.h>
 #include <linux/f2fs_fs.h>
 #include "f2fs.h"
@@ -128,7 +129,16 @@
 	}
 
 	/* Compute the hash of the filename */
-	if (IS_CASEFOLDED(dir)) {
+	if (IS_ENCRYPTED(dir) && IS_CASEFOLDED(dir)) {
+		/*
+		 * In this case the hash isn't computable without the key, so it
+		 * was saved on-disk.
+		 */
+		if (fname->disk_name.len + sizeof(f2fs_hash_t) > F2FS_NAME_LEN)
+			return -EINVAL;
+		fname->hash = get_unaligned((f2fs_hash_t *)
+				&raw_inode->i_name[fname->disk_name.len]);
+	} else if (IS_CASEFOLDED(dir)) {
 		err = f2fs_init_casefolded_name(dir, fname);
 		if (err)
 			return err;
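
This get_unaligned() is the read side of the put_unaligned() added to init_dent_inode() earlier in the diff; together they imply the following i_name[] layout (a sketch of the implied format, not new code):

/*
 * i_name[] when the dirent hash must survive without keys:
 *
 *   bytes 0 .. disk_name.len-1      ciphertext filename
 *   next sizeof(f2fs_hash_t) bytes  le32 dirent hash, unaligned
 *
 * When this does not fit in F2FS_NAME_LEN, the save side sets LOST_PINO
 * and fsync falls back to a checkpoint instead of roll-forward recovery.
 */
fname->hash = get_unaligned((f2fs_hash_t *)
		&raw_inode->i_name[fname->disk_name.len]);
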
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 00eff2f..42293b7 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -3399,12 +3399,6 @@
 		struct unicode_map *encoding;
 		__u16 encoding_flags;
 
-		if (f2fs_sb_has_encrypt(sbi)) {
-			f2fs_err(sbi,
-				"Can't mount with encoding and encryption");
-			return -EINVAL;
-		}
-
 		if (f2fs_sb_read_encoding(sbi->raw_super, &encoding_info,
 					  &encoding_flags)) {
 			f2fs_err(sbi,
@@ -3427,7 +3421,6 @@
 
 		sbi->sb->s_encoding = encoding;
 		sbi->sb->s_encoding_flags = encoding_flags;
-		sbi->sb->s_d_op = &f2fs_dentry_ops;
 	}
 #else
 	if (f2fs_sb_has_casefold(sbi)) {
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 65afcc3..eb892db 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -44,7 +44,7 @@
 
 static int f2fs_xattr_generic_get(const struct xattr_handler *handler,
 		struct dentry *unused, struct inode *inode,
-		const char *name, void *buffer, size_t size)
+		const char *name, void *buffer, size_t size, int flags)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 
@@ -99,7 +99,7 @@
 
 static int f2fs_xattr_advise_get(const struct xattr_handler *handler,
 		struct dentry *unused, struct inode *inode,
-		const char *name, void *buffer, size_t size)
+		const char *name, void *buffer, size_t size, int flags)
 {
 	if (buffer)
 		*((char *)buffer) = F2FS_I(inode)->i_advise;
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 8c7021f..20ed23a 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
 
 fuse-y := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o
+fuse-y += passthrough.o
 fuse-$(CONFIG_FUSE_DAX) += dax.o
 
 virtiofs-y := virtio_fs.o
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 588f8d1..d390865 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -14,6 +14,7 @@
 #include <linux/sched/signal.h>
 #include <linux/uio.h>
 #include <linux/miscdevice.h>
+#include <linux/namei.h>
 #include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/slab.h>
@@ -1908,6 +1909,14 @@
 		err = copy_out_args(cs, req->args, nbytes);
 	fuse_copy_finish(cs);
 
+	if (!err && req->in.h.opcode == FUSE_CANONICAL_PATH) {
+		char *path = (char *)req->args->out_args[0].value;
+
+		path[req->args->out_args[0].size - 1] = 0;
+		req->out.h.error =
+			kern_path(path, 0, req->args->canonical_path);
+	}
+
 	spin_lock(&fpq->lock);
 	clear_bit(FR_LOCKED, &req->flags);
 	if (!fpq->connected)
@@ -2233,37 +2242,55 @@
 static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
 			   unsigned long arg)
 {
-	int err = -ENOTTY;
+	int res;
+	int oldfd;
+	struct fuse_dev *fud;
+	struct fuse_passthrough_out pto;
 
-	if (cmd == FUSE_DEV_IOC_CLONE) {
-		int oldfd;
-
-		err = -EFAULT;
-		if (!get_user(oldfd, (__u32 __user *) arg)) {
+	switch (cmd) {
+	case FUSE_DEV_IOC_CLONE:
+		res = -EFAULT;
+		if (!get_user(oldfd, (__u32 __user *)arg)) {
 			struct file *old = fget(oldfd);
 
-			err = -EINVAL;
+			res = -EINVAL;
 			if (old) {
-				struct fuse_dev *fud = NULL;
+				fud = NULL;
 
 				/*
 				 * Check against file->f_op because CUSE
 				 * uses the same ioctl handler.
 				 */
 				if (old->f_op == file->f_op &&
-				    old->f_cred->user_ns == file->f_cred->user_ns)
+				    old->f_cred->user_ns ==
+					    file->f_cred->user_ns)
 					fud = fuse_get_dev(old);
 
 				if (fud) {
 					mutex_lock(&fuse_mutex);
-					err = fuse_device_clone(fud->fc, file);
+					res = fuse_device_clone(fud->fc, file);
 					mutex_unlock(&fuse_mutex);
 				}
 				fput(old);
 			}
 		}
+		break;
+	case FUSE_DEV_IOC_PASSTHROUGH_OPEN:
+		res = -EFAULT;
+		if (!copy_from_user(&pto,
+				    (struct fuse_passthrough_out __user *)arg,
+				    sizeof(pto))) {
+			res = -EINVAL;
+			fud = fuse_get_dev(file);
+			if (fud)
+				res = fuse_passthrough_open(fud, &pto);
+		}
+		break;
+	default:
+		res = -ENOTTY;
+		break;
 	}
-	return err;
+	return res;
 }
 
 const struct file_operations fuse_dev_operations = {
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index ff7dbeb..d53b1c5 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -374,6 +374,45 @@
 	return ERR_PTR(err);
 }
 
+/*
+ * Get the canonical path. Since we must translate to a path, this must be done
+ * in the context of the userspace daemon; however, the userspace daemon cannot
+ * look up paths on its own. Instead, we handle the lookup as a special case
+ * inside the write request.
+ */
+static void fuse_dentry_canonical_path(const struct path *path,
+				       struct path *canonical_path)
+{
+	struct inode *inode = d_inode(path->dentry);
+	//struct fuse_conn *fc = get_fuse_conn(inode);
+	struct fuse_mount *fm = get_fuse_mount_super(path->mnt->mnt_sb);
+	FUSE_ARGS(args);
+	char *path_name;
+	int err;
+
+	path_name = (char *)__get_free_page(GFP_KERNEL);
+	if (!path_name)
+		goto default_path;
+
+	args.opcode = FUSE_CANONICAL_PATH;
+	args.nodeid = get_node_id(inode);
+	args.in_numargs = 0;
+	args.out_numargs = 1;
+	args.out_args[0].size = PATH_MAX;
+	args.out_args[0].value = path_name;
+	args.canonical_path = canonical_path;
+	args.out_argvar = 1;
+
+	err = fuse_simple_request(fm, &args);
+	free_page((unsigned long)path_name);
+	if (err > 0)
+		return;
+default_path:
+	canonical_path->dentry = path->dentry;
+	canonical_path->mnt = path->mnt;
+	path_get(canonical_path);
+}
+
 const struct dentry_operations fuse_dentry_operations = {
 	.d_revalidate	= fuse_dentry_revalidate,
 	.d_delete	= fuse_dentry_delete,
@@ -382,6 +421,7 @@
 	.d_release	= fuse_dentry_release,
 #endif
 	.d_automount	= fuse_dentry_automount,
+	.d_canonical_path = fuse_dentry_canonical_path,
 };
 
 const struct dentry_operations fuse_root_dentry_operations = {
@@ -512,6 +552,7 @@
 	int err;
 	struct inode *inode;
 	struct fuse_mount *fm = get_fuse_mount(dir);
+	struct fuse_conn *fc = get_fuse_conn(dir);
 	FUSE_ARGS(args);
 	struct fuse_forget_link *forget;
 	struct fuse_create_in inarg;
@@ -566,6 +607,7 @@
 	ff->fh = outopen.fh;
 	ff->nodeid = outentry.nodeid;
 	ff->open_flags = outopen.open_flags;
+	fuse_passthrough_setup(fc, ff, &outopen);
 	inode = fuse_iget(dir->i_sb, outentry.nodeid, outentry.generation,
 			  &outentry.attr, entry_attr_timeout(&outentry), 0);
 	if (!inode) {
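
FUSE_CANONICAL_PATH is an Android-specific opcode: the daemon replies with a NUL-terminated path string, and the kernel resolves it with kern_path() in the dev.c hunk above. For illustration only, a hypothetical daemon-side handler in libfuse lowlevel style (libfuse does not actually dispatch this opcode, and lookup_backing_path() is invented here):

static void sketch_canonical_path(fuse_req_t req, fuse_ino_t ino)
{
	/* Hypothetical: map the node to its backing path somehow. */
	const char *path = lookup_backing_path(ino);

	/* Include the terminating NUL; the kernel writes one anyway. */
	fuse_reply_buf(req, path, strlen(path) + 1);
}
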
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index c03034e..199c93c 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -152,7 +152,7 @@
 		if (!err) {
 			ff->fh = outarg.fh;
 			ff->open_flags = outarg.open_flags;
-
+			fuse_passthrough_setup(fc, ff, &outarg);
 		} else if (err != -ENOSYS) {
 			fuse_file_free(ff);
 			return err;
@@ -295,6 +295,8 @@
 	struct fuse_release_args *ra = ff->release_args;
 	int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE;
 
+	fuse_passthrough_release(&ff->passthrough);
+
 	fuse_prepare_release(fi, ff, file->f_flags, opcode);
 
 	if (ff->flock) {
@@ -1561,7 +1563,9 @@
 	if (FUSE_IS_DAX(inode))
 		return fuse_dax_read_iter(iocb, to);
 
-	if (!(ff->open_flags & FOPEN_DIRECT_IO))
+	if (ff->passthrough.filp)
+		return fuse_passthrough_read_iter(iocb, to);
+	else if (!(ff->open_flags & FOPEN_DIRECT_IO))
 		return fuse_cache_read_iter(iocb, to);
 	else
 		return fuse_direct_read_iter(iocb, to);
@@ -1579,7 +1583,9 @@
 	if (FUSE_IS_DAX(inode))
 		return fuse_dax_write_iter(iocb, from);
 
-	if (!(ff->open_flags & FOPEN_DIRECT_IO))
+	if (ff->passthrough.filp)
+		return fuse_passthrough_write_iter(iocb, from);
+	else if (!(ff->open_flags & FOPEN_DIRECT_IO))
 		return fuse_cache_write_iter(iocb, from);
 	else
 		return fuse_direct_write_iter(iocb, from);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index d515980..21442bd 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -178,6 +178,17 @@
 struct fuse_mount;
 struct fuse_release_args;
 
+/**
+ * Reference to lower filesystem file for read/write operations handled in
+ * passthrough mode.
+ * This struct also tracks the credentials to be used for handling read/write
+ * operations.
+ */
+struct fuse_passthrough {
+	struct file *filp;
+	struct cred *cred;
+};
+
 /** FUSE specific file data */
 struct fuse_file {
 	/** Fuse connection for this file */
@@ -223,6 +234,9 @@
 
 	} readdir;
 
+	/** Container for data related to the passthrough functionality */
+	struct fuse_passthrough passthrough;
+
 	/** RB node to be linked on fuse_conn->polled_files */
 	struct rb_node polled_node;
 
@@ -268,6 +282,9 @@
 	struct fuse_in_arg in_args[3];
 	struct fuse_arg out_args[2];
 	void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error);
+
+	/* Path used for completing d_canonical_path */
+	struct path *canonical_path;
 };
 
 struct fuse_args_pages {
@@ -371,10 +388,8 @@
 	/** Used to wake up the task waiting for completion of request*/
 	wait_queue_head_t waitq;
 
-#if IS_ENABLED(CONFIG_VIRTIO_FS)
 	/** virtio-fs's physically contiguous buffer for in and out args */
 	void *argbuf;
-#endif
 
 	/** fuse_mount this request belongs to */
 	struct fuse_mount *fm;
@@ -745,6 +760,9 @@
 	/* Auto-mount submounts announced by the server */
 	unsigned int auto_submounts:1;
 
+	/** Passthrough mode for read/write IO */
+	unsigned int passthrough:1;
+
 	/** The number of requests waiting for completion */
 	atomic_t num_waiting;
 
@@ -788,6 +806,12 @@
 
 	/** List of filesystems using this connection */
 	struct list_head mounts;
+
+	/** IDR for passthrough requests */
+	struct idr passthrough_req;
+
+	/** Protects passthrough_req */
+	spinlock_t passthrough_req_lock;
 };
 
 /*
@@ -1210,4 +1234,12 @@
 bool fuse_dax_check_alignment(struct fuse_conn *fc, unsigned int map_alignment);
 void fuse_dax_cancel_work(struct fuse_conn *fc);
 
+int fuse_passthrough_open(struct fuse_dev *fud,
+			  struct fuse_passthrough_out *pto);
+int fuse_passthrough_setup(struct fuse_conn *fc, struct fuse_file *ff,
+			   struct fuse_open_out *openarg);
+void fuse_passthrough_release(struct fuse_passthrough *passthrough);
+ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *to);
+ssize_t fuse_passthrough_write_iter(struct kiocb *iocb, struct iov_iter *from);
+
 #endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 1a47afc..6a131fd 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -680,6 +680,7 @@
 	memset(fc, 0, sizeof(*fc));
 	spin_lock_init(&fc->lock);
 	spin_lock_init(&fc->bg_lock);
+	spin_lock_init(&fc->passthrough_req_lock);
 	init_rwsem(&fc->killsb);
 	refcount_set(&fc->count, 1);
 	atomic_set(&fc->dev_count, 1);
@@ -688,6 +689,7 @@
 	INIT_LIST_HEAD(&fc->bg_queue);
 	INIT_LIST_HEAD(&fc->entry);
 	INIT_LIST_HEAD(&fc->devices);
+	idr_init(&fc->passthrough_req);
 	atomic_set(&fc->num_waiting, 0);
 	fc->max_background = FUSE_DEFAULT_MAX_BACKGROUND;
 	fc->congestion_threshold = FUSE_DEFAULT_CONGESTION_THRESHOLD;
@@ -1055,6 +1057,12 @@
 			    !fuse_dax_check_alignment(fc, arg->map_alignment)) {
 				ok = false;
 			}
+			if (arg->flags & FUSE_PASSTHROUGH) {
+				fc->passthrough = 1;
+				/* Prevent further stacking */
+				fm->sb->s_stack_depth =
+					FILESYSTEM_MAX_STACK_DEPTH;
+			}
 		} else {
 			ra_pages = fc->max_read / PAGE_SIZE;
 			fc->no_lock = 1;
@@ -1097,7 +1105,8 @@
 		FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT |
 		FUSE_PARALLEL_DIROPS | FUSE_HANDLE_KILLPRIV | FUSE_POSIX_ACL |
 		FUSE_ABORT_ERROR | FUSE_MAX_PAGES | FUSE_CACHE_SYMLINKS |
-		FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA;
+		FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA |
+		FUSE_PASSTHROUGH;
 #ifdef CONFIG_FUSE_DAX
 	if (fm->fc->dax)
 		ia->in.flags |= FUSE_MAP_ALIGNMENT;
@@ -1125,9 +1134,21 @@
 }
 EXPORT_SYMBOL_GPL(fuse_send_init);
 
+static int free_fuse_passthrough(int id, void *p, void *data)
+{
+	struct fuse_passthrough *passthrough = (struct fuse_passthrough *)p;
+
+	fuse_passthrough_release(passthrough);
+	kfree(p);
+
+	return 0;
+}
+
 void fuse_free_conn(struct fuse_conn *fc)
 {
 	WARN_ON(!list_empty(&fc->devices));
+	idr_for_each(&fc->passthrough_req, free_fuse_passthrough, NULL);
+	idr_destroy(&fc->passthrough_req);
 	kfree_rcu(fc, rcu);
 }
 EXPORT_SYMBOL_GPL(fuse_free_conn);
diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
new file mode 100644
index 0000000..ab81dd8
--- /dev/null
+++ b/fs/fuse/passthrough.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "fuse_i.h"
+
+#include <linux/fuse.h>
+#include <linux/idr.h>
+#include <linux/uio.h>
+
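+/*
+ * Wrapper tracking an asynchronous passthrough request: the kiocb cloned
+ * onto the lower file and the original FUSE kiocb to complete once the
+ * lower filesystem finishes the I/O.
+ */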
+struct fuse_aio_req {
+	struct kiocb iocb;
+	struct kiocb *iocb_fuse;
+};
+
+static void fuse_copyattr(struct file *dst_file, struct file *src_file)
+{
+	struct inode *dst = file_inode(dst_file);
+	struct inode *src = file_inode(src_file);
+
+	i_size_write(dst, i_size_read(src));
+}
+
+static inline rwf_t iocb_to_rw_flags(int ifl)
+{
+	rwf_t flags = 0;
+
+	if (ifl & IOCB_APPEND)
+		flags |= RWF_APPEND;
+	if (ifl & IOCB_DSYNC)
+		flags |= RWF_DSYNC;
+	if (ifl & IOCB_HIPRI)
+		flags |= RWF_HIPRI;
+	if (ifl & IOCB_NOWAIT)
+		flags |= RWF_NOWAIT;
+	if (ifl & IOCB_SYNC)
+		flags |= RWF_SYNC;
+
+	return flags;
+}
+
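+/*
+ * Common completion work for both the sync and async paths: for writes,
+ * re-take the freeze protection released before submission, end the write,
+ * and copy the lower inode size back to the FUSE inode; then propagate the
+ * final file position to the original iocb.
+ */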
+static void fuse_aio_cleanup_handler(struct fuse_aio_req *aio_req)
+{
+	struct kiocb *iocb = &aio_req->iocb;
+	struct kiocb *iocb_fuse = aio_req->iocb_fuse;
+
+	if (iocb->ki_flags & IOCB_WRITE) {
+		__sb_writers_acquired(file_inode(iocb->ki_filp)->i_sb,
+				      SB_FREEZE_WRITE);
+		file_end_write(iocb->ki_filp);
+		fuse_copyattr(iocb_fuse->ki_filp, iocb->ki_filp);
+	}
+
+	iocb_fuse->ki_pos = iocb->ki_pos;
+	kfree(aio_req);
+}
+
+static void fuse_aio_rw_complete(struct kiocb *iocb, long res, long res2)
+{
+	struct fuse_aio_req *aio_req =
+		container_of(iocb, struct fuse_aio_req, iocb);
+	struct kiocb *iocb_fuse = aio_req->iocb_fuse;
+
+	fuse_aio_cleanup_handler(aio_req);
+	iocb_fuse->ki_complete(iocb_fuse, res, res2);
+}
+
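+/*
+ * Passthrough read path: I/O is issued directly against the lower file
+ * under the daemon's credentials. Synchronous iocbs translate to
+ * vfs_iter_read(); asynchronous ones are cloned onto the lower file and
+ * completed through fuse_aio_rw_complete().
+ */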
+ssize_t fuse_passthrough_read_iter(struct kiocb *iocb_fuse,
+				   struct iov_iter *iter)
+{
+	ssize_t ret;
+	const struct cred *old_cred;
+	struct file *fuse_filp = iocb_fuse->ki_filp;
+	struct fuse_file *ff = fuse_filp->private_data;
+	struct file *passthrough_filp = ff->passthrough.filp;
+
+	if (!iov_iter_count(iter))
+		return 0;
+
+	old_cred = override_creds(ff->passthrough.cred);
+	if (is_sync_kiocb(iocb_fuse)) {
+		ret = vfs_iter_read(passthrough_filp, iter, &iocb_fuse->ki_pos,
+				    iocb_to_rw_flags(iocb_fuse->ki_flags));
+	} else {
+		struct fuse_aio_req *aio_req;
+
+		aio_req = kmalloc(sizeof(struct fuse_aio_req), GFP_KERNEL);
+		if (!aio_req) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		aio_req->iocb_fuse = iocb_fuse;
+		kiocb_clone(&aio_req->iocb, iocb_fuse, passthrough_filp);
+		aio_req->iocb.ki_complete = fuse_aio_rw_complete;
+		ret = call_read_iter(passthrough_filp, &aio_req->iocb, iter);
+		if (ret != -EIOCBQUEUED)
+			fuse_aio_cleanup_handler(aio_req);
+	}
+out:
+	revert_creds(old_cred);
+
+	return ret;
+}
+
+ssize_t fuse_passthrough_write_iter(struct kiocb *iocb_fuse,
+				    struct iov_iter *iter)
+{
+	ssize_t ret;
+	const struct cred *old_cred;
+	struct file *fuse_filp = iocb_fuse->ki_filp;
+	struct fuse_file *ff = fuse_filp->private_data;
+	struct inode *fuse_inode = file_inode(fuse_filp);
+	struct file *passthrough_filp = ff->passthrough.filp;
+	struct inode *passthrough_inode = file_inode(passthrough_filp);
+
+	if (!iov_iter_count(iter))
+		return 0;
+
+	inode_lock(fuse_inode);
+
+	old_cred = override_creds(ff->passthrough.cred);
+	if (is_sync_kiocb(iocb_fuse)) {
+		file_start_write(passthrough_filp);
+		ret = vfs_iter_write(passthrough_filp, iter, &iocb_fuse->ki_pos,
+				     iocb_to_rw_flags(iocb_fuse->ki_flags));
+		file_end_write(passthrough_filp);
+		if (ret > 0)
+			fuse_copyattr(fuse_filp, passthrough_filp);
+	} else {
+		struct fuse_aio_req *aio_req;
+
+		aio_req = kmalloc(sizeof(struct fuse_aio_req), GFP_KERNEL);
+		if (!aio_req) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		file_start_write(passthrough_filp);
+		__sb_writers_release(passthrough_inode->i_sb, SB_FREEZE_WRITE);
+
+		aio_req->iocb_fuse = iocb_fuse;
+		kiocb_clone(&aio_req->iocb, iocb_fuse, passthrough_filp);
+		aio_req->iocb.ki_complete = fuse_aio_rw_complete;
+		ret = call_write_iter(passthrough_filp, &aio_req->iocb, iter);
+		if (ret != -EIOCBQUEUED)
+			fuse_aio_cleanup_handler(aio_req);
+	}
+out:
+	revert_creds(old_cred);
+	inode_unlock(fuse_inode);
+
+	return ret;
+}
+
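+/*
+ * Registers a lower filesystem file for passthrough on this connection.
+ * Rough daemon-side flow (a sketch; the device-side plumbing that calls
+ * this function is added elsewhere in this patch):
+ *
+ *	lower_fd = open(<backing file>, O_RDWR);   // daemon opens it itself
+ *	id = <passthrough-open request on /dev/fuse>(lower_fd);
+ *	open_out.passthrough_fh = id;              // echoed in FUSE_OPEN reply
+ *
+ * The returned id indexes a struct fuse_passthrough kept in the
+ * connection's IDR until fuse_passthrough_setup() claims it.
+ */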
+int fuse_passthrough_open(struct fuse_dev *fud,
+			  struct fuse_passthrough_out *pto)
+{
+	int res;
+	struct file *passthrough_filp;
+	struct fuse_conn *fc = fud->fc;
+	struct fuse_passthrough *passthrough;
+
+	if (!fc->passthrough)
+		return -EPERM;
+
+	/* This field is reserved for future implementation */
+	if (pto->len != 0)
+		return -EINVAL;
+
+	passthrough_filp = fget(pto->fd);
+	if (!passthrough_filp) {
+		pr_err("FUSE: invalid file descriptor for passthrough.\n");
+		return -EBADF;
+	}
+
+	if (!passthrough_filp->f_op->read_iter ||
+	    !passthrough_filp->f_op->write_iter) {
+		pr_err("FUSE: passthrough file lacks required file operations.\n");
+		fput(passthrough_filp);
+		return -EBADF;
+	}
+
+	passthrough = kmalloc(sizeof(struct fuse_passthrough), GFP_KERNEL);
+	if (!passthrough) {
+		fput(passthrough_filp);
+		return -ENOMEM;
+	}
+
+	passthrough->filp = passthrough_filp;
+	passthrough->cred = prepare_creds();
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&fc->passthrough_req_lock);
+	res = idr_alloc(&fc->passthrough_req, passthrough, 1, 0, GFP_ATOMIC);
+	spin_unlock(&fc->passthrough_req_lock);
+	idr_preload_end();
+	if (res <= 0) {
+		fuse_passthrough_release(passthrough);
+		kfree(passthrough);
+	}
+
+	return res;
+}
+
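+/*
+ * Consumes the IDR entry created by fuse_passthrough_open(): the daemon
+ * echoes the id back in the FUSE_OPEN/FUSE_CREATE reply, and the matching
+ * struct fuse_passthrough is moved from the connection-wide IDR into the
+ * fuse_file being opened.
+ */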
+int fuse_passthrough_setup(struct fuse_conn *fc, struct fuse_file *ff,
+			   struct fuse_open_out *openarg)
+{
+	struct inode *passthrough_inode;
+	struct super_block *passthrough_sb;
+	struct fuse_passthrough *passthrough;
+	int passthrough_fh = openarg->passthrough_fh;
+
+	if (!fc->passthrough)
+		return -EPERM;
+
+	/* Default case, passthrough is not requested */
+	if (passthrough_fh <= 0)
+		return -EINVAL;
+
+	spin_lock(&fc->passthrough_req_lock);
+	passthrough = idr_remove(&fc->passthrough_req, passthrough_fh);
+	spin_unlock(&fc->passthrough_req_lock);
+
+	if (!passthrough)
+		return -EINVAL;
+
+	passthrough_inode = file_inode(passthrough->filp);
+	passthrough_sb = passthrough_inode->i_sb;
+	if (passthrough_sb->s_stack_depth >= FILESYSTEM_MAX_STACK_DEPTH) {
+		pr_err("FUSE: fs stacking depth exceeded for passthrough\n");
+		fuse_passthrough_release(passthrough);
+		kfree(passthrough);
+		return -EINVAL;
+	}
+
+	ff->passthrough = *passthrough;
+	kfree(passthrough);
+
+	return 0;
+}
+
+void fuse_passthrough_release(struct fuse_passthrough *passthrough)
+{
+	if (passthrough->filp) {
+		fput(passthrough->filp);
+		passthrough->filp = NULL;
+	}
+	if (passthrough->cred) {
+		put_cred(passthrough->cred);
+		passthrough->cred = NULL;
+	}
+}
diff --git a/fs/fuse/xattr.c b/fs/fuse/xattr.c
index 371bdcb..5d4ddb9 100644
--- a/fs/fuse/xattr.c
+++ b/fs/fuse/xattr.c
@@ -176,7 +176,7 @@
 
 static int fuse_xattr_get(const struct xattr_handler *handler,
 			 struct dentry *dentry, struct inode *inode,
-			 const char *name, void *value, size_t size)
+			 const char *name, void *value, size_t size, int flags)
 {
 	return fuse_getxattr(inode, name, value, size);
 }
@@ -199,7 +199,7 @@
 
 static int no_xattr_get(const struct xattr_handler *handler,
 			struct dentry *dentry, struct inode *inode,
-			const char *name, void *value, size_t size)
+			const char *name, void *value, size_t size, int flags)
 {
 	return -EOPNOTSUPP;
 }
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 9d7667b..2e4a9b9 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -588,7 +588,8 @@
 
 static int gfs2_xattr_get(const struct xattr_handler *handler,
 			  struct dentry *unused, struct inode *inode,
-			  const char *name, void *buffer, size_t size)
+			  const char *name, void *buffer, size_t size,
+			  int flags)
 {
 	struct gfs2_inode *ip = GFS2_I(inode);
 	struct gfs2_holder gh;
diff --git a/fs/hfs/attr.c b/fs/hfs/attr.c
index 74fa6264..08222a9 100644
--- a/fs/hfs/attr.c
+++ b/fs/hfs/attr.c
@@ -115,7 +115,7 @@
 
 static int hfs_xattr_get(const struct xattr_handler *handler,
 			 struct dentry *unused, struct inode *inode,
-			 const char *name, void *value, size_t size)
+			 const char *name, void *value, size_t size, int flags)
 {
 	return __hfs_getxattr(inode, handler->flags, value, size);
 }
diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
index bb0b27d8..381c2aa 100644
--- a/fs/hfsplus/xattr.c
+++ b/fs/hfsplus/xattr.c
@@ -839,7 +839,8 @@
 
 static int hfsplus_osx_getxattr(const struct xattr_handler *handler,
 				struct dentry *unused, struct inode *inode,
-				const char *name, void *buffer, size_t size)
+				const char *name, void *buffer, size_t size,
+				int flags)
 {
 	/*
 	 * Don't allow retrieving properly prefixed attributes
diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c
index cfbe6a3..43e28b3 100644
--- a/fs/hfsplus/xattr_security.c
+++ b/fs/hfsplus/xattr_security.c
@@ -15,7 +15,8 @@
 
 static int hfsplus_security_getxattr(const struct xattr_handler *handler,
 				     struct dentry *unused, struct inode *inode,
-				     const char *name, void *buffer, size_t size)
+				     const char *name, void *buffer,
+				     size_t size, int flags)
 {
 	return hfsplus_getxattr(inode, name, buffer, size,
 				XATTR_SECURITY_PREFIX,
diff --git a/fs/hfsplus/xattr_trusted.c b/fs/hfsplus/xattr_trusted.c
index fbad91e..54d9263 100644
--- a/fs/hfsplus/xattr_trusted.c
+++ b/fs/hfsplus/xattr_trusted.c
@@ -14,7 +14,8 @@
 
 static int hfsplus_trusted_getxattr(const struct xattr_handler *handler,
 				    struct dentry *unused, struct inode *inode,
-				    const char *name, void *buffer, size_t size)
+				    const char *name, void *buffer,
+				    size_t size, int flags)
 {
 	return hfsplus_getxattr(inode, name, buffer, size,
 				XATTR_TRUSTED_PREFIX,
diff --git a/fs/hfsplus/xattr_user.c b/fs/hfsplus/xattr_user.c
index 74d19fa..4d2b1ff 100644
--- a/fs/hfsplus/xattr_user.c
+++ b/fs/hfsplus/xattr_user.c
@@ -14,7 +14,8 @@
 
 static int hfsplus_user_getxattr(const struct xattr_handler *handler,
 				 struct dentry *unused, struct inode *inode,
-				 const char *name, void *buffer, size_t size)
+				 const char *name, void *buffer, size_t size,
+				 int flags)
 {
 
 	return hfsplus_getxattr(inode, name, buffer, size,
diff --git a/fs/incfs/Kconfig b/fs/incfs/Kconfig
new file mode 100644
index 0000000..5f15000
--- /dev/null
+++ b/fs/incfs/Kconfig
@@ -0,0 +1,13 @@
+config INCREMENTAL_FS
+	tristate "Incremental file system support"
+	depends on BLOCK
+	select DECOMPRESS_LZ4
+	select DECOMPRESS_ZSTD
+	select CRYPTO_SHA256
+	help
+	  Incremental FS is a read-only virtual file system that facilitates execution
+	  of programs while their binaries are still being lazily downloaded over the
+	  network, USB or pigeon post.
+
+	  To compile this file system support as a module, choose M here: the
+	  module will be called incrementalfs.
diff --git a/fs/incfs/Makefile b/fs/incfs/Makefile
new file mode 100644
index 0000000..0c3f6d3
--- /dev/null
+++ b/fs/incfs/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INCREMENTAL_FS)	+= incrementalfs.o
+
+incrementalfs-y := \
+	data_mgmt.o \
+	format.o \
+	integrity.o \
+	main.o \
+	pseudo_files.o \
+	vfs.o
diff --git a/fs/incfs/data_mgmt.c b/fs/incfs/data_mgmt.c
new file mode 100644
index 0000000..48ab7428
--- /dev/null
+++ b/fs/incfs/data_mgmt.c
@@ -0,0 +1,1667 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Google LLC
+ */
+#include <linux/crc32.h>
+#include <linux/delay.h>
+#include <linux/file.h>
+#include <linux/gfp.h>
+#include <linux/ktime.h>
+#include <linux/lz4.h>
+#include <linux/mm.h>
+#include <linux/namei.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "data_mgmt.h"
+#include "format.h"
+#include "integrity.h"
+
+static int incfs_scan_metadata_chain(struct data_file *df);
+
+static void log_wake_up_all(struct work_struct *work)
+{
+	struct delayed_work *dw = container_of(work, struct delayed_work, work);
+	struct read_log *rl = container_of(dw, struct read_log, ml_wakeup_work);
+
+	wake_up_all(&rl->ml_notif_wq);
+}
+
+static void zstd_free_workspace(struct work_struct *work)
+{
+	struct delayed_work *dw = container_of(work, struct delayed_work, work);
+	struct mount_info *mi =
+		container_of(dw, struct mount_info, mi_zstd_cleanup_work);
+
+	mutex_lock(&mi->mi_zstd_workspace_mutex);
+	kvfree(mi->mi_zstd_workspace);
+	mi->mi_zstd_workspace = NULL;
+	mi->mi_zstd_stream = NULL;
+	mutex_unlock(&mi->mi_zstd_workspace_mutex);
+}
+
+struct mount_info *incfs_alloc_mount_info(struct super_block *sb,
+					  struct mount_options *options,
+					  struct path *backing_dir_path)
+{
+	struct mount_info *mi = NULL;
+	int error = 0;
+
+	mi = kzalloc(sizeof(*mi), GFP_NOFS);
+	if (!mi)
+		return ERR_PTR(-ENOMEM);
+
+	mi->mi_sb = sb;
+	mi->mi_backing_dir_path = *backing_dir_path;
+	mi->mi_owner = get_current_cred();
+	path_get(&mi->mi_backing_dir_path);
+	mutex_init(&mi->mi_dir_struct_mutex);
+	init_waitqueue_head(&mi->mi_pending_reads_notif_wq);
+	init_waitqueue_head(&mi->mi_log.ml_notif_wq);
+	init_waitqueue_head(&mi->mi_blocks_written_notif_wq);
+	atomic_set(&mi->mi_blocks_written, 0);
+	INIT_DELAYED_WORK(&mi->mi_log.ml_wakeup_work, log_wake_up_all);
+	spin_lock_init(&mi->mi_log.rl_lock);
+	spin_lock_init(&mi->pending_read_lock);
+	INIT_LIST_HEAD(&mi->mi_reads_list_head);
+	spin_lock_init(&mi->mi_per_uid_read_timeouts_lock);
+	mutex_init(&mi->mi_zstd_workspace_mutex);
+	INIT_DELAYED_WORK(&mi->mi_zstd_cleanup_work, zstd_free_workspace);
+
+	error = incfs_realloc_mount_info(mi, options);
+	if (error)
+		goto err;
+
+	return mi;
+
+err:
+	incfs_free_mount_info(mi);
+	return ERR_PTR(error);
+}
+
+int incfs_realloc_mount_info(struct mount_info *mi,
+			     struct mount_options *options)
+{
+	void *new_buffer = NULL;
+	void *old_buffer;
+	size_t new_buffer_size = 0;
+
+	if (options->read_log_pages != mi->mi_options.read_log_pages) {
+		struct read_log_state log_state;
+		/*
+		 * Even though having two buffers allocated at once isn't
+		 * usually good, allocating a multipage buffer under a spinlock
+		 * is even worse, so let's optimize for the shorter lock
+		 * duration. It's not the end of the world if we fail to
+		 * increase the buffer size anyway.
+		 */
+		if (options->read_log_pages > 0) {
+			new_buffer_size = PAGE_SIZE * options->read_log_pages;
+			new_buffer = kzalloc(new_buffer_size, GFP_NOFS);
+			if (!new_buffer)
+				return -ENOMEM;
+		}
+
+		spin_lock(&mi->mi_log.rl_lock);
+		old_buffer = mi->mi_log.rl_ring_buf;
+		mi->mi_log.rl_ring_buf = new_buffer;
+		mi->mi_log.rl_size = new_buffer_size;
+		log_state = (struct read_log_state){
+			.generation_id = mi->mi_log.rl_head.generation_id + 1,
+		};
+		mi->mi_log.rl_head = log_state;
+		mi->mi_log.rl_tail = log_state;
+		spin_unlock(&mi->mi_log.rl_lock);
+
+		kfree(old_buffer);
+	}
+
+	mi->mi_options = *options;
+	return 0;
+}
+
+void incfs_free_mount_info(struct mount_info *mi)
+{
+	if (!mi)
+		return;
+
+	flush_delayed_work(&mi->mi_log.ml_wakeup_work);
+	flush_delayed_work(&mi->mi_zstd_cleanup_work);
+
+	dput(mi->mi_index_dir);
+	dput(mi->mi_incomplete_dir);
+	path_put(&mi->mi_backing_dir_path);
+	mutex_destroy(&mi->mi_dir_struct_mutex);
+	mutex_destroy(&mi->mi_zstd_workspace_mutex);
+	put_cred(mi->mi_owner);
+	kfree(mi->mi_log.rl_ring_buf);
+	kfree(mi->log_xattr);
+	kfree(mi->pending_read_xattr);
+	kfree(mi->mi_per_uid_read_timeouts);
+	kfree(mi);
+}
+
+static void data_file_segment_init(struct data_file_segment *segment)
+{
+	init_waitqueue_head(&segment->new_data_arrival_wq);
+	init_rwsem(&segment->rwsem);
+	INIT_LIST_HEAD(&segment->reads_list_head);
+}
+
+char *file_id_to_str(incfs_uuid_t id)
+{
+	char *result = kmalloc(1 + sizeof(id.bytes) * 2, GFP_NOFS);
+	char *end;
+
+	if (!result)
+		return NULL;
+
+	end = bin2hex(result, id.bytes, sizeof(id.bytes));
+	*end = 0;
+	return result;
+}
+
+struct dentry *incfs_lookup_dentry(struct dentry *parent, const char *name)
+{
+	struct inode *inode;
+	struct dentry *result = NULL;
+
+	if (!parent)
+		return ERR_PTR(-EFAULT);
+
+	inode = d_inode(parent);
+	inode_lock_nested(inode, I_MUTEX_PARENT);
+	result = lookup_one_len(name, parent, strlen(name));
+	inode_unlock(inode);
+
+	if (IS_ERR(result))
+		pr_warn("%s err:%ld\n", __func__, PTR_ERR(result));
+
+	return result;
+}
+
+static struct data_file *handle_mapped_file(struct mount_info *mi,
+					    struct data_file *df)
+{
+	char *file_id_str;
+	struct dentry *index_file_dentry;
+	struct path path;
+	struct file *bf;
+	struct data_file *result = NULL;
+
+	file_id_str = file_id_to_str(df->df_id);
+	if (!file_id_str)
+		return ERR_PTR(-ENOENT);
+
+	index_file_dentry = incfs_lookup_dentry(mi->mi_index_dir,
+						file_id_str);
+	kfree(file_id_str);
+	if (!index_file_dentry)
+		return ERR_PTR(-ENOENT);
+	if (IS_ERR(index_file_dentry))
+		return ERR_CAST(index_file_dentry);
+	if (!d_really_is_positive(index_file_dentry)) {
+		result = ERR_PTR(-ENOENT);
+		goto out;
+	}
+
+	path = (struct path) {
+		.mnt = mi->mi_backing_dir_path.mnt,
+		.dentry = index_file_dentry
+	};
+
+	bf = dentry_open(&path, O_RDWR | O_NOATIME | O_LARGEFILE, mi->mi_owner);
+	if (IS_ERR(bf)) {
+		result = ERR_CAST(bf);
+		goto out;
+	}
+
+	result = incfs_open_data_file(mi, bf);
+	fput(bf);
+	if (IS_ERR(result))
+		goto out;
+
+	result->df_mapped_offset = df->df_metadata_off;
+
+out:
+	dput(index_file_dentry);
+	return result;
+}
+
+struct data_file *incfs_open_data_file(struct mount_info *mi, struct file *bf)
+{
+	struct data_file *df = NULL;
+	struct backing_file_context *bfc = NULL;
+	int md_records;
+	u64 size;
+	int error = 0;
+	int i;
+
+	if (!bf || !mi)
+		return ERR_PTR(-EFAULT);
+
+	if (!S_ISREG(bf->f_inode->i_mode))
+		return ERR_PTR(-EBADF);
+
+	bfc = incfs_alloc_bfc(bf);
+	if (IS_ERR(bfc))
+		return ERR_CAST(bfc);
+
+	df = kzalloc(sizeof(*df), GFP_NOFS);
+	if (!df) {
+		error = -ENOMEM;
+		goto out;
+	}
+
+	df->df_backing_file_context = bfc;
+	df->df_mount_info = mi;
+	for (i = 0; i < ARRAY_SIZE(df->df_segments); i++)
+		data_file_segment_init(&df->df_segments[i]);
+
+	error = incfs_read_file_header(bfc, &df->df_metadata_off, &df->df_id,
+				       &size, &df->df_header_flags);
+
+	if (error)
+		goto out;
+
+	df->df_size = size;
+	if (size > 0)
+		df->df_data_block_count = get_blocks_count_for_size(size);
+
+	if (df->df_header_flags & INCFS_FILE_MAPPED) {
+		struct data_file *mapped_df = handle_mapped_file(mi, df);
+
+		incfs_free_data_file(df);
+		return mapped_df;
+	}
+
+	md_records = incfs_scan_metadata_chain(df);
+	if (md_records < 0)
+		error = md_records;
+
+out:
+	if (error) {
+		incfs_free_bfc(bfc);
+		if (df)
+			df->df_backing_file_context = NULL;
+		incfs_free_data_file(df);
+		return ERR_PTR(error);
+	}
+	return df;
+}
+
+void incfs_free_data_file(struct data_file *df)
+{
+	u32 data_blocks_written, hash_blocks_written;
+
+	if (!df)
+		return;
+
+	data_blocks_written = atomic_read(&df->df_data_blocks_written);
+	hash_blocks_written = atomic_read(&df->df_hash_blocks_written);
+
+	if (data_blocks_written != df->df_initial_data_blocks_written ||
+	    hash_blocks_written != df->df_initial_hash_blocks_written) {
+		struct backing_file_context *bfc = df->df_backing_file_context;
+		int error = -1;
+
+		if (bfc && !mutex_lock_interruptible(&bfc->bc_mutex)) {
+			error = incfs_write_status_to_backing_file(
+						df->df_backing_file_context,
+						df->df_status_offset,
+						data_blocks_written,
+						hash_blocks_written);
+			mutex_unlock(&bfc->bc_mutex);
+		}
+
+		if (error)
+			/* Nothing can be done, just warn */
+			pr_warn("incfs: failed to write status to backing file\n");
+	}
+
+	incfs_free_mtree(df->df_hash_tree);
+	incfs_free_bfc(df->df_backing_file_context);
+	kfree(df);
+}
+
+int make_inode_ready_for_data_ops(struct mount_info *mi,
+				struct inode *inode,
+				struct file *backing_file)
+{
+	struct inode_info *node = get_incfs_node(inode);
+	struct data_file *df = NULL;
+	int err = 0;
+
+	inode_lock(inode);
+	if (S_ISREG(inode->i_mode)) {
+		if (!node->n_file) {
+			df = incfs_open_data_file(mi, backing_file);
+
+			if (IS_ERR(df))
+				err = PTR_ERR(df);
+			else
+				node->n_file = df;
+		}
+	} else
+		err = -EBADF;
+	inode_unlock(inode);
+	return err;
+}
+
+struct dir_file *incfs_open_dir_file(struct mount_info *mi, struct file *bf)
+{
+	struct dir_file *dir = NULL;
+
+	if (!S_ISDIR(bf->f_inode->i_mode))
+		return ERR_PTR(-EBADF);
+
+	dir = kzalloc(sizeof(*dir), GFP_NOFS);
+	if (!dir)
+		return ERR_PTR(-ENOMEM);
+
+	dir->backing_dir = get_file(bf);
+	dir->mount_info = mi;
+	return dir;
+}
+
+void incfs_free_dir_file(struct dir_file *dir)
+{
+	if (!dir)
+		return;
+	if (dir->backing_dir)
+		fput(dir->backing_dir);
+	kfree(dir);
+}
+
+static ssize_t zstd_decompress_safe(struct mount_info *mi,
+				    struct mem_range src, struct mem_range dst)
+{
+	ssize_t result;
+	ZSTD_inBuffer inbuf = {.src = src.data, .size = src.len};
+	ZSTD_outBuffer outbuf = {.dst = dst.data, .size = dst.len};
+
+	result = mutex_lock_interruptible(&mi->mi_zstd_workspace_mutex);
+	if (result)
+		return result;
+
+	if (!mi->mi_zstd_stream) {
+		unsigned int workspace_size = ZSTD_DStreamWorkspaceBound(
+						INCFS_DATA_FILE_BLOCK_SIZE);
+		void *workspace = kvmalloc(workspace_size, GFP_NOFS);
+		ZSTD_DStream *stream;
+
+		if (!workspace) {
+			result = -ENOMEM;
+			goto out;
+		}
+
+		stream = ZSTD_initDStream(INCFS_DATA_FILE_BLOCK_SIZE, workspace,
+				  workspace_size);
+		if (!stream) {
+			kvfree(workspace);
+			result = -EIO;
+			goto out;
+		}
+
+		mi->mi_zstd_workspace = workspace;
+		mi->mi_zstd_stream = stream;
+	}
+
+	result = ZSTD_decompressStream(mi->mi_zstd_stream, &outbuf, &inbuf) ?
+		-EBADMSG : outbuf.pos;
+
+	mod_delayed_work(system_wq, &mi->mi_zstd_cleanup_work,
+			 msecs_to_jiffies(5000));
+
+out:
+	mutex_unlock(&mi->mi_zstd_workspace_mutex);
+	return result;
+}
+
+static ssize_t decompress(struct mount_info *mi,
+			  struct mem_range src, struct mem_range dst, int alg)
+{
+	int result;
+
+	switch (alg) {
+	case INCFS_BLOCK_COMPRESSED_LZ4:
+		result = LZ4_decompress_safe(src.data, dst.data, src.len,
+					     dst.len);
+		if (result < 0)
+			return -EBADMSG;
+		return result;
+
+	case INCFS_BLOCK_COMPRESSED_ZSTD:
+		return zstd_decompress_safe(mi, src, dst);
+
+	default:
+		WARN_ON(true);
+		return -EOPNOTSUPP;
+	}
+}
+
+static void log_read_one_record(struct read_log *rl, struct read_log_state *rs)
+{
+	union log_record *record =
+		(union log_record *)((u8 *)rl->rl_ring_buf + rs->next_offset);
+	size_t record_size;
+
+	switch (record->full_record.type) {
+	case FULL:
+		rs->base_record = record->full_record;
+		record_size = sizeof(record->full_record);
+		break;
+
+	case SAME_FILE:
+		rs->base_record.block_index =
+			record->same_file_record.block_index;
+		rs->base_record.absolute_ts_us +=
+			record->same_file_record.relative_ts_us;
+		record_size = sizeof(record->same_file_record);
+		break;
+
+	case SAME_FILE_NEXT_BLOCK:
+		++rs->base_record.block_index;
+		rs->base_record.absolute_ts_us +=
+			record->same_file_next_block.relative_ts_us;
+		record_size = sizeof(record->same_file_next_block);
+		break;
+
+	case SAME_FILE_NEXT_BLOCK_SHORT:
+		++rs->base_record.block_index;
+		rs->base_record.absolute_ts_us +=
+			record->same_file_next_block_short.relative_ts_us;
+		record_size = sizeof(record->same_file_next_block_short);
+		break;
+	}
+
+	rs->next_offset += record_size;
+	if (rs->next_offset > rl->rl_size - sizeof(*record)) {
+		rs->next_offset = 0;
+		++rs->current_pass_no;
+	}
+	++rs->current_record_no;
+}
+
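+/*
+ * Appends a record to the read log, using the smallest record type that can
+ * encode the delta from the previous record: a 2-byte record for the next
+ * sequential block within ~16 ms, a 4-byte one within ~15 min, an 8-byte
+ * same-file record within ~1 hour, and a full record otherwise (new file,
+ * new uid, or an older timestamp delta).
+ */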
+static void log_block_read(struct mount_info *mi, incfs_uuid_t *id,
+			   int block_index)
+{
+	struct read_log *log = &mi->mi_log;
+	struct read_log_state *head, *tail;
+	s64 now_us;
+	s64 relative_us;
+	union log_record record;
+	size_t record_size;
+	uid_t uid = current_uid().val;
+
+	/*
+	 * This may read a stale rl_size; worst case, logging starts slightly
+	 * late after a configuration update, which is acceptable.
+	 */
+	if (READ_ONCE(log->rl_size) == 0)
+		return;
+
+	now_us = ktime_to_us(ktime_get());
+
+	spin_lock(&log->rl_lock);
+	if (log->rl_size == 0) {
+		spin_unlock(&log->rl_lock);
+		return;
+	}
+
+	head = &log->rl_head;
+	tail = &log->rl_tail;
+	relative_us = now_us - head->base_record.absolute_ts_us;
+
+	if (memcmp(id, &head->base_record.file_id, sizeof(incfs_uuid_t)) ||
+	    relative_us >= 1ll << 32 ||
+	    uid != head->base_record.uid) {
+		record.full_record = (struct full_record){
+			.type = FULL,
+			.block_index = block_index,
+			.file_id = *id,
+			.absolute_ts_us = now_us,
+			.uid = uid,
+		};
+		head->base_record.file_id = *id;
+		record_size = sizeof(struct full_record);
+	} else if (block_index != head->base_record.block_index + 1 ||
+		   relative_us >= 1 << 30) {
+		record.same_file_record = (struct same_file_record){
+			.type = SAME_FILE,
+			.block_index = block_index,
+			.relative_ts_us = relative_us,
+		};
+		record_size = sizeof(struct same_file_record);
+	} else if (relative_us >= 1 << 14) {
+		record.same_file_next_block = (struct same_file_next_block){
+			.type = SAME_FILE_NEXT_BLOCK,
+			.relative_ts_us = relative_us,
+		};
+		record_size = sizeof(struct same_file_next_block);
+	} else {
+		record.same_file_next_block_short =
+			(struct same_file_next_block_short){
+				.type = SAME_FILE_NEXT_BLOCK_SHORT,
+				.relative_ts_us = relative_us,
+			};
+		record_size = sizeof(struct same_file_next_block_short);
+	}
+
+	head->base_record.block_index = block_index;
+	head->base_record.absolute_ts_us = now_us;
+
+	/* Advance the tail past the area we are about to overwrite */
+	while (tail->current_pass_no < head->current_pass_no &&
+	       tail->next_offset < head->next_offset + record_size)
+		log_read_one_record(log, tail);
+
+	memcpy(((u8 *)log->rl_ring_buf) + head->next_offset, &record,
+	       record_size);
+	head->next_offset += record_size;
+	if (head->next_offset > log->rl_size - sizeof(record)) {
+		head->next_offset = 0;
+		++head->current_pass_no;
+	}
+	++head->current_record_no;
+
+	spin_unlock(&log->rl_lock);
+	schedule_delayed_work(&log->ml_wakeup_work, msecs_to_jiffies(16));
+}
+
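+/*
+ * Verifies one data block against the file's Merkle tree: walk from the
+ * root hash down to the leaf level, validating each hash block on the way.
+ * Hash blocks that have already been verified are cached in the page cache
+ * with PG_checked set, so the descent can short-circuit on later reads.
+ */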
+static int validate_hash_tree(struct file *bf, struct file *f, int block_index,
+			      struct mem_range data, u8 *buf)
+{
+	struct data_file *df = get_incfs_data_file(f);
+	u8 stored_digest[INCFS_MAX_HASH_SIZE] = {};
+	u8 calculated_digest[INCFS_MAX_HASH_SIZE] = {};
+	struct mtree *tree = NULL;
+	struct incfs_df_signature *sig = NULL;
+	int digest_size;
+	int hash_block_index = block_index;
+	int lvl;
+	int res;
+	loff_t hash_block_offset[INCFS_MAX_MTREE_LEVELS];
+	size_t hash_offset_in_block[INCFS_MAX_MTREE_LEVELS];
+	int hash_per_block;
+	pgoff_t file_pages;
+
+	tree = df->df_hash_tree;
+	sig = df->df_signature;
+	if (!tree || !sig)
+		return 0;
+
+	digest_size = tree->alg->digest_size;
+	hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size;
+	for (lvl = 0; lvl < tree->depth; lvl++) {
+		loff_t lvl_off = tree->hash_level_suboffset[lvl];
+
+		hash_block_offset[lvl] =
+			lvl_off + round_down(hash_block_index * digest_size,
+					     INCFS_DATA_FILE_BLOCK_SIZE);
+		hash_offset_in_block[lvl] = hash_block_index * digest_size %
+					    INCFS_DATA_FILE_BLOCK_SIZE;
+		hash_block_index /= hash_per_block;
+	}
+
+	memcpy(stored_digest, tree->root_hash, digest_size);
+
+	file_pages = DIV_ROUND_UP(df->df_size, INCFS_DATA_FILE_BLOCK_SIZE);
+	for (lvl = tree->depth - 1; lvl >= 0; lvl--) {
+		pgoff_t hash_page =
+			file_pages +
+			hash_block_offset[lvl] / INCFS_DATA_FILE_BLOCK_SIZE;
+		struct page *page = find_get_page_flags(
+			f->f_inode->i_mapping, hash_page, FGP_ACCESSED);
+
+		if (page && PageChecked(page)) {
+			u8 *addr = kmap_atomic(page);
+
+			memcpy(stored_digest, addr + hash_offset_in_block[lvl],
+			       digest_size);
+			kunmap_atomic(addr);
+			put_page(page);
+			continue;
+		}
+
+		if (page)
+			put_page(page);
+
+		res = incfs_kread(bf, buf, INCFS_DATA_FILE_BLOCK_SIZE,
+				  hash_block_offset[lvl] + sig->hash_offset);
+		if (res < 0)
+			return res;
+		if (res != INCFS_DATA_FILE_BLOCK_SIZE)
+			return -EIO;
+		res = incfs_calc_digest(tree->alg,
+					range(buf, INCFS_DATA_FILE_BLOCK_SIZE),
+					range(calculated_digest, digest_size));
+		if (res)
+			return res;
+
+		if (memcmp(stored_digest, calculated_digest, digest_size)) {
+			int i;
+			bool zero = true;
+
+			pr_debug("incfs: Hash mismatch lvl:%d blk:%d\n",
+				lvl, block_index);
+			for (i = 0; i < digest_size; i++)
+				if (stored_digest[i]) {
+					zero = false;
+					break;
+				}
+
+			if (zero)
+				pr_debug("incfs: Note saved_digest all zero - did you forget to load the hashes?\n");
+			return -EBADMSG;
+		}
+
+		memcpy(stored_digest, buf + hash_offset_in_block[lvl],
+		       digest_size);
+
+		page = grab_cache_page(f->f_inode->i_mapping, hash_page);
+		if (page) {
+			u8 *addr = kmap_atomic(page);
+
+			memcpy(addr, buf, INCFS_DATA_FILE_BLOCK_SIZE);
+			kunmap_atomic(addr);
+			SetPageChecked(page);
+			unlock_page(page);
+			put_page(page);
+		}
+	}
+
+	res = incfs_calc_digest(tree->alg, data,
+				range(calculated_digest, digest_size));
+	if (res)
+		return res;
+
+	if (memcmp(stored_digest, calculated_digest, digest_size)) {
+		pr_debug("incfs: Leaf hash mismatch blk:%d\n", block_index);
+		return -EBADMSG;
+	}
+
+	return 0;
+}
+
+static struct data_file_segment *get_file_segment(struct data_file *df,
+						  int block_index)
+{
+	int seg_idx = block_index % ARRAY_SIZE(df->df_segments);
+
+	return &df->df_segments[seg_idx];
+}
+
+static bool is_data_block_present(struct data_file_block *block)
+{
+	return (block->db_backing_file_data_offset != 0) &&
+	       (block->db_stored_size != 0);
+}
+
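+/*
+ * Unpacks an on-disk blockmap entry: the backing-file data offset is stored
+ * split into a 16-bit high part and a 32-bit low part (48 bits total),
+ * alongside the stored size and the compression flags.
+ */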
+static void convert_data_file_block(struct incfs_blockmap_entry *bme,
+				    struct data_file_block *res_block)
+{
+	u16 flags = le16_to_cpu(bme->me_flags);
+
+	res_block->db_backing_file_data_offset =
+		le16_to_cpu(bme->me_data_offset_hi);
+	res_block->db_backing_file_data_offset <<= 32;
+	res_block->db_backing_file_data_offset |=
+		le32_to_cpu(bme->me_data_offset_lo);
+	res_block->db_stored_size = le16_to_cpu(bme->me_data_size);
+	res_block->db_comp_alg = flags & INCFS_BLOCK_COMPRESSED_MASK;
+}
+
+static int get_data_file_block(struct data_file *df, int index,
+			       struct data_file_block *res_block)
+{
+	struct incfs_blockmap_entry bme = {};
+	struct backing_file_context *bfc = NULL;
+	loff_t blockmap_off = 0;
+	int error = 0;
+
+	if (!df || !res_block)
+		return -EFAULT;
+
+	blockmap_off = df->df_blockmap_off;
+	bfc = df->df_backing_file_context;
+
+	if (index < 0 || blockmap_off == 0)
+		return -EINVAL;
+
+	error = incfs_read_blockmap_entry(bfc, index, blockmap_off, &bme);
+	if (error)
+		return error;
+
+	convert_data_file_block(&bme, res_block);
+	return 0;
+}
+
+static int check_room_for_one_range(u32 size, u32 size_out)
+{
+	if (size_out + sizeof(struct incfs_filled_range) > size)
+		return -ERANGE;
+	return 0;
+}
+
+static int copy_one_range(struct incfs_filled_range *range, void __user *buffer,
+			  u32 size, u32 *size_out)
+{
+	int error = check_room_for_one_range(size, *size_out);
+	if (error)
+		return error;
+
+	if (copy_to_user(((char __user *)buffer) + *size_out, range,
+				sizeof(*range)))
+		return -EFAULT;
+
+	*size_out += sizeof(*range);
+	return 0;
+}
+
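+/*
+ * Reports which blocks of a file are already present, coalescing adjacent
+ * filled blocks into ranges copied out to userspace. Blockmap entries are
+ * read in batches of READ_BLOCKMAP_ENTRIES to limit backing-file I/O.
+ */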
+#define READ_BLOCKMAP_ENTRIES 512
+int incfs_get_filled_blocks(struct data_file *df,
+			    struct incfs_file_data *fd,
+			    struct incfs_get_filled_blocks_args *arg)
+{
+	int error = 0;
+	bool in_range = false;
+	struct incfs_filled_range range;
+	void __user *buffer = u64_to_user_ptr(arg->range_buffer);
+	u32 size = arg->range_buffer_size;
+	u32 end_index =
+		arg->end_index ? arg->end_index : df->df_total_block_count;
+	u32 *size_out = &arg->range_buffer_size_out;
+	int i = READ_BLOCKMAP_ENTRIES - 1;
+	int entries_read = 0;
+	struct incfs_blockmap_entry *bme;
+	int data_blocks_filled = 0;
+	int hash_blocks_filled = 0;
+
+	*size_out = 0;
+	if (end_index > df->df_total_block_count)
+		end_index = df->df_total_block_count;
+	arg->total_blocks_out = df->df_total_block_count;
+	arg->data_blocks_out = df->df_data_block_count;
+
+	if (atomic_read(&df->df_data_blocks_written) ==
+	    df->df_data_block_count) {
+		pr_debug("File marked full, fast get_filled_blocks\n");
+		arg->index_out = arg->start_index;
+		if (arg->start_index > end_index)
+			return 0;
+
+		error = check_room_for_one_range(size, *size_out);
+		if (error)
+			return error;
+
+		range = (struct incfs_filled_range){
+			.begin = arg->start_index,
+			.end = end_index,
+		};
+
+		error = copy_one_range(&range, buffer, size, size_out);
+		if (error)
+			return error;
+		arg->index_out = end_index;
+		return 0;
+	}
+
+	bme = kzalloc(sizeof(*bme) * READ_BLOCKMAP_ENTRIES,
+		      GFP_NOFS | __GFP_COMP);
+	if (!bme)
+		return -ENOMEM;
+
+	for (arg->index_out = arg->start_index; arg->index_out < end_index;
+	     ++arg->index_out) {
+		struct data_file_block dfb;
+
+		if (++i == READ_BLOCKMAP_ENTRIES) {
+			entries_read = incfs_read_blockmap_entries(
+				df->df_backing_file_context, bme,
+				arg->index_out, READ_BLOCKMAP_ENTRIES,
+				df->df_blockmap_off);
+			if (entries_read < 0) {
+				error = entries_read;
+				break;
+			}
+
+			i = 0;
+		}
+
+		if (i >= entries_read) {
+			error = -EIO;
+			break;
+		}
+
+		convert_data_file_block(bme + i, &dfb);
+
+		if (is_data_block_present(&dfb)) {
+			if (arg->index_out >= df->df_data_block_count)
+				++hash_blocks_filled;
+			else
+				++data_blocks_filled;
+		}
+
+		if (is_data_block_present(&dfb) == in_range)
+			continue;
+
+		if (!in_range) {
+			error = check_room_for_one_range(size, *size_out);
+			if (error)
+				break;
+			in_range = true;
+			range.begin = arg->index_out;
+		} else {
+			range.end = arg->index_out;
+			error = copy_one_range(&range, buffer, size, size_out);
+			if (error) {
+				/*
+				 * There will be another attempt after the
+				 * loop; it will reset index_out if that one
+				 * fails too.
+				 */
+				break;
+			}
+			in_range = false;
+		}
+	}
+
+	if (in_range) {
+		range.end = arg->index_out;
+		error = copy_one_range(&range, buffer, size, size_out);
+		if (error)
+			arg->index_out = range.begin;
+	}
+
+	if (arg->start_index == 0) {
+		fd->fd_get_block_pos = 0;
+		fd->fd_filled_data_blocks = 0;
+		fd->fd_filled_hash_blocks = 0;
+	}
+
+	if (arg->start_index == fd->fd_get_block_pos) {
+		fd->fd_get_block_pos = arg->index_out + 1;
+		fd->fd_filled_data_blocks += data_blocks_filled;
+		fd->fd_filled_hash_blocks += hash_blocks_filled;
+	}
+
+	if (fd->fd_get_block_pos == df->df_total_block_count + 1) {
+		if (fd->fd_filled_data_blocks >
+		   atomic_read(&df->df_data_blocks_written))
+			atomic_set(&df->df_data_blocks_written,
+				   fd->fd_filled_data_blocks);
+
+		if (fd->fd_filled_hash_blocks >
+		   atomic_read(&df->df_hash_blocks_written))
+			atomic_set(&df->df_hash_blocks_written,
+				   fd->fd_filled_hash_blocks);
+	}
+
+	kfree(bme);
+	return error;
+}
+
+static bool is_read_done(struct pending_read *read)
+{
+	return atomic_read_acquire(&read->done) != 0;
+}
+
+static void set_read_done(struct pending_read *read)
+{
+	atomic_set_release(&read->done, 1);
+}
+
+/*
+ * Notifies a given data file about a pending read from a given block.
+ * Returns a new pending read entry.
+ */
+static struct pending_read *add_pending_read(struct data_file *df,
+					     int block_index)
+{
+	struct pending_read *result = NULL;
+	struct data_file_segment *segment = NULL;
+	struct mount_info *mi = NULL;
+
+	segment = get_file_segment(df, block_index);
+	mi = df->df_mount_info;
+
+	result = kzalloc(sizeof(*result), GFP_NOFS);
+	if (!result)
+		return NULL;
+
+	result->file_id = df->df_id;
+	result->block_index = block_index;
+	result->timestamp_us = ktime_to_us(ktime_get());
+	result->uid = current_uid().val;
+
+	spin_lock(&mi->pending_read_lock);
+
+	result->serial_number = ++mi->mi_last_pending_read_number;
+	mi->mi_pending_reads_count++;
+
+	list_add_rcu(&result->mi_reads_list, &mi->mi_reads_list_head);
+	list_add_rcu(&result->segment_reads_list, &segment->reads_list_head);
+
+	spin_unlock(&mi->pending_read_lock);
+
+	wake_up_all(&mi->mi_pending_reads_notif_wq);
+	return result;
+}
+
+static void free_pending_read_entry(struct rcu_head *entry)
+{
+	struct pending_read *read;
+
+	read = container_of(entry, struct pending_read, rcu);
+
+	kfree(read);
+}
+
+/* Notifies a given data file that a pending read has completed. */
+static void remove_pending_read(struct data_file *df, struct pending_read *read)
+{
+	struct mount_info *mi = NULL;
+
+	if (!df || !read) {
+		WARN_ON(!df);
+		WARN_ON(!read);
+		return;
+	}
+
+	mi = df->df_mount_info;
+
+	spin_lock(&mi->pending_read_lock);
+
+	list_del_rcu(&read->mi_reads_list);
+	list_del_rcu(&read->segment_reads_list);
+
+	mi->mi_pending_reads_count--;
+
+	spin_unlock(&mi->pending_read_lock);
+
+	/* Don't free. Wait for readers */
+	call_rcu(&read->rcu, free_pending_read_entry);
+}
+
+static void notify_pending_reads(struct mount_info *mi,
+		struct data_file_segment *segment,
+		int index)
+{
+	struct pending_read *entry = NULL;
+
+	/* Notify pending reads waiting for this block. */
+	rcu_read_lock();
+	list_for_each_entry_rcu(entry, &segment->reads_list_head,
+						segment_reads_list) {
+		if (entry->block_index == index)
+			set_read_done(entry);
+	}
+	rcu_read_unlock();
+	wake_up_all(&segment->new_data_arrival_wq);
+
+	atomic_inc(&mi->mi_blocks_written);
+	wake_up_all(&mi->mi_blocks_written_notif_wq);
+}
+
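+/*
+ * Waits for a data block to become available. If the block is already
+ * present it is returned right away (after an optional min_time_ms delay);
+ * otherwise a pending read is registered and the caller sleeps for up to
+ * max_pending_time_ms, with max_pending_time_ms == 0 meaning fail fast
+ * with -ETIME.
+ */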
+static int wait_for_data_block(struct data_file *df, int block_index,
+			       int min_time_ms, int min_pending_time_ms,
+			       int max_pending_time_ms,
+			       struct data_file_block *res_block)
+{
+	struct data_file_block block = {};
+	struct data_file_segment *segment = NULL;
+	struct pending_read *read = NULL;
+	struct mount_info *mi = NULL;
+	int error = 0;
+	int wait_res = 0;
+	u64 time;
+
+	if (!df || !res_block)
+		return -EFAULT;
+
+	if (block_index < 0 || block_index >= df->df_data_block_count)
+		return -EINVAL;
+
+	if (df->df_blockmap_off <= 0 || !df->df_mount_info)
+		return -ENODATA;
+
+	mi = df->df_mount_info;
+	segment = get_file_segment(df, block_index);
+
+	error = down_read_killable(&segment->rwsem);
+	if (error)
+		return error;
+
+	/* Look up the given block */
+	error = get_data_file_block(df, block_index, &block);
+
+	up_read(&segment->rwsem);
+
+	if (error)
+		return error;
+
+	/* If the block was found, just return it. No need to wait. */
+	if (is_data_block_present(&block)) {
+		if (min_time_ms)
+			error = msleep_interruptible(min_time_ms);
+		*res_block = block;
+		return error;
+	} else {
+		/* If it's not found, create a pending read */
+		if (max_pending_time_ms != 0) {
+			read = add_pending_read(df, block_index);
+			if (!read)
+				return -ENOMEM;
+		} else {
+			log_block_read(mi, &df->df_id, block_index);
+			return -ETIME;
+		}
+	}
+
+	if (min_pending_time_ms)
+		time = ktime_get_ns();
+
+	/* Wait for notifications about block's arrival */
+	wait_res =
+		wait_event_interruptible_timeout(segment->new_data_arrival_wq,
+					(is_read_done(read)),
+					msecs_to_jiffies(max_pending_time_ms));
+
+	/* Woke up, the pending read is no longer needed. */
+	remove_pending_read(df, read);
+
+	if (wait_res == 0) {
+		/* Wait has timed out */
+		log_block_read(mi, &df->df_id, block_index);
+		return -ETIME;
+	}
+	if (wait_res < 0) {
+		/*
+		 * Only ERESTARTSYS is really expected here when a signal
+		 * comes while we wait.
+		 */
+		return wait_res;
+	}
+
+	if (min_pending_time_ms) {
+		time = div_u64(ktime_get_ns() - time, 1000000);
+		if (min_pending_time_ms > time) {
+			error = msleep_interruptible(
+						min_pending_time_ms - time);
+			if (error)
+				return error;
+		}
+	}
+
+	error = down_read_killable(&segment->rwsem);
+	if (error)
+		return error;
+
+	/*
+	 * Re-read block's info now, it has just arrived and
+	 * should be available.
+	 */
+	error = get_data_file_block(df, block_index, &block);
+	if (!error) {
+		if (is_data_block_present(&block))
+			*res_block = block;
+		else {
+			/*
+			 * Somehow the wait finished successfully but the
+			 * block still can't be found. This is not normal.
+			 */
+			pr_warn("incfs: Wait succeeded, but block not found.\n");
+			error = -ENODATA;
+		}
+	}
+
+	up_read(&segment->rwsem);
+	return error;
+}
+
+ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f,
+			int index, int min_time_ms,
+			int min_pending_time_ms, int max_pending_time_ms,
+			struct mem_range tmp)
+{
+	loff_t pos;
+	ssize_t result;
+	size_t bytes_to_read;
+	struct mount_info *mi = NULL;
+	struct file *bf = NULL;
+	struct data_file_block block = {};
+	struct data_file *df = get_incfs_data_file(f);
+
+	if (!dst.data || !df || !tmp.data)
+		return -EFAULT;
+
+	if (tmp.len < 2 * INCFS_DATA_FILE_BLOCK_SIZE)
+		return -ERANGE;
+
+	mi = df->df_mount_info;
+	bf = df->df_backing_file_context->bc_file;
+
+	result = wait_for_data_block(df, index, min_time_ms,
+			min_pending_time_ms, max_pending_time_ms, &block);
+	if (result < 0)
+		goto out;
+
+	pos = block.db_backing_file_data_offset;
+	if (block.db_comp_alg == COMPRESSION_NONE) {
+		bytes_to_read = min(dst.len, block.db_stored_size);
+		result = incfs_kread(bf, dst.data, bytes_to_read, pos);
+
+		/* Some data was read, but not enough */
+		if (result >= 0 && result != bytes_to_read)
+			result = -EIO;
+	} else {
+		bytes_to_read = min(tmp.len, block.db_stored_size);
+		result = incfs_kread(bf, tmp.data, bytes_to_read, pos);
+		if (result == bytes_to_read) {
+			result =
+				decompress(mi, range(tmp.data, bytes_to_read),
+					   dst, block.db_comp_alg);
+			if (result < 0) {
+				const char *name =
+					bf->f_path.dentry->d_name.name;
+
+				pr_warn_once("incfs: Decompression error. %s",
+					     name);
+			}
+		} else if (result >= 0) {
+			/* Some data was read, but not enough */
+			result = -EIO;
+		}
+	}
+
+	if (result > 0) {
+		int err = validate_hash_tree(bf, f, index, dst, tmp.data);
+
+		if (err < 0)
+			result = err;
+	}
+
+	if (result >= 0)
+		log_block_read(mi, &df->df_id, index);
+
+out:
+	return result;
+}
+
+int incfs_process_new_data_block(struct data_file *df,
+				 struct incfs_fill_block *block, u8 *data)
+{
+	struct mount_info *mi = NULL;
+	struct backing_file_context *bfc = NULL;
+	struct data_file_segment *segment = NULL;
+	struct data_file_block existing_block = {};
+	u16 flags = 0;
+	int error = 0;
+
+	if (!df || !block)
+		return -EFAULT;
+
+	bfc = df->df_backing_file_context;
+	mi = df->df_mount_info;
+
+	if (block->block_index >= df->df_data_block_count)
+		return -ERANGE;
+
+	segment = get_file_segment(df, block->block_index);
+	if (!segment)
+		return -EFAULT;
+
+	if (block->compression == COMPRESSION_LZ4)
+		flags |= INCFS_BLOCK_COMPRESSED_LZ4;
+	else if (block->compression == COMPRESSION_ZSTD)
+		flags |= INCFS_BLOCK_COMPRESSED_ZSTD;
+	else if (block->compression)
+		return -EINVAL;
+
+	error = down_read_killable(&segment->rwsem);
+	if (error)
+		return error;
+
+	error = get_data_file_block(df, block->block_index, &existing_block);
+
+	up_read(&segment->rwsem);
+
+	if (error)
+		return error;
+	if (is_data_block_present(&existing_block)) {
+		/* Block is already present, nothing to do here */
+		return 0;
+	}
+
+	error = down_write_killable(&segment->rwsem);
+	if (error)
+		return error;
+
+	error = mutex_lock_interruptible(&bfc->bc_mutex);
+	if (!error) {
+		error = incfs_write_data_block_to_backing_file(
+			bfc, range(data, block->data_len), block->block_index,
+			df->df_blockmap_off, flags);
+		mutex_unlock(&bfc->bc_mutex);
+	}
+	if (!error) {
+		notify_pending_reads(mi, segment, block->block_index);
+		atomic_inc(&df->df_data_blocks_written);
+	}
+
+	up_write(&segment->rwsem);
+
+	if (error)
+		pr_debug("incfs: %s %d error: %d\n", __func__,
+				block->block_index, error);
+	return error;
+}
+
+int incfs_read_file_signature(struct data_file *df, struct mem_range dst)
+{
+	struct file *bf = df->df_backing_file_context->bc_file;
+	struct incfs_df_signature *sig;
+	int read_res = 0;
+
+	if (!dst.data)
+		return -EFAULT;
+
+	sig = df->df_signature;
+	if (!sig)
+		return 0;
+
+	if (dst.len < sig->sig_size)
+		return -E2BIG;
+
+	read_res = incfs_kread(bf, dst.data, sig->sig_size, sig->sig_offset);
+
+	if (read_res < 0)
+		return read_res;
+
+	if (read_res != sig->sig_size)
+		return -EIO;
+
+	return read_res;
+}
+
+int incfs_process_new_hash_block(struct data_file *df,
+				 struct incfs_fill_block *block, u8 *data)
+{
+	struct backing_file_context *bfc = NULL;
+	struct mount_info *mi = NULL;
+	struct mtree *hash_tree = NULL;
+	struct incfs_df_signature *sig = NULL;
+	loff_t hash_area_base = 0;
+	loff_t hash_area_size = 0;
+	int error = 0;
+
+	if (!df || !block)
+		return -EFAULT;
+
+	if (!(block->flags & INCFS_BLOCK_FLAGS_HASH))
+		return -EINVAL;
+
+	bfc = df->df_backing_file_context;
+	mi = df->df_mount_info;
+
+	hash_tree = df->df_hash_tree;
+	sig = df->df_signature;
+	if (!hash_tree || !sig || sig->hash_offset == 0)
+		return -ENOTSUPP;
+
+	hash_area_base = sig->hash_offset;
+	hash_area_size = sig->hash_size;
+	if (hash_area_size < block->block_index * INCFS_DATA_FILE_BLOCK_SIZE
+				+ block->data_len) {
+		/* Hash block goes beyond dedicated hash area of this file. */
+		return -ERANGE;
+	}
+
+	error = mutex_lock_interruptible(&bfc->bc_mutex);
+	if (!error) {
+		error = incfs_write_hash_block_to_backing_file(
+			bfc, range(data, block->data_len), block->block_index,
+			hash_area_base, df->df_blockmap_off, df->df_size);
+		mutex_unlock(&bfc->bc_mutex);
+	}
+	if (!error)
+		atomic_inc(&df->df_hash_blocks_written);
+
+	return error;
+}
+
+static int process_blockmap_md(struct incfs_blockmap *bm,
+			       struct metadata_handler *handler)
+{
+	struct data_file *df = handler->context;
+	int error = 0;
+	loff_t base_off = le64_to_cpu(bm->m_base_offset);
+	u32 block_count = le32_to_cpu(bm->m_block_count);
+
+	if (!df)
+		return -EFAULT;
+
+	if (df->df_data_block_count > block_count)
+		return -EBADMSG;
+
+	df->df_total_block_count = block_count;
+	df->df_blockmap_off = base_off;
+	return error;
+}
+
+static int process_file_signature_md(struct incfs_file_signature *sg,
+				struct metadata_handler *handler)
+{
+	struct data_file *df = handler->context;
+	struct mtree *hash_tree = NULL;
+	int error = 0;
+	struct incfs_df_signature *signature =
+		kzalloc(sizeof(*signature), GFP_NOFS);
+	void *buf = NULL;
+	ssize_t read;
+
+	if (!signature)
+		return -ENOMEM;
+
+	if (!df || !df->df_backing_file_context ||
+	    !df->df_backing_file_context->bc_file) {
+		error = -ENOENT;
+		goto out;
+	}
+
+	signature->hash_offset = le64_to_cpu(sg->sg_hash_tree_offset);
+	signature->hash_size = le32_to_cpu(sg->sg_hash_tree_size);
+	signature->sig_offset = le64_to_cpu(sg->sg_sig_offset);
+	signature->sig_size = le32_to_cpu(sg->sg_sig_size);
+
+	buf = kzalloc(signature->sig_size, GFP_NOFS);
+	if (!buf) {
+		error = -ENOMEM;
+		goto out;
+	}
+
+	read = incfs_kread(df->df_backing_file_context->bc_file, buf,
+			   signature->sig_size, signature->sig_offset);
+	if (read < 0) {
+		error = read;
+		goto out;
+	}
+
+	if (read != signature->sig_size) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	hash_tree = incfs_alloc_mtree(range(buf, signature->sig_size),
+				      df->df_data_block_count);
+	if (IS_ERR(hash_tree)) {
+		error = PTR_ERR(hash_tree);
+		hash_tree = NULL;
+		goto out;
+	}
+	if (hash_tree->hash_tree_area_size != signature->hash_size) {
+		error = -EINVAL;
+		goto out;
+	}
+	if (signature->hash_size > 0 &&
+	    handler->md_record_offset <= signature->hash_offset) {
+		error = -EINVAL;
+		goto out;
+	}
+	if (handler->md_record_offset <= signature->sig_offset) {
+		error = -EINVAL;
+		goto out;
+	}
+	df->df_hash_tree = hash_tree;
+	hash_tree = NULL;
+	df->df_signature = signature;
+	signature = NULL;
+out:
+	incfs_free_mtree(hash_tree);
+	kfree(signature);
+	kfree(buf);
+
+	return error;
+}
+
+static int process_status_md(struct incfs_status *is,
+			     struct metadata_handler *handler)
+{
+	struct data_file *df = handler->context;
+
+	df->df_initial_data_blocks_written =
+		le32_to_cpu(is->is_data_blocks_written);
+	atomic_set(&df->df_data_blocks_written,
+		   df->df_initial_data_blocks_written);
+
+	df->df_initial_hash_blocks_written =
+		le32_to_cpu(is->is_hash_blocks_written);
+	atomic_set(&df->df_hash_blocks_written,
+		   df->df_initial_hash_blocks_written);
+
+	df->df_status_offset = handler->md_record_offset;
+
+	return 0;
+}
+
+static int incfs_scan_metadata_chain(struct data_file *df)
+{
+	struct metadata_handler *handler = NULL;
+	int result = 0;
+	int records_count = 0;
+	int error = 0;
+	struct backing_file_context *bfc = NULL;
+
+	if (!df || !df->df_backing_file_context)
+		return -EFAULT;
+
+	bfc = df->df_backing_file_context;
+
+	handler = kzalloc(sizeof(*handler), GFP_NOFS);
+	if (!handler)
+		return -ENOMEM;
+
+	handler->md_record_offset = df->df_metadata_off;
+	handler->context = df;
+	handler->handle_blockmap = process_blockmap_md;
+	handler->handle_signature = process_file_signature_md;
+	handler->handle_status = process_status_md;
+
+	while (handler->md_record_offset > 0) {
+		error = incfs_read_next_metadata_record(bfc, handler);
+		if (error) {
+			pr_warn("incfs: Error reading metadata record. Offset: %lld Record #%d Error code: %d\n",
+				handler->md_record_offset, records_count + 1,
+				-error);
+			break;
+		}
+		records_count++;
+	}
+	if (error) {
+		pr_warn("incfs: Error %d after reading %d incfs-metadata records.\n",
+			 -error, records_count);
+		result = error;
+	} else
+		result = records_count;
+
+	if (df->df_hash_tree) {
+		int hash_block_count = get_blocks_count_for_size(
+			df->df_hash_tree->hash_tree_area_size);
+
+		if (df->df_data_block_count + hash_block_count !=
+		    df->df_total_block_count)
+			result = -EINVAL;
+	} else if (df->df_data_block_count != df->df_total_block_count)
+		result = -EINVAL;
+
+	kfree(handler);
+	return result;
+}
+
+/*
+ * Quickly checks if there are pending reads with a serial number larger
+ * than a given one.
+ */
+bool incfs_fresh_pending_reads_exist(struct mount_info *mi, int last_number)
+{
+	bool result = false;
+
+	spin_lock(&mi->pending_read_lock);
+	result = (mi->mi_last_pending_read_number > last_number) &&
+		(mi->mi_pending_reads_count > 0);
+	spin_unlock(&mi->pending_read_lock);
+	return result;
+}
+
+int incfs_collect_pending_reads(struct mount_info *mi, int sn_lowerbound,
+				struct incfs_pending_read_info *reads,
+				struct incfs_pending_read_info2 *reads2,
+				int reads_size, int *new_max_sn)
+{
+	int reported_reads = 0;
+	struct pending_read *entry = NULL;
+
+	if (!mi)
+		return -EFAULT;
+
+	if (reads_size <= 0)
+		return 0;
+
+	if (!incfs_fresh_pending_reads_exist(mi, sn_lowerbound))
+		return 0;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(entry, &mi->mi_reads_list_head, mi_reads_list) {
+		if (entry->serial_number <= sn_lowerbound)
+			continue;
+
+		if (reads) {
+			reads[reported_reads].file_id = entry->file_id;
+			reads[reported_reads].block_index = entry->block_index;
+			reads[reported_reads].serial_number =
+				entry->serial_number;
+			reads[reported_reads].timestamp_us =
+				entry->timestamp_us;
+		}
+
+		if (reads2) {
+			reads2[reported_reads].file_id = entry->file_id;
+			reads2[reported_reads].block_index = entry->block_index;
+			reads2[reported_reads].serial_number =
+				entry->serial_number;
+			reads2[reported_reads].timestamp_us =
+				entry->timestamp_us;
+			reads2[reported_reads].uid = entry->uid;
+		}
+
+		if (entry->serial_number > *new_max_sn)
+			*new_max_sn = entry->serial_number;
+
+		reported_reads++;
+		if (reported_reads >= reads_size)
+			break;
+	}
+
+	rcu_read_unlock();
+
+	return reported_reads;
+}
+
+struct read_log_state incfs_get_log_state(struct mount_info *mi)
+{
+	struct read_log *log = &mi->mi_log;
+	struct read_log_state result;
+
+	spin_lock(&log->rl_lock);
+	result = log->rl_head;
+	spin_unlock(&log->rl_lock);
+	return result;
+}
+
+int incfs_get_uncollected_logs_count(struct mount_info *mi,
+				     const struct read_log_state *state)
+{
+	struct read_log *log = &mi->mi_log;
+	u32 generation;
+	u64 head_no, tail_no;
+
+	spin_lock(&log->rl_lock);
+	tail_no = log->rl_tail.current_record_no;
+	head_no = log->rl_head.current_record_no;
+	generation = log->rl_head.generation_id;
+	spin_unlock(&log->rl_lock);
+
+	if (generation != state->generation_id)
+		return head_no - tail_no;
+	else
+		return head_no - max_t(u64, tail_no, state->current_record_no);
+}
+
+int incfs_collect_logged_reads(struct mount_info *mi,
+			       struct read_log_state *state,
+			       struct incfs_pending_read_info *reads,
+			       struct incfs_pending_read_info2 *reads2,
+			       int reads_size)
+{
+	int dst_idx;
+	struct read_log *log = &mi->mi_log;
+	struct read_log_state *head, *tail;
+
+	spin_lock(&log->rl_lock);
+	head = &log->rl_head;
+	tail = &log->rl_tail;
+
+	if (state->generation_id != head->generation_id) {
+		pr_debug("read ptr is wrong generation: %u/%u\n",
+			 state->generation_id, head->generation_id);
+
+		*state = (struct read_log_state){
+			.generation_id = head->generation_id,
+		};
+	}
+
+	if (state->current_record_no < tail->current_record_no) {
+		pr_debug("read ptr is behind, moving: %u/%u -> %u/%u\n",
+			 (u32)state->next_offset,
+			 (u32)state->current_pass_no,
+			 (u32)tail->next_offset, (u32)tail->current_pass_no);
+
+		*state = *tail;
+	}
+
+	for (dst_idx = 0; dst_idx < reads_size; dst_idx++) {
+		if (state->current_record_no == head->current_record_no)
+			break;
+
+		log_read_one_record(log, state);
+
+		if (reads)
+			reads[dst_idx] = (struct incfs_pending_read_info) {
+				.file_id = state->base_record.file_id,
+				.block_index = state->base_record.block_index,
+				.serial_number = state->current_record_no,
+				.timestamp_us =
+					state->base_record.absolute_ts_us,
+			};
+
+		if (reads2)
+			reads2[dst_idx] = (struct incfs_pending_read_info2) {
+				.file_id = state->base_record.file_id,
+				.block_index = state->base_record.block_index,
+				.serial_number = state->current_record_no,
+				.timestamp_us =
+					state->base_record.absolute_ts_us,
+				.uid = state->base_record.uid,
+			};
+	}
+
+	spin_unlock(&log->rl_lock);
+	return dst_idx;
+}
+
diff --git a/fs/incfs/data_mgmt.h b/fs/incfs/data_mgmt.h
new file mode 100644
index 0000000..a63af70
--- /dev/null
+++ b/fs/incfs/data_mgmt.h
@@ -0,0 +1,454 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+#ifndef _INCFS_DATA_MGMT_H
+#define _INCFS_DATA_MGMT_H
+
+#include <linux/cred.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/zstd.h>
+#include <crypto/hash.h>
+#include <linux/rwsem.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "internal.h"
+
+#define SEGMENTS_PER_FILE 3
+
+enum LOG_RECORD_TYPE {
+	FULL,
+	SAME_FILE,
+	SAME_FILE_NEXT_BLOCK,
+	SAME_FILE_NEXT_BLOCK_SHORT,
+};
+
+struct full_record {
+	enum LOG_RECORD_TYPE type : 2; /* FULL */
+	u32 block_index : 30;
+	incfs_uuid_t file_id;
+	u64 absolute_ts_us;
+	uid_t uid;
+} __packed; /* 32 bytes */
+
+struct same_file_record {
+	enum LOG_RECORD_TYPE type : 2; /* SAME_FILE */
+	u32 block_index : 30;
+	u32 relative_ts_us; /* max 2^32 us ~= 1 hour (1:11:35) */
+} __packed; /* 8 bytes */
+
+struct same_file_next_block {
+	enum LOG_RECORD_TYPE type : 2; /* SAME_FILE_NEXT_BLOCK */
+	u32 relative_ts_us : 30; /* max 2^30 us ~= 18 min (17:54) */
+} __packed; /* 4 bytes */
+
+struct same_file_next_block_short {
+	enum LOG_RECORD_TYPE type : 2; /* SAME_FILE_NEXT_BLOCK_SHORT */
+	u16 relative_ts_us : 14; /* max 2^14 us ~= 16 ms */
+} __packed; /* 2 bytes */
+
+union log_record {
+	struct full_record full_record;
+	struct same_file_record same_file_record;
+	struct same_file_next_block same_file_next_block;
+	struct same_file_next_block_short same_file_next_block_short;
+};
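+
+/*
+ * Example encoding, assuming reads of blocks 10, 11 and 12 of one file
+ * arrive in quick succession: the first read emits a 32-byte
+ * full_record; the next two each hit the following block within ~16 ms
+ * and fit in a 2-byte same_file_next_block_short; a later read of a
+ * non-adjacent block of the same file needs an 8-byte same_file_record.
+ */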
+
+struct read_log_state {
+	/* Log buffer generation id, incremented on configuration changes */
+	u32 generation_id;
+
+	/* Offset in rl_ring_buf to write into. */
+	u32 next_offset;
+
+	/* Current number of writer passes over rl_ring_buf */
+	u32 current_pass_no;
+
+	/* Current full_record to diff against */
+	struct full_record base_record;
+
+	/* Current record number counting from configuration change */
+	u64 current_record_no;
+};
+
+/* A ring buffer to save records about data blocks which were recently read. */
+struct read_log {
+	void *rl_ring_buf;
+
+	int rl_size;
+
+	struct read_log_state rl_head;
+
+	struct read_log_state rl_tail;
+
+	/* A lock to protect the above fields */
+	spinlock_t rl_lock;
+
+	/* A queue of waiters who want to be notified about reads */
+	wait_queue_head_t ml_notif_wq;
+
+	/* A work item to wake up those waiters without slowing down readers */
+	struct delayed_work ml_wakeup_work;
+};
+
+struct mount_options {
+	unsigned int read_timeout_ms;
+	unsigned int readahead_pages;
+	unsigned int read_log_pages;
+	unsigned int read_log_wakeup_count;
+	bool report_uid;
+};
+
+struct mount_info {
+	struct super_block *mi_sb;
+
+	struct path mi_backing_dir_path;
+
+	struct dentry *mi_index_dir;
+
+	struct dentry *mi_incomplete_dir;
+
+	const struct cred *mi_owner;
+
+	struct mount_options mi_options;
+
+	/* This mutex is to be taken before create, rename, delete */
+	struct mutex mi_dir_struct_mutex;
+
+	/*
+	 * A queue of waiters who want to be notified about new pending reads.
+	 */
+	wait_queue_head_t mi_pending_reads_notif_wq;
+
+	/*
+	 * Protects the following fields (RCU-safe):
+	 *  - mi_reads_list_head
+	 *  - mi_pending_reads_count
+	 *  - mi_last_pending_read_number
+	 *  - data_file_segment.reads_list_head
+	 */
+	spinlock_t pending_read_lock;
+
+	/* List of active pending_read objects */
+	struct list_head mi_reads_list_head;
+
+	/* Total number of items in mi_reads_list_head */
+	int mi_pending_reads_count;
+
+	/*
+	 * Last serial number that was assigned to a pending read.
+	 * 0 means no pending reads have been seen yet.
+	 */
+	int mi_last_pending_read_number;
+
+	/* Temporary buffer for read logger. */
+	struct read_log mi_log;
+
+	void *log_xattr;
+	size_t log_xattr_size;
+
+	void *pending_read_xattr;
+	size_t pending_read_xattr_size;
+
+	/* A queue of waiters who want to be notified about blocks_written */
+	wait_queue_head_t mi_blocks_written_notif_wq;
+
+	/* Number of blocks written since mount */
+	atomic_t mi_blocks_written;
+
+	/* Per UID read timeouts */
+	spinlock_t mi_per_uid_read_timeouts_lock;
+	struct incfs_per_uid_read_timeouts *mi_per_uid_read_timeouts;
+	int mi_per_uid_read_timeouts_size;
+
+	/* zstd workspace */
+	struct mutex mi_zstd_workspace_mutex;
+	void *mi_zstd_workspace;
+	ZSTD_DStream *mi_zstd_stream;
+	struct delayed_work mi_zstd_cleanup_work;
+};
+
+struct data_file_block {
+	loff_t db_backing_file_data_offset;
+
+	size_t db_stored_size;
+
+	enum incfs_compression_alg db_comp_alg;
+};
+
+struct pending_read {
+	incfs_uuid_t file_id;
+
+	s64 timestamp_us;
+
+	atomic_t done;
+
+	int block_index;
+
+	int serial_number;
+
+	uid_t uid;
+
+	struct list_head mi_reads_list;
+
+	struct list_head segment_reads_list;
+
+	struct rcu_head rcu;
+};
+
+struct data_file_segment {
+	wait_queue_head_t new_data_arrival_wq;
+
+	/* Protects reads and writes from the blockmap */
+	struct rw_semaphore rwsem;
+
+	/* List of active pending_read objects belonging to this segment */
+	/* Protected by mount_info.pending_read_lock */
+	struct list_head reads_list_head;
+};
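+
+/*
+ * A sketch of how a block is mapped to its segment, assuming a simple
+ * modulo mapping (the exact policy lives in data_mgmt.c):
+ *
+ *	struct data_file_segment *seg =
+ *		&df->df_segments[block_index % SEGMENTS_PER_FILE];
+ */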
+
+/*
+ * Extra info associated with a file. Just a few bytes set by a user.
+ */
+struct file_attr {
+	loff_t fa_value_offset;
+
+	size_t fa_value_size;
+
+	u32 fa_crc;
+};
+
+
+struct data_file {
+	struct backing_file_context *df_backing_file_context;
+
+	struct mount_info *df_mount_info;
+
+	incfs_uuid_t df_id;
+
+	/*
+	 * Array of segments used to reduce lock contention for the file.
+	 * The segment for a block is chosen based on the block's index.
+	 */
+	struct data_file_segment df_segments[SEGMENTS_PER_FILE];
+
+	/* Base offset of the first metadata record. */
+	loff_t df_metadata_off;
+
+	/* Base offset of the block map. */
+	loff_t df_blockmap_off;
+
+	/* File size in bytes */
+	loff_t df_size;
+
+	/* File header flags */
+	u32 df_header_flags;
+
+	/* File size in DATA_FILE_BLOCK_SIZE blocks */
+	int df_data_block_count;
+
+	/* Total number of blocks, data + hash */
+	int df_total_block_count;
+
+	/* For mapped files, the offset into the actual file */
+	loff_t df_mapped_offset;
+
+	/* Number of data blocks written to file */
+	atomic_t df_data_blocks_written;
+
+	/* Number of data blocks in the status block */
+	u32 df_initial_data_blocks_written;
+
+	/* Number of hash blocks written to file */
+	atomic_t df_hash_blocks_written;
+
+	/* Number of hash blocks in the status block */
+	u32 df_initial_hash_blocks_written;
+
+	/* Offset to status metadata header */
+	loff_t df_status_offset;
+
+	struct mtree *df_hash_tree;
+
+	struct incfs_df_signature *df_signature;
+};
+
+struct dir_file {
+	struct mount_info *mount_info;
+
+	struct file *backing_dir;
+};
+
+struct inode_info {
+	struct mount_info *n_mount_info; /* The mount this file belongs to */
+
+	struct inode *n_backing_inode;
+
+	struct data_file *n_file;
+
+	struct inode n_vfs_inode;
+};
+
+struct dentry_info {
+	struct path backing_path;
+};
+
+enum FILL_PERMISSION {
+	CANT_FILL = 0,
+	CAN_FILL = 1,
+};
+
+struct incfs_file_data {
+	/* Does this file handle have INCFS_IOC_FILL_BLOCKS permission */
+	enum FILL_PERMISSION fd_fill_permission;
+
+	/* If INCFS_IOC_GET_FILLED_BLOCKS has been called, where are we */
+	int fd_get_block_pos;
+
+	/* And how many filled blocks are there up to that point */
+	int fd_filled_data_blocks;
+	int fd_filled_hash_blocks;
+};
+
+struct mount_info *incfs_alloc_mount_info(struct super_block *sb,
+					  struct mount_options *options,
+					  struct path *backing_dir_path);
+
+int incfs_realloc_mount_info(struct mount_info *mi,
+			     struct mount_options *options);
+
+void incfs_free_mount_info(struct mount_info *mi);
+
+char *file_id_to_str(incfs_uuid_t id);
+struct dentry *incfs_lookup_dentry(struct dentry *parent, const char *name);
+struct data_file *incfs_open_data_file(struct mount_info *mi, struct file *bf);
+void incfs_free_data_file(struct data_file *df);
+
+struct dir_file *incfs_open_dir_file(struct mount_info *mi, struct file *bf);
+void incfs_free_dir_file(struct dir_file *dir);
+
+ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f,
+			int index, int min_time_ms,
+			int min_pending_time_ms, int max_pending_time_ms,
+			struct mem_range tmp);
+
+int incfs_get_filled_blocks(struct data_file *df,
+			    struct incfs_file_data *fd,
+			    struct incfs_get_filled_blocks_args *arg);
+
+int incfs_read_file_signature(struct data_file *df, struct mem_range dst);
+
+int incfs_process_new_data_block(struct data_file *df,
+				 struct incfs_fill_block *block, u8 *data);
+
+int incfs_process_new_hash_block(struct data_file *df,
+				 struct incfs_fill_block *block, u8 *data);
+
+bool incfs_fresh_pending_reads_exist(struct mount_info *mi, int last_number);
+
+/*
+ * Collects pending reads and saves them into the array (reads/reads_size).
+ * Only reads with serial_number > sn_lowerbound are reported.
+ * Returns how many reads were saved into the array.
+ */
+int incfs_collect_pending_reads(struct mount_info *mi, int sn_lowerbound,
+				struct incfs_pending_read_info *reads,
+				struct incfs_pending_read_info2 *reads2,
+				int reads_size, int *new_max_sn);
+
+int incfs_collect_logged_reads(struct mount_info *mi,
+			       struct read_log_state *start_state,
+			       struct incfs_pending_read_info *reads,
+			       struct incfs_pending_read_info2 *reads2,
+			       int reads_size);
+struct read_log_state incfs_get_log_state(struct mount_info *mi);
+int incfs_get_uncollected_logs_count(struct mount_info *mi,
+				     const struct read_log_state *state);
+
+static inline struct inode_info *get_incfs_node(struct inode *inode)
+{
+	if (!inode)
+		return NULL;
+
+	if (inode->i_sb->s_magic != (long) INCFS_MAGIC_NUMBER) {
+		/* This inode doesn't belong to us. */
+		pr_warn_once("incfs: %s on an alien inode.", __func__);
+		return NULL;
+	}
+
+	return container_of(inode, struct inode_info, n_vfs_inode);
+}
+
+static inline struct data_file *get_incfs_data_file(struct file *f)
+{
+	struct inode_info *node = NULL;
+
+	if (!f)
+		return NULL;
+
+	if (!S_ISREG(f->f_inode->i_mode))
+		return NULL;
+
+	node = get_incfs_node(f->f_inode);
+	if (!node)
+		return NULL;
+
+	return node->n_file;
+}
+
+static inline struct dir_file *get_incfs_dir_file(struct file *f)
+{
+	if (!f)
+		return NULL;
+
+	if (!S_ISDIR(f->f_inode->i_mode))
+		return NULL;
+
+	return (struct dir_file *)f->private_data;
+}
+
+/*
+ * Make sure that inode_info.n_file is initialized and inode can be used
+ * for reading and writing data from/to the backing file.
+ */
+int make_inode_ready_for_data_ops(struct mount_info *mi,
+				struct inode *inode,
+				struct file *backing_file);
+
+static inline struct dentry_info *get_incfs_dentry(const struct dentry *d)
+{
+	if (!d)
+		return NULL;
+
+	return (struct dentry_info *)d->d_fsdata;
+}
+
+static inline void get_incfs_backing_path(const struct dentry *d,
+					  struct path *path)
+{
+	struct dentry_info *di = get_incfs_dentry(d);
+
+	if (!di) {
+		*path = (struct path) {};
+		return;
+	}
+
+	*path = di->backing_path;
+	path_get(path);
+}
+
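+/*
+ * Rounds a size in bytes up to whole data blocks; e.g. with
+ * INCFS_DATA_FILE_BLOCK_SIZE == 4096, a size of 10000 needs 3 blocks
+ * and a size of 8192 needs exactly 2.
+ */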
+static inline int get_blocks_count_for_size(u64 size)
+{
+	if (size == 0)
+		return 0;
+	return 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+}
+
+#endif /* _INCFS_DATA_MGMT_H */
diff --git a/fs/incfs/format.c b/fs/incfs/format.c
new file mode 100644
index 0000000..52079b5
--- /dev/null
+++ b/fs/incfs/format.c
@@ -0,0 +1,690 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/falloc.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/kernel.h>
+
+#include "format.h"
+#include "data_mgmt.h"
+
+struct backing_file_context *incfs_alloc_bfc(struct file *backing_file)
+{
+	struct backing_file_context *result = NULL;
+
+	result = kzalloc(sizeof(*result), GFP_NOFS);
+	if (!result)
+		return ERR_PTR(-ENOMEM);
+
+	result->bc_file = get_file(backing_file);
+	mutex_init(&result->bc_mutex);
+	return result;
+}
+
+void incfs_free_bfc(struct backing_file_context *bfc)
+{
+	if (!bfc)
+		return;
+
+	if (bfc->bc_file)
+		fput(bfc->bc_file);
+
+	mutex_destroy(&bfc->bc_mutex);
+	kfree(bfc);
+}
+
+static loff_t incfs_get_end_offset(struct file *f)
+{
+	/*
+	 * This function assumes that file size and the end-offset
+	 * are the same. This is not always true.
+	 */
+	return i_size_read(file_inode(f));
+}
+
+/*
+ * Truncate the tail of the file to the given length.
+ * Used to rollback partially successful multistep writes.
+ */
+static int truncate_backing_file(struct backing_file_context *bfc,
+				loff_t new_end)
+{
+	struct inode *inode = NULL;
+	struct dentry *dentry = NULL;
+	loff_t old_end = 0;
+	struct iattr attr;
+	int result = 0;
+
+	if (!bfc)
+		return -EFAULT;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	if (!bfc->bc_file)
+		return -EFAULT;
+
+	old_end = incfs_get_end_offset(bfc->bc_file);
+	if (old_end == new_end)
+		return 0;
+	if (old_end < new_end)
+		return -EINVAL;
+
+	inode = bfc->bc_file->f_inode;
+	dentry = bfc->bc_file->f_path.dentry;
+
+	attr.ia_size = new_end;
+	attr.ia_valid = ATTR_SIZE;
+
+	inode_lock(inode);
+	result = notify_change(dentry, &attr, NULL);
+	inode_unlock(inode);
+
+	return result;
+}
+
+static int write_to_bf(struct backing_file_context *bfc, const void *buf,
+			size_t count, loff_t pos)
+{
+	ssize_t res = incfs_kwrite(bfc->bc_file, buf, count, pos);
+
+	if (res < 0)
+		return res;
+	if (res != count)
+		return -EIO;
+	return 0;
+}
+
+static int append_zeros_no_fallocate(struct backing_file_context *bfc,
+				     size_t file_size, size_t len)
+{
+	u8 buffer[256] = {};
+	size_t i;
+
+	for (i = 0; i < len; i += sizeof(buffer)) {
+		int to_write = len - i > sizeof(buffer)
+			? sizeof(buffer) : len - i;
+		int err = write_to_bf(bfc, buffer, to_write, file_size + i);
+
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/* Append a given number of zero bytes to the end of the backing file. */
+static int append_zeros(struct backing_file_context *bfc, size_t len)
+{
+	loff_t file_size = 0;
+	loff_t new_last_byte_offset = 0;
+	int result;
+
+	if (!bfc)
+		return -EFAULT;
+
+	if (len == 0)
+		return 0;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	/*
+	 * Allocate only one byte at the new desired end of the file.
+	 * It will increase the file size and create a zeroed area of
+	 * the given size.
+	 */
+	file_size = incfs_get_end_offset(bfc->bc_file);
+	new_last_byte_offset = file_size + len - 1;
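+	/* e.g. file_size = 100 and len = 4096 allocates one byte at 4195 */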
+	result = vfs_fallocate(bfc->bc_file, 0, new_last_byte_offset, 1);
+	if (result != -EOPNOTSUPP)
+		return result;
+
+	return append_zeros_no_fallocate(bfc, file_size, len);
+}
+
+/*
+ * Append a given metadata record to the backing file and update the previous
+ * record to add the new record to the metadata list.
+ */
+static int append_md_to_backing_file(struct backing_file_context *bfc,
+			      struct incfs_md_header *record)
+{
+	int result = 0;
+	loff_t record_offset;
+	loff_t file_pos;
+	__le64 new_md_offset;
+	size_t record_size;
+
+	if (!bfc || !record)
+		return -EFAULT;
+
+	if (bfc->bc_last_md_record_offset < 0)
+		return -EINVAL;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	record_size = le16_to_cpu(record->h_record_size);
+	file_pos = incfs_get_end_offset(bfc->bc_file);
+	record->h_next_md_offset = 0;
+
+	/* Write the metadata record to the end of the backing file */
+	record_offset = file_pos;
+	new_md_offset = cpu_to_le64(record_offset);
+	result = write_to_bf(bfc, record, record_size, file_pos);
+	if (result)
+		return result;
+
+	/* Update next metadata offset in a previous record or a superblock. */
+	if (bfc->bc_last_md_record_offset) {
+		/*
+		 * Find a place in the previous md record where new record's
+		 * offset needs to be saved.
+		 */
+		file_pos = bfc->bc_last_md_record_offset +
+			offsetof(struct incfs_md_header, h_next_md_offset);
+	} else {
+		/*
+		 * No metadata yet; find the place to update in the
+		 * file_header.
+		 */
+		file_pos = offsetof(struct incfs_file_header,
+				    fh_first_md_offset);
+	}
+	result = write_to_bf(bfc, &new_md_offset, sizeof(new_md_offset),
+			     file_pos);
+	if (result)
+		return result;
+
+	bfc->bc_last_md_record_offset = record_offset;
+	return result;
+}
+
+/*
+ * Reserve 0-filled space for the blockmap body, and append an
+ * incfs_blockmap metadata record pointing to it.
+ */
+int incfs_write_blockmap_to_backing_file(struct backing_file_context *bfc,
+					 u32 block_count)
+{
+	struct incfs_blockmap blockmap = {};
+	int result = 0;
+	loff_t file_end = 0;
+	size_t map_size = block_count * sizeof(struct incfs_blockmap_entry);
+
+	if (!bfc)
+		return -EFAULT;
+
+	blockmap.m_header.h_md_entry_type = INCFS_MD_BLOCK_MAP;
+	blockmap.m_header.h_record_size = cpu_to_le16(sizeof(blockmap));
+	blockmap.m_header.h_next_md_offset = cpu_to_le64(0);
+	blockmap.m_block_count = cpu_to_le32(block_count);
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	/* Reserve 0-filled space for the blockmap body in the backing file. */
+	file_end = incfs_get_end_offset(bfc->bc_file);
+	result = append_zeros(bfc, map_size);
+	if (result)
+		return result;
+
+	/* Write blockmap metadata record pointing to the body written above. */
+	blockmap.m_base_offset = cpu_to_le64(file_end);
+	result = append_md_to_backing_file(bfc, &blockmap.m_header);
+	if (result)
+		/* Error, rollback file changes */
+		truncate_backing_file(bfc, file_end);
+
+	return result;
+}
+
+int incfs_write_signature_to_backing_file(struct backing_file_context *bfc,
+					  struct mem_range sig, u32 tree_size)
+{
+	struct incfs_file_signature sg = {};
+	int result = 0;
+	loff_t rollback_pos = 0;
+	loff_t tree_area_pos = 0;
+	size_t alignment = 0;
+
+	if (!bfc)
+		return -EFAULT;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	rollback_pos = incfs_get_end_offset(bfc->bc_file);
+
+	sg.sg_header.h_md_entry_type = INCFS_MD_SIGNATURE;
+	sg.sg_header.h_record_size = cpu_to_le16(sizeof(sg));
+	sg.sg_header.h_next_md_offset = cpu_to_le64(0);
+	if (sig.data != NULL && sig.len > 0) {
+		loff_t pos = incfs_get_end_offset(bfc->bc_file);
+
+		sg.sg_sig_size = cpu_to_le32(sig.len);
+		sg.sg_sig_offset = cpu_to_le64(pos);
+
+		result = write_to_bf(bfc, sig.data, sig.len, pos);
+		if (result)
+			goto err;
+	}
+
+	tree_area_pos = incfs_get_end_offset(bfc->bc_file);
+	if (tree_size > 0) {
+		if (tree_size > 5 * INCFS_DATA_FILE_BLOCK_SIZE) {
+			/*
+			 * If hash tree is big enough, it makes sense to
+			 * align in the backing file for faster access.
+			 */
+			loff_t offset = round_up(tree_area_pos, PAGE_SIZE);
+
+			alignment = offset - tree_area_pos;
+			tree_area_pos = offset;
+		}
+
+		/*
+		 * If the root hash is not the only hash in the tree,
+		 * reserve 0-filled space for the tree.
+		 */
+		result = append_zeros(bfc, tree_size + alignment);
+		if (result)
+			goto err;
+
+		sg.sg_hash_tree_size = cpu_to_le32(tree_size);
+		sg.sg_hash_tree_offset = cpu_to_le64(tree_area_pos);
+	}
+
+	/* Write a hash tree metadata record pointing to the hash tree above. */
+	result = append_md_to_backing_file(bfc, &sg.sg_header);
+err:
+	if (result)
+		/* Error, rollback file changes */
+		truncate_backing_file(bfc, rollback_pos);
+	return result;
+}
+
+static int write_new_status_to_backing_file(struct backing_file_context *bfc,
+				       u32 data_blocks_written,
+				       u32 hash_blocks_written)
+{
+	int result;
+	loff_t rollback_pos;
+	struct incfs_status is = {
+		.is_header = {
+			.h_md_entry_type = INCFS_MD_STATUS,
+			.h_record_size = cpu_to_le16(sizeof(is)),
+		},
+		.is_data_blocks_written = cpu_to_le32(data_blocks_written),
+		.is_hash_blocks_written = cpu_to_le32(hash_blocks_written),
+	};
+
+	if (!bfc)
+		return -EFAULT;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+	rollback_pos = incfs_get_end_offset(bfc->bc_file);
+	result = append_md_to_backing_file(bfc, &is.is_header);
+	if (result)
+		truncate_backing_file(bfc, rollback_pos);
+
+	return result;
+}
+
+int incfs_write_status_to_backing_file(struct backing_file_context *bfc,
+				       loff_t status_offset,
+				       u32 data_blocks_written,
+				       u32 hash_blocks_written)
+{
+	struct incfs_status is;
+	int result;
+
+	if (status_offset == 0)
+		return write_new_status_to_backing_file(bfc,
+				data_blocks_written, hash_blocks_written);
+
+	result = incfs_kread(bfc->bc_file, &is, sizeof(is), status_offset);
+	if (result != sizeof(is))
+		return -EIO;
+
+	is.is_data_blocks_written = cpu_to_le32(data_blocks_written);
+	is.is_hash_blocks_written = cpu_to_le32(hash_blocks_written);
+	result = incfs_kwrite(bfc->bc_file, &is, sizeof(is), status_offset);
+	if (result != sizeof(is))
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * Write a backing file header.
+ * It should only be called on an empty file.
+ * fh.fh_first_md_offset is 0 for now, but will be updated
+ * once the first metadata record is added.
+ */
+int incfs_write_fh_to_backing_file(struct backing_file_context *bfc,
+				   incfs_uuid_t *uuid, u64 file_size)
+{
+	struct incfs_file_header fh = {};
+	loff_t file_pos = 0;
+
+	if (!bfc)
+		return -EFAULT;
+
+	fh.fh_magic = cpu_to_le64(INCFS_MAGIC_NUMBER);
+	fh.fh_version = cpu_to_le64(INCFS_FORMAT_CURRENT_VER);
+	fh.fh_header_size = cpu_to_le16(sizeof(fh));
+	fh.fh_first_md_offset = cpu_to_le64(0);
+	fh.fh_data_block_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);
+
+	fh.fh_file_size = cpu_to_le64(file_size);
+	fh.fh_uuid = *uuid;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	file_pos = incfs_get_end_offset(bfc->bc_file);
+	if (file_pos != 0)
+		return -EEXIST;
+
+	return write_to_bf(bfc, &fh, sizeof(fh), file_pos);
+}
+
+/*
+ * Write a backing file header for a mapping file.
+ * It should only be called on an empty file.
+ */
+int incfs_write_mapping_fh_to_backing_file(struct backing_file_context *bfc,
+				incfs_uuid_t *uuid, u64 file_size, u64 offset)
+{
+	struct incfs_file_header fh = {};
+	loff_t file_pos = 0;
+
+	if (!bfc)
+		return -EFAULT;
+
+	fh.fh_magic = cpu_to_le64(INCFS_MAGIC_NUMBER);
+	fh.fh_version = cpu_to_le64(INCFS_FORMAT_CURRENT_VER);
+	fh.fh_header_size = cpu_to_le16(sizeof(fh));
+	fh.fh_original_offset = cpu_to_le64(offset);
+	fh.fh_data_block_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);
+
+	fh.fh_mapped_file_size = cpu_to_le64(file_size);
+	fh.fh_original_uuid = *uuid;
+	fh.fh_flags = cpu_to_le32(INCFS_FILE_MAPPED);
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	file_pos = incfs_get_end_offset(bfc->bc_file);
+	if (file_pos != 0)
+		return -EEXIST;
+
+	return write_to_bf(bfc, &fh, sizeof(fh), file_pos);
+}
+
+/* Write a given data block and update the file's blockmap to point to it. */
+int incfs_write_data_block_to_backing_file(struct backing_file_context *bfc,
+				     struct mem_range block, int block_index,
+				     loff_t bm_base_off, u16 flags)
+{
+	struct incfs_blockmap_entry bm_entry = {};
+	int result = 0;
+	loff_t data_offset = 0;
+	loff_t bm_entry_off =
+		bm_base_off + sizeof(struct incfs_blockmap_entry) * block_index;
+
+	if (!bfc)
+		return -EFAULT;
+
+	if (block.len >= (1 << 16) || block_index < 0)
+		return -EINVAL;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	data_offset = incfs_get_end_offset(bfc->bc_file);
+	if (data_offset <= bm_entry_off) {
+		/* Blockmap entry is beyond the file's end. This is invalid. */
+		return -EINVAL;
+	}
+
+	/* Write the block data at the end of the backing file. */
+	result = write_to_bf(bfc, block.data, block.len, data_offset);
+	if (result)
+		return result;
+
+	/* Update the blockmap to point to the newly written data. */
+	bm_entry.me_data_offset_lo = cpu_to_le32((u32)data_offset);
+	bm_entry.me_data_offset_hi = cpu_to_le16((u16)(data_offset >> 32));
+	bm_entry.me_data_size = cpu_to_le16((u16)block.len);
+	bm_entry.me_flags = cpu_to_le16(flags);
+
+	return write_to_bf(bfc, &bm_entry, sizeof(bm_entry),
+				bm_entry_off);
+}
+
+int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc,
+					   struct mem_range block,
+					   int block_index,
+					   loff_t hash_area_off,
+					   loff_t bm_base_off,
+					   loff_t file_size)
+{
+	struct incfs_blockmap_entry bm_entry = {};
+	int result;
+	loff_t data_offset = 0;
+	loff_t file_end = 0;
+	loff_t bm_entry_off =
+		bm_base_off +
+		sizeof(struct incfs_blockmap_entry) *
+			(block_index + get_blocks_count_for_size(file_size));
+
+	if (!bfc)
+		return -EFAULT;
+
+	LOCK_REQUIRED(bfc->bc_mutex);
+
+	data_offset = hash_area_off + block_index * INCFS_DATA_FILE_BLOCK_SIZE;
+	file_end = incfs_get_end_offset(bfc->bc_file);
+	if (data_offset + block.len > file_end) {
+		/* Block is located beyond the file's end. This is invalid. */
+		return -EINVAL;
+	}
+
+	result = write_to_bf(bfc, block.data, block.len, data_offset);
+	if (result)
+		return result;
+
+	bm_entry.me_data_offset_lo = cpu_to_le32((u32)data_offset);
+	bm_entry.me_data_offset_hi = cpu_to_le16((u16)(data_offset >> 32));
+	bm_entry.me_data_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);
+
+	return write_to_bf(bfc, &bm_entry, sizeof(bm_entry), bm_entry_off);
+}
+
+int incfs_read_blockmap_entry(struct backing_file_context *bfc, int block_index,
+			loff_t bm_base_off,
+			struct incfs_blockmap_entry *bm_entry)
+{
+	int error = incfs_read_blockmap_entries(bfc, bm_entry, block_index, 1,
+						bm_base_off);
+
+	if (error < 0)
+		return error;
+
+	if (error == 0)
+		return -EIO;
+
+	if (error != 1)
+		return -EFAULT;
+
+	return 0;
+}
+
+int incfs_read_blockmap_entries(struct backing_file_context *bfc,
+		struct incfs_blockmap_entry *entries,
+		int start_index, int blocks_number,
+		loff_t bm_base_off)
+{
+	loff_t bm_entry_off =
+		bm_base_off + sizeof(struct incfs_blockmap_entry) * start_index;
+	const size_t bytes_to_read = sizeof(struct incfs_blockmap_entry)
+					* blocks_number;
+	int result = 0;
+
+	if (!bfc || !entries)
+		return -EFAULT;
+
+	if (start_index < 0 || bm_base_off <= 0)
+		return -ENODATA;
+
+	result = incfs_kread(bfc->bc_file, entries, bytes_to_read,
+			     bm_entry_off);
+	if (result < 0)
+		return result;
+	return result / sizeof(*entries);
+}
+
+int incfs_read_file_header(struct backing_file_context *bfc,
+			   loff_t *first_md_off, incfs_uuid_t *uuid,
+			   u64 *file_size, u32 *flags)
+{
+	ssize_t bytes_read = 0;
+	struct incfs_file_header fh = {};
+
+	if (!bfc || !first_md_off)
+		return -EFAULT;
+
+	bytes_read = incfs_kread(bfc->bc_file, &fh, sizeof(fh), 0);
+	if (bytes_read < 0)
+		return bytes_read;
+
+	if (bytes_read < sizeof(fh))
+		return -EBADMSG;
+
+	if (le64_to_cpu(fh.fh_magic) != INCFS_MAGIC_NUMBER)
+		return -EILSEQ;
+
+	if (le64_to_cpu(fh.fh_version) > INCFS_FORMAT_CURRENT_VER)
+		return -EILSEQ;
+
+	if (le16_to_cpu(fh.fh_data_block_size) != INCFS_DATA_FILE_BLOCK_SIZE)
+		return -EILSEQ;
+
+	if (le16_to_cpu(fh.fh_header_size) != sizeof(fh))
+		return -EILSEQ;
+
+	if (first_md_off)
+		*first_md_off = le64_to_cpu(fh.fh_first_md_offset);
+	if (uuid)
+		*uuid = fh.fh_uuid;
+	if (file_size)
+		*file_size = le64_to_cpu(fh.fh_file_size);
+	if (flags)
+		*flags = le32_to_cpu(fh.fh_flags);
+	return 0;
+}
+
+/*
+ * Read through metadata records from the backing file one by one
+ * and call the provided metadata handlers.
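+ *
+ * A typical caller loop, sketched (first_md_off comes from
+ * incfs_read_file_header(), the callbacks from the caller's handler):
+ *
+ *	handler->md_record_offset = first_md_off;
+ *	while (handler->md_record_offset > 0) {
+ *		int res = incfs_read_next_metadata_record(bfc, handler);
+ *
+ *		if (res)
+ *			break;
+ *	}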
+ */
+int incfs_read_next_metadata_record(struct backing_file_context *bfc,
+			      struct metadata_handler *handler)
+{
+	const ssize_t max_md_size = INCFS_MAX_METADATA_RECORD_SIZE;
+	ssize_t bytes_read = 0;
+	size_t md_record_size = 0;
+	loff_t next_record = 0;
+	int res = 0;
+	struct incfs_md_header *md_hdr = NULL;
+
+	if (!bfc || !handler)
+		return -EFAULT;
+
+	if (handler->md_record_offset == 0)
+		return -EPERM;
+
+	memset(&handler->md_buffer, 0, max_md_size);
+	bytes_read = incfs_kread(bfc->bc_file, &handler->md_buffer,
+				 max_md_size, handler->md_record_offset);
+	if (bytes_read < 0)
+		return bytes_read;
+	if (bytes_read < sizeof(*md_hdr))
+		return -EBADMSG;
+
+	md_hdr = &handler->md_buffer.md_header;
+	next_record = le64_to_cpu(md_hdr->h_next_md_offset);
+	md_record_size = le16_to_cpu(md_hdr->h_record_size);
+
+	if (md_record_size > max_md_size) {
+		pr_warn("incfs: The record is too large. Size: %ld",
+				md_record_size);
+		return -EBADMSG;
+	}
+
+	if (bytes_read < md_record_size) {
+		pr_warn("incfs: The record hasn't been fully read.");
+		return -EBADMSG;
+	}
+
+	if (next_record <= handler->md_record_offset && next_record != 0) {
+		pr_warn("incfs: Next record (%lld) points back in file.",
+			next_record);
+		return -EBADMSG;
+	}
+
+	switch (md_hdr->h_md_entry_type) {
+	case INCFS_MD_NONE:
+		break;
+	case INCFS_MD_BLOCK_MAP:
+		if (handler->handle_blockmap)
+			res = handler->handle_blockmap(
+				&handler->md_buffer.blockmap, handler);
+		break;
+	case INCFS_MD_FILE_ATTR:
+		/*
+		 * File attrs no longer supported, ignore section for
+		 * compatibility
+		 */
+		break;
+	case INCFS_MD_SIGNATURE:
+		if (handler->handle_signature)
+			res = handler->handle_signature(
+				&handler->md_buffer.signature, handler);
+		break;
+	case INCFS_MD_STATUS:
+		if (handler->handle_status)
+			res = handler->handle_status(
+				&handler->md_buffer.status, handler);
+		break;
+	default:
+		res = -ENOTSUPP;
+		break;
+	}
+
+	if (!res) {
+		if (next_record == 0) {
+			/*
+			 * Zero offset for the next record means that the last
+			 * metadata record has just been processed.
+			 */
+			bfc->bc_last_md_record_offset =
+				handler->md_record_offset;
+		}
+		handler->md_prev_record_offset = handler->md_record_offset;
+		handler->md_record_offset = next_record;
+	}
+	return res;
+}
+
+ssize_t incfs_kread(struct file *f, void *buf, size_t size, loff_t pos)
+{
+	return kernel_read(f, buf, size, &pos);
+}
+
+ssize_t incfs_kwrite(struct file *f, const void *buf, size_t size, loff_t pos)
+{
+	return kernel_write(f, buf, size, &pos);
+}
diff --git a/fs/incfs/format.h b/fs/incfs/format.h
new file mode 100644
index 0000000..81e3c1a
--- /dev/null
+++ b/fs/incfs/format.h
@@ -0,0 +1,354 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018 Google LLC
+ */
+
+/*
+ * Overview
+ * --------
+ * The backbone of the incremental-fs ondisk format is an append-only linked
+ * list of metadata blocks. Each metadata block contains the offset of the
+ * next one. These blocks describe files and directories on the file system.
+ * They also represent actions of adding and removing file names (hard links).
+ *
+ * Every time an incremental-fs instance is mounted, it reads through this
+ * list to recreate the filesystem's state in memory. The offset of the first
+ * record in the metadata list is stored in the superblock at the beginning
+ * of the backing file.
+ *
+ * Most of the backing file is taken by data areas and blockmaps.
+ * Since data blocks can be compressed and have different sizes,
+ * a single per-file data area can't be pre-allocated. That's why blockmaps
+ * are needed in order to find the location and size of each data block in
+ * the backing file. Each time a file is created, a corresponding block map
+ * is allocated to store future offsets of data blocks.
+ *
+ * Whenever a data block is given by a data loader to incremental-fs:
+ *   - A data area with the given block is appended to the end of
+ *     the backing file.
+ *   - A record in the blockmap for the given block index is updated to reflect
+ *     its location, size, and compression algorithm.
+ *
+ * Metadata records
+ * ----------------
+ * incfs_blockmap - metadata record that specifies size and location
+ *                           of a blockmap area for a given file. This area
+ *                           contains an array of incfs_blockmap_entry-s.
+ * incfs_file_signature - metadata record that specifies where file signature
+ *                           and its hash tree can be found in the backing file.
+ *
+ * incfs_file_attr - metadata record that specifies where additional file
+ *		        attributes blob can be found.
+ *
+ * Metadata header
+ * ---------------
+ * incfs_md_header - header of a metadata record. It's always a part
+ *                   of other structures and serves the purpose of metadata
+ *                   bookkeeping.
+ *
+ *              +-----------------------------------------------+       ^
+ *              |            incfs_md_header                    |       |
+ *              | 1. type of body(BLOCKMAP, FILE_ATTR..)        |       |
+ *              | 2. size of the whole record header + body     |       |
+ *              | 3. CRC the whole record header + body         |       |
+ *              | 4. offset of the previous md record           |]------+
+ *              | 5. offset of the next md record (md link)     |]---+
+ *              +-----------------------------------------------+    |
+ *              |  Metadata record body with useful data        |    |
+ *              +-----------------------------------------------+    |
+ *                                                                   +--->
+ *
+ * Other ondisk structures
+ * -----------------------
+ * incfs_file_header - backing file header
+ * incfs_blockmap_entry - a record in a blockmap area that describes size
+ *                       and location of a data block.
+ * Data blocks don't have any particular structure; they are written to the
+ * backing file in a raw form as they come from a data loader.
+ *
+ * Backing file layout
+ * -------------------
+ *
+ *
+ *              +-------------------------------------------+
+ *              |            incfs_file_header              |]---+
+ *              +-------------------------------------------+    |
+ *              |                 metadata                  |<---+
+ *              |           incfs_file_signature            |]---+
+ *              +-------------------------------------------+    |
+ *                        .........................              |
+ *              +-------------------------------------------+    |   metadata
+ *     +------->|               blockmap area               |    |  list links
+ *     |        |          [incfs_blockmap_entry]           |    |
+ *     |        |          [incfs_blockmap_entry]           |    |
+ *     |        |          [incfs_blockmap_entry]           |    |
+ *     |    +--[|          [incfs_blockmap_entry]           |    |
+ *     |    |   |          [incfs_blockmap_entry]           |    |
+ *     |    |   |          [incfs_blockmap_entry]           |    |
+ *     |    |   +-------------------------------------------+    |
+ *     |    |             .........................              |
+ *     |    |   +-------------------------------------------+    |
+ *     |    |   |                 metadata                  |<---+
+ *     +----|--[|               incfs_blockmap              |]---+
+ *          |   +-------------------------------------------+    |
+ *          |             .........................              |
+ *          |   +-------------------------------------------+    |
+ *          +-->|                 data block                |    |
+ *              +-------------------------------------------+    |
+ *                        .........................              |
+ *              +-------------------------------------------+    |
+ *              |                 metadata                  |<---+
+ *              |              incfs_file_attr              |
+ *              +-------------------------------------------+
+ */
+#ifndef _INCFS_FORMAT_H
+#define _INCFS_FORMAT_H
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <uapi/linux/incrementalfs.h>
+
+#include "internal.h"
+
+#define INCFS_MAX_NAME_LEN 255
+#define INCFS_FORMAT_V1 1
+#define INCFS_FORMAT_CURRENT_VER INCFS_FORMAT_V1
+
+enum incfs_metadata_type {
+	INCFS_MD_NONE = 0,
+	INCFS_MD_BLOCK_MAP = 1,
+	INCFS_MD_FILE_ATTR = 2,
+	INCFS_MD_SIGNATURE = 3,
+	INCFS_MD_STATUS = 4,
+};
+
+enum incfs_file_header_flags {
+	INCFS_FILE_MAPPED = 1 << 1,
+};
+
+/* Header included at the beginning of all metadata records on the disk. */
+struct incfs_md_header {
+	__u8 h_md_entry_type;
+
+	/*
+	 * Size of the metadata record.
+	 * (e.g. inode, dir entry etc) not just this struct.
+	 */
+	__le16 h_record_size;
+
+	/*
+	 * Was: CRC32 of the metadata record.
+	 * (e.g. inode, dir entry etc) not just this struct.
+	 */
+	__le32 h_unused1;
+
+	/* Offset of the next metadata entry if any */
+	__le64 h_next_md_offset;
+
+	/* Was: Offset of the previous metadata entry if any */
+	__le64 h_unused2;
+
+} __packed;
+
+/* Backing file header */
+struct incfs_file_header {
+	/* Magic number: INCFS_MAGIC_NUMBER */
+	__le64 fh_magic;
+
+	/* Format version: INCFS_FORMAT_CURRENT_VER */
+	__le64 fh_version;
+
+	/* sizeof(incfs_file_header) */
+	__le16 fh_header_size;
+
+	/* INCFS_DATA_FILE_BLOCK_SIZE */
+	__le16 fh_data_block_size;
+
+	/* File flags, from incfs_file_header_flags */
+	__le32 fh_flags;
+
+	union {
+		/* Standard incfs file */
+		struct {
+			/* Offset of the first metadata record */
+			__le64 fh_first_md_offset;
+
+			/* Full size of the file's content */
+			__le64 fh_file_size;
+
+			/* File uuid */
+			incfs_uuid_t fh_uuid;
+		};
+
+		/* Mapped file - INCFS_FILE_MAPPED set in fh_flags */
+		struct {
+			/* Offset in original file */
+			__le64 fh_original_offset;
+
+			/* Full size of the file's content */
+			__le64 fh_mapped_file_size;
+
+			/* Original file's uuid */
+			incfs_uuid_t fh_original_uuid;
+		};
+	};
+} __packed;
+
+enum incfs_block_map_entry_flags {
+	INCFS_BLOCK_COMPRESSED_LZ4 = 1,
+	INCFS_BLOCK_COMPRESSED_ZSTD = 2,
+
+	/* Reserve 3 bits for compression alg */
+	INCFS_BLOCK_COMPRESSED_MASK = 7,
+};
+
+/* Block map entry pointing to an actual location of the data block. */
+struct incfs_blockmap_entry {
+	/* Offset of the actual data block. Lower 32 bits */
+	__le32 me_data_offset_lo;
+
+	/* Offset of the actual data block. Higher 16 bits */
+	__le16 me_data_offset_hi;
+
+	/* How many bytes the data actually occupies in the backing file */
+	__le16 me_data_size;
+
+	/* Block flags from incfs_block_map_entry_flags */
+	__le16 me_flags;
+} __packed;
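+
+/*
+ * The full 48-bit data offset is reassembled from an entry roughly as:
+ *
+ *	loff_t off = le32_to_cpu(e->me_data_offset_lo) |
+ *		     ((loff_t)le16_to_cpu(e->me_data_offset_hi) << 32);
+ */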
+
+/* Metadata record for locations of file blocks. Type = INCFS_MD_BLOCK_MAP */
+struct incfs_blockmap {
+	struct incfs_md_header m_header;
+
+	/* Base offset of the array of incfs_blockmap_entry */
+	__le64 m_base_offset;
+
+	/* Size of the map entry array in blocks */
+	__le32 m_block_count;
+} __packed;
+
+/* Metadata record for file signature. Type = INCFS_MD_SIGNATURE */
+struct incfs_file_signature {
+	struct incfs_md_header sg_header;
+
+	__le32 sg_sig_size; /* The size of the signature. */
+
+	__le64 sg_sig_offset; /* Signature's offset in the backing file */
+
+	__le32 sg_hash_tree_size; /* The size of the hash tree. */
+
+	__le64 sg_hash_tree_offset; /* Hash tree offset in the backing file */
+} __packed;
+
+/* In-memory version of the above */
+struct incfs_df_signature {
+	u32 sig_size;
+	u64 sig_offset;
+	u32 hash_size;
+	u64 hash_offset;
+};
+
+struct incfs_status {
+	struct incfs_md_header is_header;
+
+	__le32 is_data_blocks_written; /* Number of data blocks written */
+
+	__le32 is_hash_blocks_written; /* Number of hash blocks written */
+
+	__le32 is_dummy[6]; /* Spare fields */
+} __packed;
+
+/* State of the backing file. */
+struct backing_file_context {
+	/* Protects writes to bc_file */
+	struct mutex bc_mutex;
+
+	/* File object to read data from */
+	struct file *bc_file;
+
+	/*
+	 * Offset of the last known metadata record in the backing file.
+	 * 0 means there are no metadata records.
+	 */
+	loff_t bc_last_md_record_offset;
+};
+
+struct metadata_handler {
+	loff_t md_record_offset;
+	loff_t md_prev_record_offset;
+	void *context;
+
+	union {
+		struct incfs_md_header md_header;
+		struct incfs_blockmap blockmap;
+		struct incfs_file_signature signature;
+		struct incfs_status status;
+	} md_buffer;
+
+	int (*handle_blockmap)(struct incfs_blockmap *bm,
+			       struct metadata_handler *handler);
+	int (*handle_signature)(struct incfs_file_signature *sig,
+				 struct metadata_handler *handler);
+	int (*handle_status)(struct incfs_status *sig,
+				 struct metadata_handler *handler);
+};
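+
+/*
+ * A minimal wiring sketch; the callback implementations are the
+ * caller's own (the names here are illustrative):
+ *
+ *	struct metadata_handler handler = {
+ *		.md_record_offset = first_md_off,
+ *		.handle_blockmap = my_handle_blockmap,
+ *		.handle_status = my_handle_status,
+ *	};
+ */
+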
+#define INCFS_MAX_METADATA_RECORD_SIZE \
+	sizeof_field(struct metadata_handler, md_buffer)
+
+/* Backing file context management */
+struct backing_file_context *incfs_alloc_bfc(struct file *backing_file);
+
+void incfs_free_bfc(struct backing_file_context *bfc);
+
+/* Writing stuff */
+int incfs_write_blockmap_to_backing_file(struct backing_file_context *bfc,
+					 u32 block_count);
+
+int incfs_write_fh_to_backing_file(struct backing_file_context *bfc,
+				   incfs_uuid_t *uuid, u64 file_size);
+
+int incfs_write_mapping_fh_to_backing_file(struct backing_file_context *bfc,
+				incfs_uuid_t *uuid, u64 file_size, u64 offset);
+
+int incfs_write_data_block_to_backing_file(struct backing_file_context *bfc,
+					   struct mem_range block,
+					   int block_index, loff_t bm_base_off,
+					   u16 flags);
+
+int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc,
+					   struct mem_range block,
+					   int block_index,
+					   loff_t hash_area_off,
+					   loff_t bm_base_off,
+					   loff_t file_size);
+
+int incfs_write_signature_to_backing_file(struct backing_file_context *bfc,
+					  struct mem_range sig, u32 tree_size);
+
+int incfs_write_status_to_backing_file(struct backing_file_context *bfc,
+				       loff_t status_offset,
+				       u32 data_blocks_written,
+				       u32 hash_blocks_written);
+
+/* Reading stuff */
+int incfs_read_file_header(struct backing_file_context *bfc,
+			   loff_t *first_md_off, incfs_uuid_t *uuid,
+			   u64 *file_size, u32 *flags);
+
+int incfs_read_blockmap_entry(struct backing_file_context *bfc, int block_index,
+			      loff_t bm_base_off,
+			      struct incfs_blockmap_entry *bm_entry);
+
+int incfs_read_blockmap_entries(struct backing_file_context *bfc,
+		struct incfs_blockmap_entry *entries,
+		int start_index, int blocks_number,
+		loff_t bm_base_off);
+
+int incfs_read_next_metadata_record(struct backing_file_context *bfc,
+				    struct metadata_handler *handler);
+
+ssize_t incfs_kread(struct file *f, void *buf, size_t size, loff_t pos);
+ssize_t incfs_kwrite(struct file *f, const void *buf, size_t size, loff_t pos);
+
+#endif /* _INCFS_FORMAT_H */
diff --git a/fs/incfs/integrity.c b/fs/incfs/integrity.c
new file mode 100644
index 0000000..bce319e
--- /dev/null
+++ b/fs/incfs/integrity.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Google LLC
+ */
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <linux/err.h>
+#include <linux/version.h>
+
+#include "integrity.h"
+
+struct incfs_hash_alg *incfs_get_hash_alg(enum incfs_hash_tree_algorithm id)
+{
+	static struct incfs_hash_alg sha256 = {
+		.name = "sha256",
+		.digest_size = SHA256_DIGEST_SIZE,
+		.id = INCFS_HASH_TREE_SHA256
+	};
+	struct incfs_hash_alg *result = NULL;
+	struct crypto_shash *shash;
+
+	if (id == INCFS_HASH_TREE_SHA256) {
+		BUILD_BUG_ON(INCFS_MAX_HASH_SIZE < SHA256_DIGEST_SIZE);
+		result = &sha256;
+	}
+
+	if (result == NULL)
+		return ERR_PTR(-ENOENT);
+
+	/* pairs with cmpxchg_release() below */
+	shash = smp_load_acquire(&result->shash);
+	if (shash)
+		return result;
+
+	shash = crypto_alloc_shash(result->name, 0, 0);
+	if (IS_ERR(shash)) {
+		int err = PTR_ERR(shash);
+
+		pr_err("Can't allocate hash alg %s, error code:%d",
+			result->name, err);
+		return ERR_PTR(err);
+	}
+
+	/* pairs with smp_load_acquire() above */
+	if (cmpxchg_release(&result->shash, NULL, shash) != NULL)
+		crypto_free_shash(shash);
+
+	return result;
+}
+
+struct signature_info {
+	u32 version;
+	enum incfs_hash_tree_algorithm hash_algorithm;
+	u8 log2_blocksize;
+	struct mem_range salt;
+	struct mem_range root_hash;
+};
+
+static bool read_u32(u8 **p, u8 *top, u32 *result)
+{
+	if (*p + sizeof(u32) > top)
+		return false;
+
+	*result = le32_to_cpu(*(__le32 *)*p);
+	*p += sizeof(u32);
+	return true;
+}
+
+static bool read_u8(u8 **p, u8 *top, u8 *result)
+{
+	if (*p + sizeof(u8) > top)
+		return false;
+
+	*result = *(u8 *)*p;
+	*p += sizeof(u8);
+	return true;
+}
+
+static bool read_mem_range(u8 **p, u8 *top, struct mem_range *range)
+{
+	u32 len;
+
+	if (!read_u32(p, top, &len) || *p + len > top)
+		return false;
+
+	range->len = len;
+	range->data = *p;
+	*p += len;
+	return true;
+}
+
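+/*
+ * Layout of the signature blob, as reconstructed from the parser below
+ * (all integers little-endian):
+ *
+ *	u32 version;		- must be INCFS_SIGNATURE_VERSION
+ *	u32 hash_section_size;	- bytes from here to the section's end
+ *	u32 hash_algorithm;	- must be INCFS_HASH_TREE_SHA256
+ *	u8  log2_blocksize;	- must be 12 (4096-byte blocks)
+ *	u32 salt_size;
+ *	u8  salt[salt_size];
+ *	u32 root_hash_size;
+ *	u8  root_hash[root_hash_size];
+ */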
+static int incfs_parse_signature(struct mem_range signature,
+				 struct signature_info *si)
+{
+	u8 *p = signature.data;
+	u8 *top = signature.data + signature.len;
+	u32 hash_section_size;
+
+	if (signature.len > INCFS_MAX_SIGNATURE_SIZE)
+		return -EINVAL;
+
+	if (!read_u32(&p, top, &si->version) ||
+	    si->version != INCFS_SIGNATURE_VERSION)
+		return -EINVAL;
+
+	if (!read_u32(&p, top, &hash_section_size) ||
+	    p + hash_section_size > top)
+		return -EINVAL;
+	top = p + hash_section_size;
+
+	if (!read_u32(&p, top, &si->hash_algorithm) ||
+	    si->hash_algorithm != INCFS_HASH_TREE_SHA256)
+		return -EINVAL;
+
+	if (!read_u8(&p, top, &si->log2_blocksize) || si->log2_blocksize != 12)
+		return -EINVAL;
+
+	if (!read_mem_range(&p, top, &si->salt))
+		return -EINVAL;
+
+	if (!read_mem_range(&p, top, &si->root_hash))
+		return -EINVAL;
+
+	if (p != top)
+		return -EINVAL;
+
+	return 0;
+}
+
+struct mtree *incfs_alloc_mtree(struct mem_range signature,
+				int data_block_count)
+{
+	int error;
+	struct signature_info si;
+	struct mtree *result = NULL;
+	struct incfs_hash_alg *hash_alg = NULL;
+	int hash_per_block;
+	int lvl;
+	int total_blocks = 0;
+	int blocks_in_level[INCFS_MAX_MTREE_LEVELS];
+	int blocks = data_block_count;
+
+	if (data_block_count <= 0)
+		return ERR_PTR(-EINVAL);
+
+	error = incfs_parse_signature(signature, &si);
+	if (error)
+		return ERR_PTR(error);
+
+	hash_alg = incfs_get_hash_alg(si.hash_algorithm);
+	if (IS_ERR(hash_alg))
+		return ERR_PTR(PTR_ERR(hash_alg));
+
+	if (si.root_hash.len < hash_alg->digest_size)
+		return ERR_PTR(-EINVAL);
+
+	result = kzalloc(sizeof(*result), GFP_NOFS);
+	if (!result)
+		return ERR_PTR(-ENOMEM);
+
+	result->alg = hash_alg;
+	hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / result->alg->digest_size;
+
+	/* Calculating tree geometry. */
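+	/*
+	 * Worked example, assuming SHA-256 (32-byte digests) and 4096-byte
+	 * blocks: hash_per_block = 128, so data_block_count = 1000 yields
+	 * 8 level-0 blocks, then 1 level-1 block; depth = 2 and the tree
+	 * area takes 9 * 4096 = 36864 bytes.
+	 */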
+	/* First pass: calculate how many blocks in each tree level. */
+	for (lvl = 0; blocks > 1; lvl++) {
+		if (lvl >= INCFS_MAX_MTREE_LEVELS) {
+			pr_err("incfs: too much data in mtree");
+			goto err;
+		}
+
+		blocks = (blocks + hash_per_block - 1) / hash_per_block;
+		blocks_in_level[lvl] = blocks;
+		total_blocks += blocks;
+	}
+	result->depth = lvl;
+	result->hash_tree_area_size = total_blocks * INCFS_DATA_FILE_BLOCK_SIZE;
+	if (result->hash_tree_area_size > INCFS_MAX_HASH_AREA_SIZE)
+		goto err;
+
+	blocks = 0;
+	/* Second pass: calculate offset of each level. 0th level goes last. */
+	for (lvl = 0; lvl < result->depth; lvl++) {
+		u32 suboffset;
+
+		blocks += blocks_in_level[lvl];
+		suboffset = (total_blocks - blocks)
+					* INCFS_DATA_FILE_BLOCK_SIZE;
+
+		result->hash_level_suboffset[lvl] = suboffset;
+	}
+
+	/* Root hash is stored separately from the rest of the tree. */
+	memcpy(result->root_hash, si.root_hash.data, hash_alg->digest_size);
+	return result;
+
+err:
+	kfree(result);
+	return ERR_PTR(-E2BIG);
+}
+
+void incfs_free_mtree(struct mtree *tree)
+{
+	kfree(tree);
+}
+
+int incfs_calc_digest(struct incfs_hash_alg *alg, struct mem_range data,
+			struct mem_range digest)
+{
+	SHASH_DESC_ON_STACK(desc, alg->shash);
+
+	if (!alg || !alg->shash || !data.data || !digest.data)
+		return -EFAULT;
+
+	if (alg->digest_size > digest.len)
+		return -EINVAL;
+
+	desc->tfm = alg->shash;
+
+	if (data.len < INCFS_DATA_FILE_BLOCK_SIZE) {
+		int err;
+		void *buf = kzalloc(INCFS_DATA_FILE_BLOCK_SIZE, GFP_NOFS);
+
+		if (!buf)
+			return -ENOMEM;
+
+		memcpy(buf, data.data, data.len);
+		err = crypto_shash_digest(desc, buf, INCFS_DATA_FILE_BLOCK_SIZE,
+					  digest.data);
+		kfree(buf);
+		return err;
+	}
+	return crypto_shash_digest(desc, data.data, data.len, digest.data);
+}
+
diff --git a/fs/incfs/integrity.h b/fs/incfs/integrity.h
new file mode 100644
index 0000000..cf79b64
--- /dev/null
+++ b/fs/incfs/integrity.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+#ifndef _INCFS_INTEGRITY_H
+#define _INCFS_INTEGRITY_H
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <crypto/hash.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "internal.h"
+
+#define INCFS_MAX_MTREE_LEVELS 8
+#define INCFS_MAX_HASH_AREA_SIZE (1280 * 1024 * 1024)
+
+struct incfs_hash_alg {
+	const char *name;
+	int digest_size;
+	enum incfs_hash_tree_algorithm id;
+
+	struct crypto_shash *shash;
+};
+
+/* Merkle tree structure. */
+struct mtree {
+	struct incfs_hash_alg *alg;
+
+	u8 root_hash[INCFS_MAX_HASH_SIZE];
+
+	/* Offset of each hash level in the hash area. */
+	u32 hash_level_suboffset[INCFS_MAX_MTREE_LEVELS];
+
+	u32 hash_tree_area_size;
+
+	/* Number of levels in hash_level_suboffset */
+	int depth;
+};
+
+struct incfs_hash_alg *incfs_get_hash_alg(enum incfs_hash_tree_algorithm id);
+
+struct mtree *incfs_alloc_mtree(struct mem_range signature,
+				int data_block_count);
+
+void incfs_free_mtree(struct mtree *tree);
+
+size_t incfs_get_mtree_depth(enum incfs_hash_tree_algorithm alg, loff_t size);
+
+size_t incfs_get_mtree_hash_count(enum incfs_hash_tree_algorithm alg,
+					loff_t size);
+
+int incfs_calc_digest(struct incfs_hash_alg *alg, struct mem_range data,
+			struct mem_range digest);
+
+#endif /* _INCFS_INTEGRITY_H */
diff --git a/fs/incfs/internal.h b/fs/incfs/internal.h
new file mode 100644
index 0000000..0a85eae
--- /dev/null
+++ b/fs/incfs/internal.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018 Google LLC
+ */
+#ifndef _INCFS_INTERNAL_H
+#define _INCFS_INTERNAL_H
+#include <linux/types.h>
+
+struct mem_range {
+	u8 *data;
+	size_t len;
+};
+
+static inline struct mem_range range(u8 *data, size_t len)
+{
+	return (struct mem_range){ .data = data, .len = len };
+}
+
+#define LOCK_REQUIRED(lock)  WARN_ON_ONCE(!mutex_is_locked(&lock))
+
+#endif /* _INCFS_INTERNAL_H */
diff --git a/fs/incfs/main.c b/fs/incfs/main.c
new file mode 100644
index 0000000..2b8161f
--- /dev/null
+++ b/fs/incfs/main.c
@@ -0,0 +1,112 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "vfs.h"
+
+#define INCFS_NODE_FEATURES "features"
+
+static struct file_system_type incfs_fs_type = {
+	.owner = THIS_MODULE,
+	.name = INCFS_NAME,
+	.mount = incfs_mount_fs,
+	.kill_sb = incfs_kill_sb,
+	.fs_flags = 0
+};
+
+static struct kobject *sysfs_root, *featurefs_root;
+
+static ssize_t corefs_show(struct kobject *kobj,
+			  struct kobj_attribute *attr, char *buff)
+{
+	return snprintf(buff, PAGE_SIZE, "supported\n");
+}
+
+static struct kobj_attribute corefs_attr = __ATTR_RO(corefs);
+
+static ssize_t report_uid_show(struct kobject *kobj,
+			       struct kobj_attribute *attr, char *buff)
+{
+	return snprintf(buff, PAGE_SIZE, "supported\n");
+}
+
+static struct kobj_attribute report_uid_attr = __ATTR_RO(report_uid);
+
+static struct attribute *attributes[] = {
+	&corefs_attr.attr,
+	&report_uid_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group attr_group = {
+	.attrs = attributes,
+};
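+
+/*
+ * With INCFS_NAME defined as "incfs" in the uapi header, these attributes
+ * surface as /sys/fs/incfs/features/corefs and
+ * /sys/fs/incfs/features/report_uid, each reading back "supported".
+ */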
+
+static int __init init_sysfs(void)
+{
+	int res = 0;
+
+	sysfs_root = kobject_create_and_add(INCFS_NAME, fs_kobj);
+	if (!sysfs_root)
+		return -ENOMEM;
+
+	featurefs_root = kobject_create_and_add(INCFS_NODE_FEATURES,
+						sysfs_root);
+	if (!featurefs_root) {
+		kobject_put(sysfs_root);
+		sysfs_root = NULL;
+		return -ENOMEM;
+	}
+
+	res = sysfs_create_group(featurefs_root, &attr_group);
+	if (res) {
+		kobject_put(featurefs_root);
+		kobject_put(sysfs_root);
+		featurefs_root = NULL;
+		sysfs_root = NULL;
+	}
+	return res;
+}
+
+static void cleanup_sysfs(void)
+{
+	if (featurefs_root) {
+		sysfs_remove_group(featurefs_root, &attr_group);
+		kobject_put(featurefs_root);
+		featurefs_root = NULL;
+	}
+
+	if (sysfs_root) {
+		kobject_put(sysfs_root);
+		sysfs_root = NULL;
+	}
+}
+
+static int __init init_incfs_module(void)
+{
+	int err = 0;
+
+	err = init_sysfs();
+	if (err)
+		return err;
+
+	err = register_filesystem(&incfs_fs_type);
+	if (err)
+		cleanup_sysfs();
+
+	return err;
+}
+
+static void __exit cleanup_incfs_module(void)
+{
+	cleanup_sysfs();
+	unregister_filesystem(&incfs_fs_type);
+}
+
+module_init(init_incfs_module);
+module_exit(cleanup_incfs_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Eugene Zemtsov <ezemtsov@google.com>");
+MODULE_DESCRIPTION("Incremental File System");
diff --git a/fs/incfs/pseudo_files.c b/fs/incfs/pseudo_files.c
new file mode 100644
index 0000000..768abd4
--- /dev/null
+++ b/fs/incfs/pseudo_files.c
@@ -0,0 +1,1280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google LLC
+ */
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/poll.h>
+#include <linux/syscalls.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "pseudo_files.h"
+
+#include "data_mgmt.h"
+#include "format.h"
+#include "integrity.h"
+#include "vfs.h"
+
+#define INCFS_PENDING_READS_INODE 2
+#define INCFS_LOG_INODE 3
+#define INCFS_BLOCKS_WRITTEN_INODE 4
+#define READ_WRITE_FILE_MODE 0666
+
+/*******************************************************************************
+ * .log pseudo file definition
+ ******************************************************************************/
+static const char log_file_name[] = INCFS_LOG_FILENAME;
+static const struct mem_range log_file_name_range = {
+	.data = (u8 *)log_file_name,
+	.len = ARRAY_SIZE(log_file_name) - 1
+};
+
+/* State of an open .log file, unique for each file descriptor. */
+struct log_file_state {
+	struct read_log_state state;
+};
+
+static ssize_t log_read(struct file *f, char __user *buf, size_t len,
+			loff_t *ppos)
+{
+	struct log_file_state *log_state = f->private_data;
+	struct mount_info *mi = get_mount_info(file_superblock(f));
+	int total_reads_collected = 0;
+	int rl_size;
+	ssize_t result = 0;
+	bool report_uid;
+	unsigned long page = 0;
+	struct incfs_pending_read_info *reads_buf = NULL;
+	struct incfs_pending_read_info2 *reads_buf2 = NULL;
+	size_t record_size;
+	ssize_t reads_to_collect;
+	ssize_t reads_per_page;
+
+	if (!mi)
+		return -EFAULT;
+
+	report_uid = mi->mi_options.report_uid;
+	record_size = report_uid ? sizeof(*reads_buf2) : sizeof(*reads_buf);
+	reads_to_collect = len / record_size;
+	reads_per_page = PAGE_SIZE / record_size;
+
+	rl_size = READ_ONCE(mi->mi_log.rl_size);
+	if (rl_size == 0)
+		return 0;
+
+	page = __get_free_page(GFP_NOFS);
+	if (!page)
+		return -ENOMEM;
+
+	if (report_uid)
+		reads_buf2 = (struct incfs_pending_read_info2 *) page;
+	else
+		reads_buf = (struct incfs_pending_read_info *) page;
+
+	reads_to_collect = min_t(ssize_t, rl_size, reads_to_collect);
+	while (reads_to_collect > 0) {
+		struct read_log_state next_state;
+		int reads_collected;
+
+		memcpy(&next_state, &log_state->state, sizeof(next_state));
+		reads_collected = incfs_collect_logged_reads(
+			mi, &next_state, reads_buf, reads_buf2,
+			min_t(ssize_t, reads_to_collect, reads_per_page));
+		if (reads_collected <= 0) {
+			result = total_reads_collected ?
+					 total_reads_collected * record_size :
+					 reads_collected;
+			goto out;
+		}
+		if (copy_to_user(buf, (void *) page,
+				 reads_collected * record_size)) {
+			result = total_reads_collected ?
+					 total_reads_collected * record_size :
+					 -EFAULT;
+			goto out;
+		}
+
+		memcpy(&log_state->state, &next_state, sizeof(next_state));
+		total_reads_collected += reads_collected;
+		buf += reads_collected * record_size;
+		reads_to_collect -= reads_collected;
+	}
+
+	result = total_reads_collected * record_size;
+	*ppos = 0;
+out:
+	free_page(page);
+	return result;
+}
+
+static __poll_t log_poll(struct file *file, poll_table *wait)
+{
+	struct log_file_state *log_state = file->private_data;
+	struct mount_info *mi = get_mount_info(file_superblock(file));
+	int count;
+	__poll_t ret = 0;
+
+	poll_wait(file, &mi->mi_log.ml_notif_wq, wait);
+	count = incfs_get_uncollected_logs_count(mi, &log_state->state);
+	if (count >= mi->mi_options.read_log_wakeup_count)
+		ret = EPOLLIN | EPOLLRDNORM;
+
+	return ret;
+}
+
+static int log_open(struct inode *inode, struct file *file)
+{
+	struct log_file_state *log_state = NULL;
+	struct mount_info *mi = get_mount_info(file_superblock(file));
+
+	log_state = kzalloc(sizeof(*log_state), GFP_NOFS);
+	if (!log_state)
+		return -ENOMEM;
+
+	log_state->state = incfs_get_log_state(mi);
+	file->private_data = log_state;
+	return 0;
+}
+
+static int log_release(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+static const struct file_operations incfs_log_file_ops = {
+	.read = log_read,
+	.poll = log_poll,
+	.open = log_open,
+	.release = log_release,
+	.llseek = noop_llseek,
+};
+
+/*******************************************************************************
+ * .pending_reads pseudo file definition
+ ******************************************************************************/
+static const char pending_reads_file_name[] = INCFS_PENDING_READS_FILENAME;
+static const struct mem_range pending_reads_file_name_range = {
+	.data = (u8 *)pending_reads_file_name,
+	.len = ARRAY_SIZE(pending_reads_file_name) - 1
+};
+
+/* State of an open .pending_reads file, unique for each file descriptor. */
+struct pending_reads_state {
+	/* Serial number of the last pending read obtained from this file. */
+	int last_pending_read_sn;
+};
+
+static ssize_t pending_reads_read(struct file *f, char __user *buf, size_t len,
+			    loff_t *ppos)
+{
+	struct pending_reads_state *pr_state = f->private_data;
+	struct mount_info *mi = get_mount_info(file_superblock(f));
+	bool report_uid;
+	unsigned long page = 0;
+	struct incfs_pending_read_info *reads_buf = NULL;
+	struct incfs_pending_read_info2 *reads_buf2 = NULL;
+	size_t record_size;
+	size_t reads_to_collect;
+	int last_known_read_sn = READ_ONCE(pr_state->last_pending_read_sn);
+	int new_max_sn = last_known_read_sn;
+	int reads_collected = 0;
+	ssize_t result = 0;
+
+	if (!mi)
+		return -EFAULT;
+
+	report_uid = mi->mi_options.report_uid;
+	record_size = report_uid ? sizeof(*reads_buf2) : sizeof(*reads_buf);
+	reads_to_collect = len / record_size;
+
+	if (!incfs_fresh_pending_reads_exist(mi, last_known_read_sn))
+		return 0;
+
+	page = get_zeroed_page(GFP_NOFS);
+	if (!page)
+		return -ENOMEM;
+
+	if (report_uid)
+		reads_buf2 = (struct incfs_pending_read_info2 *) page;
+	else
+		reads_buf = (struct incfs_pending_read_info *) page;
+
+	reads_to_collect =
+		min_t(size_t, PAGE_SIZE / record_size, reads_to_collect);
+
+	reads_collected = incfs_collect_pending_reads(mi, last_known_read_sn,
+				reads_buf, reads_buf2, reads_to_collect,
+				&new_max_sn);
+
+	if (reads_collected < 0) {
+		result = reads_collected;
+		goto out;
+	}
+
+	/*
+	 * Make sure that we don't accidentally copy more data into the
+	 * reads buffer than userspace can handle.
+	 */
+	reads_collected = min_t(size_t, reads_collected, reads_to_collect);
+	result = reads_collected * record_size;
+
+	/* Copy reads info to the userspace buffer */
+	if (copy_to_user(buf, (void *)page, result)) {
+		result = -EFAULT;
+		goto out;
+	}
+
+	WRITE_ONCE(pr_state->last_pending_read_sn, new_max_sn);
+	*ppos = 0;
+
+out:
+	free_page(page);
+	return result;
+}
+
+static __poll_t pending_reads_poll(struct file *file, poll_table *wait)
+{
+	struct pending_reads_state *state = file->private_data;
+	struct mount_info *mi = get_mount_info(file_superblock(file));
+	__poll_t ret = 0;
+
+	poll_wait(file, &mi->mi_pending_reads_notif_wq, wait);
+	if (incfs_fresh_pending_reads_exist(mi,
+					    state->last_pending_read_sn))
+		ret = EPOLLIN | EPOLLRDNORM;
+
+	return ret;
+}
+
+static int pending_reads_open(struct inode *inode, struct file *file)
+{
+	struct pending_reads_state *state = NULL;
+
+	state = kzalloc(sizeof(*state), GFP_NOFS);
+	if (!state)
+		return -ENOMEM;
+
+	file->private_data = state;
+	return 0;
+}
+
+static int pending_reads_release(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+static long ioctl_permit_fill(struct file *f, void __user *arg)
+{
+	struct incfs_permit_fill __user *usr_permit_fill = arg;
+	struct incfs_permit_fill permit_fill;
+	long error = 0;
+	struct file *file = NULL;
+	struct incfs_file_data *fd;
+
+	if (copy_from_user(&permit_fill, usr_permit_fill, sizeof(permit_fill)))
+		return -EFAULT;
+
+	file = fget(permit_fill.file_descriptor);
+	if (!file)
+		return -EBADF;
+
+	if (file->f_op != &incfs_file_ops) {
+		error = -EPERM;
+		goto out;
+	}
+
+	if (file->f_inode->i_sb != f->f_inode->i_sb) {
+		error = -EPERM;
+		goto out;
+	}
+
+	fd = file->private_data;
+
+	switch (fd->fd_fill_permission) {
+	case CANT_FILL:
+		fd->fd_fill_permission = CAN_FILL;
+		break;
+
+	case CAN_FILL:
+		pr_debug("CAN_FILL already set");
+		break;
+
+	default:
+		pr_warn("Invalid file private data");
+		error = -EFAULT;
+		goto out;
+	}
+
+out:
+	fput(file);
+	return error;
+}
+
+static int chmod(struct dentry *dentry, umode_t mode)
+{
+	struct inode *inode = dentry->d_inode;
+	struct inode *delegated_inode = NULL;
+	struct iattr newattrs;
+	int error;
+
+retry_deleg:
+	inode_lock(inode);
+	newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+	newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+	error = notify_change(dentry, &newattrs, &delegated_inode);
+	inode_unlock(inode);
+	if (delegated_inode) {
+		error = break_deleg_wait(&delegated_inode);
+		if (!error)
+			goto retry_deleg;
+	}
+	return error;
+}
+
+static bool incfs_equal_ranges(struct mem_range lhs, struct mem_range rhs)
+{
+	if (lhs.len != rhs.len)
+		return false;
+	return memcmp(lhs.data, rhs.data, lhs.len) == 0;
+}
+
+static bool is_pseudo_filename(struct mem_range name)
+{
+	if (incfs_equal_ranges(pending_reads_file_name_range, name))
+		return true;
+	if (incfs_equal_ranges(log_file_name_range, name))
+		return true;
+
+	return false;
+}
+
+static int validate_name(char *file_name)
+{
+	struct mem_range name = range(file_name, strlen(file_name));
+	int i = 0;
+
+	if (name.len > INCFS_MAX_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	if (is_pseudo_filename(name))
+		return -EINVAL;
+
+	for (i = 0; i < name.len; i++)
+		if (name.data[i] == '/')
+			return -EINVAL;
+
+	return 0;
+}
+
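+/*
+ * Resolve a user-supplied path relative to the mount's backing directory.
+ * The backing dir is temporarily installed as a file descriptor so that
+ * user_path_at_empty() can walk the user string against it; a NULL path
+ * simply returns the backing directory itself.
+ */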
+static int dir_relative_path_resolve(
+			struct mount_info *mi,
+			const char __user *relative_path,
+			struct path *result_path)
+{
+	struct path *base_path = &mi->mi_backing_dir_path;
+	int dir_fd = get_unused_fd_flags(0);
+	struct file *dir_f = NULL;
+	int error = 0;
+
+	if (dir_fd < 0)
+		return dir_fd;
+
+	dir_f = dentry_open(base_path, O_RDONLY | O_NOATIME, mi->mi_owner);
+
+	if (IS_ERR(dir_f)) {
+		error = PTR_ERR(dir_f);
+		goto out;
+	}
+	fd_install(dir_fd, dir_f);
+
+	if (!relative_path) {
+		/* No relative path given, just return the base dir. */
+		*result_path = *base_path;
+		path_get(result_path);
+		goto out;
+	}
+
+	error = user_path_at_empty(dir_fd, relative_path,
+		LOOKUP_FOLLOW | LOOKUP_DIRECTORY, result_path, NULL);
+
+out:
+	ksys_close(dir_fd);
+	if (error)
+		pr_debug("incfs: %s %d\n", __func__, error);
+	return error;
+}
+
+static struct mem_range incfs_copy_signature_info_from_user(u8 __user *original,
+							    u64 size)
+{
+	u8 *result;
+
+	if (!original)
+		return range(NULL, 0);
+
+	if (size > INCFS_MAX_SIGNATURE_SIZE)
+		return range(ERR_PTR(-EFAULT), 0);
+
+	result = kzalloc(size, GFP_NOFS | __GFP_COMP);
+	if (!result)
+		return range(ERR_PTR(-ENOMEM), 0);
+
+	if (copy_from_user(result, original, size)) {
+		kfree(result);
+		return range(ERR_PTR(-EFAULT), 0);
+	}
+
+	return range(result, size);
+}
+
+static int init_new_file(struct mount_info *mi, struct dentry *dentry,
+			 incfs_uuid_t *uuid, u64 size, struct mem_range attr,
+			 u8 __user *user_signature_info, u64 signature_size)
+{
+	struct path path = {};
+	struct file *new_file;
+	int error = 0;
+	struct backing_file_context *bfc = NULL;
+	u32 block_count;
+	struct mem_range raw_signature = { NULL };
+	struct mtree *hash_tree = NULL;
+
+	if (!mi || !dentry || !uuid)
+		return -EFAULT;
+
+	/* Open the backing file so its incfs metadata can be written out. */
+	path = (struct path) {
+		.mnt = mi->mi_backing_dir_path.mnt,
+		.dentry = dentry
+	};
+	new_file = dentry_open(&path, O_RDWR | O_NOATIME | O_LARGEFILE,
+			       mi->mi_owner);
+
+	if (IS_ERR(new_file)) {
+		error = PTR_ERR(new_file);
+		goto out;
+	}
+
+	bfc = incfs_alloc_bfc(new_file);
+	fput(new_file);
+	if (IS_ERR(bfc)) {
+		error = PTR_ERR(bfc);
+		bfc = NULL;
+		goto out;
+	}
+
+	mutex_lock(&bfc->bc_mutex);
+	error = incfs_write_fh_to_backing_file(bfc, uuid, size);
+	if (error)
+		goto out;
+
+	block_count = (u32)get_blocks_count_for_size(size);
+
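+	/*
+	 * If a signature was supplied, write it out together with space for
+	 * the hash tree; hash blocks extend block_count so the blockmap
+	 * written below covers data and hash blocks alike.
+	 */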
+	if (user_signature_info) {
+		raw_signature = incfs_copy_signature_info_from_user(
+			user_signature_info, signature_size);
+
+		if (IS_ERR(raw_signature.data)) {
+			error = PTR_ERR(raw_signature.data);
+			raw_signature.data = NULL;
+			goto out;
+		}
+
+		hash_tree = incfs_alloc_mtree(raw_signature, block_count);
+		if (IS_ERR(hash_tree)) {
+			error = PTR_ERR(hash_tree);
+			hash_tree = NULL;
+			goto out;
+		}
+
+		error = incfs_write_signature_to_backing_file(
+			bfc, raw_signature, hash_tree->hash_tree_area_size);
+		if (error)
+			goto out;
+
+		block_count += get_blocks_count_for_size(
+			hash_tree->hash_tree_area_size);
+	}
+
+	if (block_count)
+		error = incfs_write_blockmap_to_backing_file(bfc, block_count);
+
+	if (error)
+		goto out;
+out:
+	if (bfc) {
+		mutex_unlock(&bfc->bc_mutex);
+		incfs_free_bfc(bfc);
+	}
+	incfs_free_mtree(hash_tree);
+	kfree(raw_signature.data);
+
+	if (error)
+		pr_debug("incfs: %s error: %d\n", __func__, error);
+	return error;
+}
+
+static long ioctl_create_file(struct mount_info *mi,
+			struct incfs_new_file_args __user *usr_args)
+{
+	struct incfs_new_file_args args;
+	char *file_id_str = NULL;
+	struct dentry *index_file_dentry = NULL;
+	struct dentry *named_file_dentry = NULL;
+	struct dentry *incomplete_file_dentry = NULL;
+	struct path parent_dir_path = {};
+	struct inode *index_dir_inode = NULL;
+	__le64 size_attr_value = 0;
+	char *file_name = NULL;
+	char *attr_value = NULL;
+	int error = 0;
+	bool locked = false;
+	bool index_linked = false;
+	bool name_linked = false;
+	bool incomplete_linked = false;
+
+	if (!mi || !mi->mi_index_dir || !mi->mi_incomplete_dir) {
+		error = -EFAULT;
+		goto out;
+	}
+
+	if (copy_from_user(&args, usr_args, sizeof(args)) > 0) {
+		error = -EFAULT;
+		goto out;
+	}
+
+	file_name = strndup_user(u64_to_user_ptr(args.file_name), PATH_MAX);
+	if (IS_ERR(file_name)) {
+		error = PTR_ERR(file_name);
+		file_name = NULL;
+		goto out;
+	}
+
+	error = validate_name(file_name);
+	if (error)
+		goto out;
+
+	file_id_str = file_id_to_str(args.file_id);
+	if (!file_id_str) {
+		error = -ENOMEM;
+		goto out;
+	}
+
+	error = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+	if (error)
+		goto out;
+	locked = true;
+
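+	/*
+	 * Creation protocol: the backing file is first created in .index,
+	 * then hardlinked to its user-visible name and, for non-empty
+	 * files, into .incomplete. On failure every link made so far is
+	 * unwound below.
+	 */
+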
+	/* Find a directory to put the file into. */
+	error = dir_relative_path_resolve(mi,
+			u64_to_user_ptr(args.directory_path),
+			&parent_dir_path);
+	if (error)
+		goto out;
+
+	if (parent_dir_path.dentry == mi->mi_index_dir) {
+		/* Can't create a file directly inside .index */
+		error = -EBUSY;
+		goto out;
+	}
+
+	if (parent_dir_path.dentry == mi->mi_incomplete_dir) {
+		/* Can't create a file directly inside .incomplete */
+		error = -EBUSY;
+		goto out;
+	}
+
+	/* Look up a dentry in the parent dir. It should be negative. */
+	named_file_dentry = incfs_lookup_dentry(parent_dir_path.dentry,
+					file_name);
+	if (!named_file_dentry) {
+		error = -EFAULT;
+		goto out;
+	}
+	if (IS_ERR(named_file_dentry)) {
+		error = PTR_ERR(named_file_dentry);
+		named_file_dentry = NULL;
+		goto out;
+	}
+	if (d_really_is_positive(named_file_dentry)) {
+		/* File with this path already exists. */
+		error = -EEXIST;
+		goto out;
+	}
+
+	/* Look up a dentry in the incomplete dir. It should be negative. */
+	incomplete_file_dentry = incfs_lookup_dentry(mi->mi_incomplete_dir,
+					file_id_str);
+	if (!incomplete_file_dentry) {
+		error = -EFAULT;
+		goto out;
+	}
+	if (IS_ERR(incomplete_file_dentry)) {
+		error = PTR_ERR(incomplete_file_dentry);
+		incomplete_file_dentry = NULL;
+		goto out;
+	}
+	if (d_really_is_positive(incomplete_file_dentry)) {
+		/* File with this path already exists. */
+		error = -EEXIST;
+		goto out;
+	}
+
+	/* Look up a dentry in the .index dir. It should be negative. */
+	index_file_dentry = incfs_lookup_dentry(mi->mi_index_dir, file_id_str);
+	if (!index_file_dentry) {
+		error = -EFAULT;
+		goto out;
+	}
+	if (IS_ERR(index_file_dentry)) {
+		error = PTR_ERR(index_file_dentry);
+		index_file_dentry = NULL;
+		goto out;
+	}
+	if (d_really_is_positive(index_file_dentry)) {
+		/* File with this ID already exists in index. */
+		error = -EEXIST;
+		goto out;
+	}
+
+	/* Creating a file in the .index dir. */
+	index_dir_inode = d_inode(mi->mi_index_dir);
+	inode_lock_nested(index_dir_inode, I_MUTEX_PARENT);
+	error = vfs_create(index_dir_inode, index_file_dentry, args.mode | 0222,
+			   true);
+	inode_unlock(index_dir_inode);
+
+	if (error)
+		goto out;
+	if (!d_really_is_positive(index_file_dentry)) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	error = chmod(index_file_dentry, args.mode | 0222);
+	if (error) {
+		pr_debug("incfs: chmod err: %d\n", error);
+		goto out;
+	}
+
+	/* Save the file's ID as an xattr for easy fetching in future. */
+	error = vfs_setxattr(index_file_dentry, INCFS_XATTR_ID_NAME,
+		file_id_str, strlen(file_id_str), XATTR_CREATE);
+	if (error) {
+		pr_debug("incfs: vfs_setxattr err:%d\n", error);
+		goto out;
+	}
+
+	/* Save the file's size as an xattr for easy fetching in future. */
+	size_attr_value = cpu_to_le64(args.size);
+	error = vfs_setxattr(index_file_dentry, INCFS_XATTR_SIZE_NAME,
+		(char *)&size_attr_value, sizeof(size_attr_value),
+		XATTR_CREATE);
+	if (error) {
+		pr_debug("incfs: vfs_setxattr err:%d\n", error);
+		goto out;
+	}
+
+	/* Save the file's attribute as an xattr */
+	if (args.file_attr_len && args.file_attr) {
+		if (args.file_attr_len > INCFS_MAX_FILE_ATTR_SIZE) {
+			error = -E2BIG;
+			goto out;
+		}
+
+		attr_value = kmalloc(args.file_attr_len, GFP_NOFS);
+		if (!attr_value) {
+			error = -ENOMEM;
+			goto out;
+		}
+
+		if (copy_from_user(attr_value,
+				u64_to_user_ptr(args.file_attr),
+				args.file_attr_len) > 0) {
+			error = -EFAULT;
+			goto out;
+		}
+
+		error = vfs_setxattr(index_file_dentry,
+				INCFS_XATTR_METADATA_NAME,
+				attr_value, args.file_attr_len,
+				XATTR_CREATE);
+
+		if (error)
+			goto out;
+	}
+
+	/* Initializing a newly created file. */
+	error = init_new_file(mi, index_file_dentry, &args.file_id, args.size,
+			      range(attr_value, args.file_attr_len),
+			      (u8 __user *)args.signature_info,
+			      args.signature_size);
+	if (error)
+		goto out;
+	index_linked = true;
+
+	/* Linking a file with its real name from the requested dir. */
+	error = incfs_link(index_file_dentry, named_file_dentry);
+	if (error)
+		goto out;
+	name_linked = true;
+
+	if (args.size) {
+		/* Linking a file with its incomplete entry */
+		error = incfs_link(index_file_dentry, incomplete_file_dentry);
+		if (error)
+			goto out;
+		incomplete_linked = true;
+	}
+
+out:
+	if (error) {
+		pr_debug("incfs: %s err:%d\n", __func__, error);
+		if (index_linked)
+			incfs_unlink(index_file_dentry);
+		if (name_linked)
+			incfs_unlink(named_file_dentry);
+		if (incomplete_linked)
+			incfs_unlink(incomplete_file_dentry);
+	}
+
+	kfree(file_id_str);
+	kfree(file_name);
+	kfree(attr_value);
+	dput(named_file_dentry);
+	dput(index_file_dentry);
+	dput(incomplete_file_dentry);
+	path_put(&parent_dir_path);
+	if (locked)
+		mutex_unlock(&mi->mi_dir_struct_mutex);
+	return error;
+}
+
+static int init_new_mapped_file(struct mount_info *mi, struct dentry *dentry,
+			 incfs_uuid_t *uuid, u64 size, u64 offset)
+{
+	struct path path = {};
+	struct file *new_file;
+	int error = 0;
+	struct backing_file_context *bfc = NULL;
+
+	if (!mi || !dentry || !uuid)
+		return -EFAULT;
+
+	/* Open the backing file so the mapping metadata can be written out. */
+	path = (struct path) {
+		.mnt = mi->mi_backing_dir_path.mnt,
+		.dentry = dentry
+	};
+	new_file = dentry_open(&path, O_RDWR | O_NOATIME | O_LARGEFILE,
+			       mi->mi_owner);
+
+	if (IS_ERR(new_file)) {
+		error = PTR_ERR(new_file);
+		goto out;
+	}
+
+	bfc = incfs_alloc_bfc(new_file);
+	fput(new_file);
+	if (IS_ERR(bfc)) {
+		error = PTR_ERR(bfc);
+		bfc = NULL;
+		goto out;
+	}
+
+	mutex_lock(&bfc->bc_mutex);
+	error = incfs_write_mapping_fh_to_backing_file(bfc, uuid, size, offset);
+	if (error)
+		goto out;
+
+out:
+	if (bfc) {
+		mutex_unlock(&bfc->bc_mutex);
+		incfs_free_bfc(bfc);
+	}
+
+	if (error)
+		pr_debug("incfs: %s error: %d\n", __func__, error);
+	return error;
+}
+
+static long ioctl_create_mapped_file(struct mount_info *mi, void __user *arg)
+{
+	struct incfs_create_mapped_file_args __user *args_usr_ptr = arg;
+	struct incfs_create_mapped_file_args args = {};
+	char *file_name;
+	int error = 0;
+	struct path parent_dir_path = {};
+	char *source_file_name = NULL;
+	struct dentry *source_file_dentry = NULL;
+	u64 source_file_size;
+	struct dentry *file_dentry = NULL;
+	struct inode *parent_inode;
+	__le64 size_attr_value;
+
+	if (copy_from_user(&args, args_usr_ptr, sizeof(args)) > 0)
+		return -EINVAL;
+
+	file_name = strndup_user(u64_to_user_ptr(args.file_name), PATH_MAX);
+	if (IS_ERR(file_name)) {
+		error = PTR_ERR(file_name);
+		file_name = NULL;
+		goto out;
+	}
+
+	error = validate_name(file_name);
+	if (error)
+		goto out;
+
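+	/*
+	 * A mapped file exposes a byte range of an existing file from
+	 * .index; the offset must be block aligned and the range is
+	 * validated against the source file's recorded size below.
+	 */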
+	if (args.source_offset % INCFS_DATA_FILE_BLOCK_SIZE) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	/* Validate file mapping is in range */
+	source_file_name = file_id_to_str(args.source_file_id);
+	if (!source_file_name) {
+		pr_warn("Failed to alloc source_file_name\n");
+		error = -ENOMEM;
+		goto out;
+	}
+
+	source_file_dentry = incfs_lookup_dentry(mi->mi_index_dir,
+						       source_file_name);
+	if (!source_file_dentry) {
+		pr_warn("Source file does not exist\n");
+		error = -EINVAL;
+		goto out;
+	}
+	if (IS_ERR(source_file_dentry)) {
+		pr_warn("Error opening source file\n");
+		error = PTR_ERR(source_file_dentry);
+		source_file_dentry = NULL;
+		goto out;
+	}
+	if (!d_really_is_positive(source_file_dentry)) {
+		pr_warn("Source file dentry negative\n");
+		error = -EINVAL;
+		goto out;
+	}
+
+	error = vfs_getxattr(source_file_dentry, INCFS_XATTR_SIZE_NAME,
+			     (char *)&size_attr_value, sizeof(size_attr_value));
+	if (error < 0)
+		goto out;
+
+	if (error != sizeof(size_attr_value)) {
+		pr_warn("Mapped file has no size attr\n");
+		error = -EINVAL;
+		goto out;
+	}
+
+	source_file_size = le64_to_cpu(size_attr_value);
+	if (args.source_offset + args.size > source_file_size) {
+		pr_warn("Mapped file out of range\n");
+		error = -EINVAL;
+		goto out;
+	}
+
+	/* Find a directory to put the file into. */
+	error = dir_relative_path_resolve(mi,
+			u64_to_user_ptr(args.directory_path),
+			&parent_dir_path);
+	if (error)
+		goto out;
+
+	if (parent_dir_path.dentry == mi->mi_index_dir) {
+		/* Can't create a file directly inside .index */
+		error = -EBUSY;
+		goto out;
+	}
+
+	/* Look up a dentry in the parent dir. It should be negative. */
+	file_dentry = incfs_lookup_dentry(parent_dir_path.dentry,
+					file_name);
+	if (!file_dentry) {
+		error = -EFAULT;
+		goto out;
+	}
+	if (IS_ERR(file_dentry)) {
+		error = PTR_ERR(file_dentry);
+		file_dentry = NULL;
+		goto out;
+	}
+	if (d_really_is_positive(file_dentry)) {
+		error = -EEXIST;
+		goto out;
+	}
+
+	parent_inode = d_inode(parent_dir_path.dentry);
+	inode_lock_nested(parent_inode, I_MUTEX_PARENT);
+	error = vfs_create(parent_inode, file_dentry, args.mode | 0222, true);
+	inode_unlock(parent_inode);
+	if (error)
+		goto out;
+
+	/* Save the file's size as an xattr for easy fetching in future. */
+	size_attr_value = cpu_to_le64(args.size);
+	error = vfs_setxattr(file_dentry, INCFS_XATTR_SIZE_NAME,
+		(char *)&size_attr_value, sizeof(size_attr_value),
+		XATTR_CREATE);
+	if (error) {
+		pr_debug("incfs: vfs_setxattr err:%d\n", error);
+		goto delete_file;
+	}
+
+	error = init_new_mapped_file(mi, file_dentry, &args.source_file_id,
+			args.size, args.source_offset);
+	if (error)
+		goto delete_file;
+
+	goto out;
+
+delete_file:
+	incfs_unlink(file_dentry);
+
+out:
+	dput(file_dentry);
+	dput(source_file_dentry);
+	path_put(&parent_dir_path);
+	kfree(file_name);
+	kfree(source_file_name);
+	return error;
+}
+
+static long ioctl_get_read_timeouts(struct mount_info *mi, void __user *arg)
+{
+	struct incfs_get_read_timeouts_args __user *args_usr_ptr = arg;
+	struct incfs_get_read_timeouts_args args = {};
+	int error = 0;
+	struct incfs_per_uid_read_timeouts *buffer;
+	int size;
+
+	if (copy_from_user(&args, args_usr_ptr, sizeof(args)))
+		return -EINVAL;
+
+	if (args.timeouts_array_size > INCFS_DATA_FILE_BLOCK_SIZE)
+		return -EINVAL;
+
+	buffer = kzalloc(args.timeouts_array_size, GFP_NOFS);
+	if (!buffer)
+		return -ENOMEM;
+
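+	/*
+	 * Snapshot the table under the spinlock into a staging buffer;
+	 * copy_to_user() may fault, so it must run after the lock is
+	 * dropped.
+	 */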
+	spin_lock(&mi->mi_per_uid_read_timeouts_lock);
+	size = mi->mi_per_uid_read_timeouts_size;
+	if (args.timeouts_array_size < size)
+		error = -E2BIG;
+	else if (size)
+		memcpy(buffer, mi->mi_per_uid_read_timeouts, size);
+	spin_unlock(&mi->mi_per_uid_read_timeouts_lock);
+
+	args.timeouts_array_size_out = size;
+	if (!error && size)
+		if (copy_to_user(u64_to_user_ptr(args.timeouts_array), buffer,
+				 size))
+			error = -EFAULT;
+
+	if (!error || error == -E2BIG)
+		if (copy_to_user(args_usr_ptr, &args, sizeof(args)) > 0)
+			error = -EFAULT;
+
+	kfree(buffer);
+	return error;
+}
+
+static long ioctl_set_read_timeouts(struct mount_info *mi, void __user *arg)
+{
+	struct incfs_set_read_timeouts_args __user *args_usr_ptr = arg;
+	struct incfs_set_read_timeouts_args args = {};
+	int error = 0;
+	int size;
+	struct incfs_per_uid_read_timeouts *buffer = NULL, *tmp;
+	int i;
+
+	if (copy_from_user(&args, args_usr_ptr, sizeof(args)))
+		return -EINVAL;
+
+	size = args.timeouts_array_size;
+	if (size) {
+		if (size > INCFS_DATA_FILE_BLOCK_SIZE ||
+		    size % sizeof(*buffer) != 0)
+			return -EINVAL;
+
+		buffer = kzalloc(size, GFP_NOFS);
+		if (!buffer)
+			return -ENOMEM;
+
+		if (copy_from_user(buffer, u64_to_user_ptr(args.timeouts_array),
+				   size)) {
+			error = -EINVAL;
+			goto out;
+		}
+
+		for (i = 0; i < size / sizeof(*buffer); ++i) {
+			struct incfs_per_uid_read_timeouts *t = &buffer[i];
+
+			if (t->min_pending_time_ms > t->max_pending_time_ms) {
+				error = -EINVAL;
+				goto out;
+			}
+		}
+	}
+
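+	/* Publish the new table under the lock; the old one is freed below. */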
+	spin_lock(&mi->mi_per_uid_read_timeouts_lock);
+	mi->mi_per_uid_read_timeouts_size = size;
+	tmp = mi->mi_per_uid_read_timeouts;
+	mi->mi_per_uid_read_timeouts = buffer;
+	buffer = tmp;
+	spin_unlock(&mi->mi_per_uid_read_timeouts_lock);
+
+out:
+	kfree(buffer);
+	return error;
+}
+
+static long pending_reads_dispatch_ioctl(struct file *f, unsigned int req,
+					unsigned long arg)
+{
+	struct mount_info *mi = get_mount_info(file_superblock(f));
+
+	switch (req) {
+	case INCFS_IOC_CREATE_FILE:
+		return ioctl_create_file(mi, (void __user *)arg);
+	case INCFS_IOC_PERMIT_FILL:
+		return ioctl_permit_fill(f, (void __user *)arg);
+	case INCFS_IOC_CREATE_MAPPED_FILE:
+		return ioctl_create_mapped_file(mi, (void __user *)arg);
+	case INCFS_IOC_GET_READ_TIMEOUTS:
+		return ioctl_get_read_timeouts(mi, (void __user *)arg);
+	case INCFS_IOC_SET_READ_TIMEOUTS:
+		return ioctl_set_read_timeouts(mi, (void __user *)arg);
+	default:
+		return -EINVAL;
+	}
+}
+
+static const struct file_operations incfs_pending_read_file_ops = {
+	.read = pending_reads_read,
+	.poll = pending_reads_poll,
+	.open = pending_reads_open,
+	.release = pending_reads_release,
+	.llseek = noop_llseek,
+	.unlocked_ioctl = pending_reads_dispatch_ioctl,
+	.compat_ioctl = pending_reads_dispatch_ioctl
+};
+
+/*******************************************************************************
+ * .blocks_written pseudo file definition
+ ******************************************************************************/
+static const char blocks_written_file_name[] = INCFS_BLOCKS_WRITTEN_FILENAME;
+static const struct mem_range blocks_written_file_name_range = {
+	.data = (u8 *)blocks_written_file_name,
+	.len = ARRAY_SIZE(blocks_written_file_name) - 1
+};
+
+/* State of an open .blocks_written file, unique for each file descriptor. */
+struct blocks_written_file_state {
+	unsigned long blocks_written;
+};
+
+static ssize_t blocks_written_read(struct file *f, char __user *buf, size_t len,
+			loff_t *ppos)
+{
+	struct mount_info *mi = get_mount_info(file_superblock(f));
+	struct blocks_written_file_state *state = f->private_data;
+	unsigned long blocks_written;
+	char string[21];
+	int result = 0;
+
+	if (!mi)
+		return -EFAULT;
+
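+	/* Report EOF until the counter moves past what this fd last saw. */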
+	blocks_written = atomic_read(&mi->mi_blocks_written);
+	if (state->blocks_written == blocks_written)
+		return 0;
+
+	result = snprintf(string, sizeof(string), "%lu", blocks_written);
+	if (result > len)
+		result = len;
+	if (copy_to_user(buf, string, result))
+		return -EFAULT;
+
+	state->blocks_written = blocks_written;
+	return result;
+}
+
+static __poll_t blocks_written_poll(struct file *f, poll_table *wait)
+{
+	struct mount_info *mi = get_mount_info(file_superblock(f));
+	struct blocks_written_file_state *state = f->private_data;
+	unsigned long blocks_written;
+
+	if (!mi)
+		return 0;
+
+	poll_wait(f, &mi->mi_blocks_written_notif_wq, wait);
+	blocks_written = atomic_read(&mi->mi_blocks_written);
+	if (state->blocks_written == blocks_written)
+		return 0;
+
+	return EPOLLIN | EPOLLRDNORM;
+}
+
+static int blocks_written_open(struct inode *inode, struct file *file)
+{
+	struct blocks_written_file_state *state =
+		kzalloc(sizeof(*state), GFP_NOFS);
+
+	if (!state)
+		return -ENOMEM;
+
+	state->blocks_written = -1;
+	file->private_data = state;
+	return 0;
+}
+
+static int blocks_written_release(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+static const struct file_operations incfs_blocks_written_file_ops = {
+	.read = blocks_written_read,
+	.poll = blocks_written_poll,
+	.open = blocks_written_open,
+	.release = blocks_written_release,
+	.llseek = noop_llseek,
+};
+
+/*******************************************************************************
+ * Generic inode lookup functionality
+ ******************************************************************************/
+static bool get_pseudo_inode(int ino, struct inode *inode)
+{
+	inode->i_ctime = (struct timespec64){};
+	inode->i_mtime = inode->i_ctime;
+	inode->i_atime = inode->i_ctime;
+	inode->i_size = 0;
+	inode->i_ino = ino;
+	inode->i_private = NULL;
+	inode_init_owner(inode, NULL, S_IFREG | READ_WRITE_FILE_MODE);
+	inode->i_op = &incfs_file_inode_ops;
+
+	switch (ino) {
+	case INCFS_PENDING_READS_INODE:
+		inode->i_fop = &incfs_pending_read_file_ops;
+		return true;
+
+	case INCFS_LOG_INODE:
+		inode->i_fop = &incfs_log_file_ops;
+		return true;
+
+	case INCFS_BLOCKS_WRITTEN_INODE:
+		inode->i_fop = &incfs_blocks_written_file_ops;
+		return true;
+
+	default:
+		return false;
+	}
+}
+
+struct inode_search {
+	unsigned long ino;
+};
+
+static int inode_test(struct inode *inode, void *opaque)
+{
+	struct inode_search *search = opaque;
+
+	return inode->i_ino == search->ino;
+}
+
+static int inode_set(struct inode *inode, void *opaque)
+{
+	struct inode_search *search = opaque;
+
+	if (get_pseudo_inode(search->ino, inode))
+		return 0;
+
+	/* Unknown inode requested. */
+	return -EINVAL;
+}
+
+static struct inode *fetch_inode(struct super_block *sb, unsigned long ino)
+{
+	struct inode_search search = {
+		.ino = ino
+	};
+	struct inode *inode = iget5_locked(sb, search.ino, inode_test,
+				inode_set, &search);
+
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+
+	if (inode->i_state & I_NEW)
+		unlock_new_inode(inode);
+
+	return inode;
+}
+
+int dir_lookup_pseudo_files(struct super_block *sb, struct dentry *dentry)
+{
+	struct mem_range name_range =
+			range((u8 *)dentry->d_name.name, dentry->d_name.len);
+	unsigned long ino;
+	struct inode *inode;
+
+	if (incfs_equal_ranges(pending_reads_file_name_range, name_range))
+		ino = INCFS_PENDING_READS_INODE;
+	else if (incfs_equal_ranges(log_file_name_range, name_range))
+		ino = INCFS_LOG_INODE;
+	else if (incfs_equal_ranges(blocks_written_file_name_range, name_range))
+		ino = INCFS_BLOCKS_WRITTEN_INODE;
+	else
+		return -ENOENT;
+
+	inode = fetch_inode(sb, ino);
+	if (IS_ERR(inode))
+		return PTR_ERR(inode);
+
+	d_add(dentry, inode);
+	return 0;
+}
+
+int emit_pseudo_files(struct dir_context *ctx)
+{
+	if (ctx->pos == 0) {
+		if (!dir_emit(ctx, pending_reads_file_name,
+			      ARRAY_SIZE(pending_reads_file_name) - 1,
+			      INCFS_PENDING_READS_INODE, DT_REG))
+			return -EINVAL;
+
+		ctx->pos++;
+	}
+
+	if (ctx->pos == 1) {
+		if (!dir_emit(ctx, log_file_name,
+			      ARRAY_SIZE(log_file_name) - 1,
+			      INCFS_LOG_INODE, DT_REG))
+			return -EINVAL;
+
+		ctx->pos++;
+	}
+
+	if (ctx->pos == 2) {
+		if (!dir_emit(ctx, blocks_written_file_name,
+			      ARRAY_SIZE(blocks_written_file_name) - 1,
+			      INCFS_BLOCKS_WRITTEN_INODE, DT_REG))
+			return -EINVAL;
+
+		ctx->pos++;
+	}
+
+	return 0;
+}
diff --git a/fs/incfs/pseudo_files.h b/fs/incfs/pseudo_files.h
new file mode 100644
index 0000000..358bcab
--- /dev/null
+++ b/fs/incfs/pseudo_files.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020 Google LLC
+ */
+
+#ifndef _INCFS_PSEUDO_FILES_H
+#define _INCFS_PSEUDO_FILES_H
+
+#define PSEUDO_FILE_COUNT 3
+#define INCFS_START_INO_RANGE 10
+
+int dir_lookup_pseudo_files(struct super_block *sb, struct dentry *dentry);
+int emit_pseudo_files(struct dir_context *ctx);
+
+#endif /* _INCFS_PSEUDO_FILES_H */
diff --git a/fs/incfs/vfs.c b/fs/incfs/vfs.c
new file mode 100644
index 0000000..0fee22f
--- /dev/null
+++ b/fs/incfs/vfs.c
@@ -0,0 +1,1667 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+
+#include <linux/blkdev.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/fs_stack.h>
+#include <linux/namei.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "vfs.h"
+
+#include "data_mgmt.h"
+#include "format.h"
+#include "internal.h"
+#include "pseudo_files.h"
+
+static int incfs_remount_fs(struct super_block *sb, int *flags, char *data);
+
+static int dentry_revalidate(struct dentry *dentry, unsigned int flags);
+static void dentry_release(struct dentry *d);
+
+static int iterate_incfs_dir(struct file *file, struct dir_context *ctx);
+static struct dentry *dir_lookup(struct inode *dir_inode,
+		struct dentry *dentry, unsigned int flags);
+static int dir_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode);
+static int dir_unlink(struct inode *dir, struct dentry *dentry);
+static int dir_link(struct dentry *old_dentry, struct inode *dir,
+			 struct dentry *new_dentry);
+static int dir_rmdir(struct inode *dir, struct dentry *dentry);
+static int dir_rename(struct inode *old_dir, struct dentry *old_dentry,
+		struct inode *new_dir, struct dentry *new_dentry);
+
+static int file_open(struct inode *inode, struct file *file);
+static int file_release(struct inode *inode, struct file *file);
+static int read_single_page(struct file *f, struct page *page);
+static long dispatch_ioctl(struct file *f, unsigned int req, unsigned long arg);
+
+static struct inode *alloc_inode(struct super_block *sb);
+static void free_inode(struct inode *inode);
+static void evict_inode(struct inode *inode);
+
+static int incfs_setattr(struct dentry *dentry, struct iattr *ia);
+static ssize_t incfs_getxattr(struct dentry *d, const char *name,
+			void *value, size_t size);
+static ssize_t incfs_setxattr(struct dentry *d, const char *name,
+			const void *value, size_t size, int flags);
+static ssize_t incfs_listxattr(struct dentry *d, char *list, size_t size);
+
+static int show_options(struct seq_file *, struct dentry *);
+
+static const struct super_operations incfs_super_ops = {
+	.statfs = simple_statfs,
+	.remount_fs = incfs_remount_fs,
+	.alloc_inode	= alloc_inode,
+	.destroy_inode	= free_inode,
+	.evict_inode = evict_inode,
+	.show_options = show_options
+};
+
+static int dir_rename_wrap(struct inode *old_dir, struct dentry *old_dentry,
+		struct inode *new_dir, struct dentry *new_dentry,
+		unsigned int flags)
+{
+	return dir_rename(old_dir, old_dentry, new_dir, new_dentry);
+}
+
+static const struct inode_operations incfs_dir_inode_ops = {
+	.lookup = dir_lookup,
+	.mkdir = dir_mkdir,
+	.rename = dir_rename_wrap,
+	.unlink = dir_unlink,
+	.link = dir_link,
+	.rmdir = dir_rmdir,
+	.setattr = incfs_setattr,
+};
+
+static const struct file_operations incfs_dir_fops = {
+	.llseek = generic_file_llseek,
+	.read = generic_read_dir,
+	.iterate = iterate_incfs_dir,
+	.open = file_open,
+	.release = file_release,
+};
+
+static const struct dentry_operations incfs_dentry_ops = {
+	.d_revalidate = dentry_revalidate,
+	.d_release = dentry_release
+};
+
+static const struct address_space_operations incfs_address_space_ops = {
+	.readpage = read_single_page,
+	/* .readpages = readpages */
+};
+
+const struct file_operations incfs_file_ops = {
+	.open = file_open,
+	.release = file_release,
+	.read_iter = generic_file_read_iter,
+	.mmap = generic_file_mmap,
+	.splice_read = generic_file_splice_read,
+	.llseek = generic_file_llseek,
+	.unlocked_ioctl = dispatch_ioctl,
+	.compat_ioctl = dispatch_ioctl
+};
+
+const struct inode_operations incfs_file_inode_ops = {
+	.setattr = incfs_setattr,
+	.getattr = simple_getattr,
+	.listxattr = incfs_listxattr
+};
+
+static int incfs_handler_getxattr(const struct xattr_handler *xh,
+				  struct dentry *d, struct inode *inode,
+				  const char *name, void *buffer, size_t size,
+				  int flags)
+{
+	return incfs_getxattr(d, name, buffer, size);
+}
+
+static int incfs_handler_setxattr(const struct xattr_handler *xh,
+				  struct dentry *d, struct inode *inode,
+				  const char *name, const void *buffer,
+				  size_t size, int flags)
+{
+	return incfs_setxattr(d, name, buffer, size, flags);
+}
+
+static const struct xattr_handler incfs_xattr_handler = {
+	.prefix = "",	/* AKA all attributes */
+	.get = incfs_handler_getxattr,
+	.set = incfs_handler_setxattr,
+};
+
+static const struct xattr_handler *incfs_xattr_ops[] = {
+	&incfs_xattr_handler,
+	NULL,
+};
+
+struct inode_search {
+	unsigned long ino;
+
+	struct dentry *backing_dentry;
+
+	size_t size;
+};
+
+enum parse_parameter {
+	Opt_read_timeout,
+	Opt_readahead_pages,
+	Opt_rlog_pages,
+	Opt_rlog_wakeup_cnt,
+	Opt_report_uid,
+	Opt_err
+};
+
+static const match_table_t option_tokens = {
+	{ Opt_read_timeout, "read_timeout_ms=%u" },
+	{ Opt_readahead_pages, "readahead=%u" },
+	{ Opt_rlog_pages, "rlog_pages=%u" },
+	{ Opt_rlog_wakeup_cnt, "rlog_wakeup_cnt=%u" },
+	{ Opt_report_uid, "report_uid" },
+	{ Opt_err, NULL }
+};
+
+static int parse_options(struct mount_options *opts, char *str)
+{
+	substring_t args[MAX_OPT_ARGS];
+	int value;
+	char *position;
+
+	if (opts == NULL)
+		return -EFAULT;
+
+	*opts = (struct mount_options) {
+		.read_timeout_ms = 1000, /* Default: 1s */
+		.readahead_pages = 10,
+		.read_log_pages = 2,
+		.read_log_wakeup_count = 10,
+	};
+
+	if (str == NULL || *str == 0)
+		return 0;
+
+	while ((position = strsep(&str, ",")) != NULL) {
+		int token;
+
+		if (!*position)
+			continue;
+
+		token = match_token(position, option_tokens, args);
+
+		switch (token) {
+		case Opt_read_timeout:
+			if (match_int(&args[0], &value))
+				return -EINVAL;
+			opts->read_timeout_ms = value;
+			break;
+		case Opt_readahead_pages:
+			if (match_int(&args[0], &value))
+				return -EINVAL;
+			opts->readahead_pages = value;
+			break;
+		case Opt_rlog_pages:
+			if (match_int(&args[0], &value))
+				return -EINVAL;
+			opts->read_log_pages = value;
+			break;
+		case Opt_rlog_wakeup_cnt:
+			if (match_int(&args[0], &value))
+				return -EINVAL;
+			opts->read_log_wakeup_count = value;
+			break;
+		case Opt_report_uid:
+			opts->report_uid = true;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* Read file size from the attribute. Quicker than reading the header */
+static u64 read_size_attr(struct dentry *backing_dentry)
+{
+	__le64 attr_value;
+	ssize_t bytes_read;
+
+	bytes_read = vfs_getxattr(backing_dentry, INCFS_XATTR_SIZE_NAME,
+			(char *)&attr_value, sizeof(attr_value));
+
+	if (bytes_read != sizeof(attr_value))
+		return 0;
+
+	return le64_to_cpu(attr_value);
+}
+
+static int inode_test(struct inode *inode, void *opaque)
+{
+	struct inode_search *search = opaque;
+	struct inode_info *node = get_incfs_node(inode);
+	struct inode *backing_inode = d_inode(search->backing_dentry);
+
+	if (!node)
+		return 0;
+
+	return node->n_backing_inode == backing_inode &&
+		inode->i_ino == search->ino;
+}
+
+static int inode_set(struct inode *inode, void *opaque)
+{
+	struct inode_search *search = opaque;
+	struct inode_info *node = get_incfs_node(inode);
+	struct dentry *backing_dentry = search->backing_dentry;
+	struct inode *backing_inode = d_inode(backing_dentry);
+
+	fsstack_copy_attr_all(inode, backing_inode);
+	if (S_ISREG(inode->i_mode)) {
+		u64 size = search->size;
+
+		inode->i_size = size;
+		inode->i_blocks = get_blocks_count_for_size(size);
+		inode->i_mapping->a_ops = &incfs_address_space_ops;
+		inode->i_op = &incfs_file_inode_ops;
+		inode->i_fop = &incfs_file_ops;
+		inode->i_mode &= ~0222;
+	} else if (S_ISDIR(inode->i_mode)) {
+		inode->i_size = 0;
+		inode->i_blocks = 1;
+		inode->i_mapping->a_ops = &incfs_address_space_ops;
+		inode->i_op = &incfs_dir_inode_ops;
+		inode->i_fop = &incfs_dir_fops;
+	} else {
+		pr_warn_once("incfs: Unexpected inode type\n");
+		return -EBADF;
+	}
+
+	ihold(backing_inode);
+	node->n_backing_inode = backing_inode;
+	node->n_mount_info = get_mount_info(inode->i_sb);
+	inode->i_ctime = backing_inode->i_ctime;
+	inode->i_mtime = backing_inode->i_mtime;
+	inode->i_atime = backing_inode->i_atime;
+	inode->i_ino = backing_inode->i_ino;
+	if (backing_inode->i_ino < INCFS_START_INO_RANGE) {
+		pr_warn("incfs: ino conflict with backing FS %ld\n",
+			backing_inode->i_ino);
+	}
+
+	return 0;
+}
+
+static struct inode *fetch_regular_inode(struct super_block *sb,
+					struct dentry *backing_dentry)
+{
+	struct inode *backing_inode = d_inode(backing_dentry);
+	struct inode_search search = {
+		.ino = backing_inode->i_ino,
+		.backing_dentry = backing_dentry,
+		.size = read_size_attr(backing_dentry),
+	};
+	struct inode *inode = iget5_locked(sb, search.ino, inode_test,
+				inode_set, &search);
+
+	if (!inode)
+		return ERR_PTR(-ENOMEM);
+
+	if (inode->i_state & I_NEW)
+		unlock_new_inode(inode);
+
+	return inode;
+}
+
+static int iterate_incfs_dir(struct file *file, struct dir_context *ctx)
+{
+	struct dir_file *dir = get_incfs_dir_file(file);
+	int error = 0;
+	struct mount_info *mi = get_mount_info(file_superblock(file));
+	bool root;
+
+	if (!dir) {
+		error = -EBADF;
+		goto out;
+	}
+
+	root = dir->backing_dir->f_inode
+			== d_inode(mi->mi_backing_dir_path.dentry);
+
+	if (root) {
+		error = emit_pseudo_files(ctx);
+		if (error)
+			goto out;
+	}
+
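+	/*
+	 * Positions 0..PSEUDO_FILE_COUNT-1 are taken by the pseudo files,
+	 * so shift ctx->pos before handing it to the backing dir and
+	 * restore it afterwards.
+	 */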
+	ctx->pos -= PSEUDO_FILE_COUNT;
+	error = iterate_dir(dir->backing_dir, ctx);
+	ctx->pos += PSEUDO_FILE_COUNT;
+	file->f_pos = dir->backing_dir->f_pos;
+out:
+	if (error)
+		pr_warn("incfs: %s %s %d\n", __func__,
+			file->f_path.dentry->d_name.name, error);
+	return error;
+}
+
+static int incfs_init_dentry(struct dentry *dentry, struct path *path)
+{
+	struct dentry_info *d_info = NULL;
+
+	if (!dentry || !path)
+		return -EFAULT;
+
+	d_info = kzalloc(sizeof(*d_info), GFP_NOFS);
+	if (!d_info)
+		return -ENOMEM;
+
+	d_info->backing_path = *path;
+	path_get(path);
+
+	dentry->d_fsdata = d_info;
+	return 0;
+}
+
+static struct dentry *open_or_create_special_dir(struct dentry *backing_dir,
+						 const char *name)
+{
+	struct dentry *index_dentry;
+	struct inode *backing_inode = d_inode(backing_dir);
+	int err = 0;
+
+	index_dentry = incfs_lookup_dentry(backing_dir, name);
+	if (!index_dentry) {
+		return ERR_PTR(-EINVAL);
+	} else if (IS_ERR(index_dentry)) {
+		return index_dentry;
+	} else if (d_really_is_positive(index_dentry)) {
+		/* Index already exists. */
+		return index_dentry;
+	}
+
+	/* Index needs to be created. */
+	inode_lock_nested(backing_inode, I_MUTEX_PARENT);
+	err = vfs_mkdir(backing_inode, index_dentry, 0777);
+	inode_unlock(backing_inode);
+
+	if (err)
+		return ERR_PTR(err);
+
+	if (!d_really_is_positive(index_dentry) ||
+		unlikely(d_unhashed(index_dentry))) {
+		dput(index_dentry);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return index_dentry;
+}
+
+static int read_single_page_timeouts(struct data_file *df, struct file *f,
+				     int block_index, struct mem_range range,
+				     struct mem_range tmp)
+{
+	struct mount_info *mi = df->df_mount_info;
+	u32 min_time_ms = 0;
+	u32 min_pending_time_ms = 0;
+	u32 max_pending_time_ms = U32_MAX;
+	int uid = current_uid().val;
+	int i;
+
+	spin_lock(&mi->mi_per_uid_read_timeouts_lock);
+	for (i = 0; i < mi->mi_per_uid_read_timeouts_size /
+		sizeof(*mi->mi_per_uid_read_timeouts); ++i) {
+		struct incfs_per_uid_read_timeouts *t =
+			&mi->mi_per_uid_read_timeouts[i];
+
+		if(t->uid == uid) {
+			min_time_ms = t->min_time_ms;
+			min_pending_time_ms = t->min_pending_time_ms;
+			max_pending_time_ms = t->max_pending_time_ms;
+			break;
+		}
+	}
+	spin_unlock(&mi->mi_per_uid_read_timeouts_lock);
+	if (max_pending_time_ms == U32_MAX)
+		max_pending_time_ms = mi->mi_options.read_timeout_ms;
+
+	return incfs_read_data_file_block(range, f, block_index,
+		min_time_ms, min_pending_time_ms, max_pending_time_ms,
+		tmp);
+}
+
+static int read_single_page(struct file *f, struct page *page)
+{
+	loff_t offset = 0;
+	loff_t size = 0;
+	ssize_t bytes_to_read = 0;
+	ssize_t read_result = 0;
+	struct data_file *df = get_incfs_data_file(f);
+	int result = 0;
+	void *page_start;
+	int block_index;
+
+	if (!df) {
+		SetPageError(page);
+		unlock_page(page);
+		return -EBADF;
+	}
+
+	page_start = kmap(page);
+	offset = page_offset(page);
+	block_index = (offset + df->df_mapped_offset) /
+		INCFS_DATA_FILE_BLOCK_SIZE;
+	size = df->df_size;
+
+	if (offset < size) {
+		struct mem_range tmp = {
+			.len = 2 * INCFS_DATA_FILE_BLOCK_SIZE
+		};
+		tmp.data = (u8 *)__get_free_pages(GFP_NOFS, get_order(tmp.len));
+		if (!tmp.data) {
+			read_result = -ENOMEM;
+			goto err;
+		}
+		bytes_to_read = min_t(loff_t, size - offset, PAGE_SIZE);
+
+		read_result = read_single_page_timeouts(df, f, block_index,
+					range(page_start, bytes_to_read), tmp);
+
+		free_pages((unsigned long)tmp.data, get_order(tmp.len));
+	} else {
+		bytes_to_read = 0;
+		read_result = 0;
+	}
+
+err:
+	if (read_result < 0)
+		result = read_result;
+	else if (read_result < PAGE_SIZE)
+		zero_user(page, read_result, PAGE_SIZE - read_result);
+
+	if (result == 0)
+		SetPageUptodate(page);
+	else
+		SetPageError(page);
+
+	flush_dcache_page(page);
+	kunmap(page);
+	unlock_page(page);
+	return result;
+}
+
+int incfs_link(struct dentry *what, struct dentry *where)
+{
+	struct dentry *parent_dentry = dget_parent(where);
+	struct inode *pinode = d_inode(parent_dentry);
+	int error = 0;
+
+	inode_lock_nested(pinode, I_MUTEX_PARENT);
+	error = vfs_link(what, pinode, where, NULL);
+	inode_unlock(pinode);
+
+	dput(parent_dentry);
+	return error;
+}
+
+int incfs_unlink(struct dentry *dentry)
+{
+	struct dentry *parent_dentry = dget_parent(dentry);
+	struct inode *pinode = d_inode(parent_dentry);
+	int error = 0;
+
+	inode_lock_nested(pinode, I_MUTEX_PARENT);
+	error = vfs_unlink(pinode, dentry, NULL);
+	inode_unlock(pinode);
+
+	dput(parent_dentry);
+	return error;
+}
+
+static int incfs_rmdir(struct dentry *dentry)
+{
+	struct dentry *parent_dentry = dget_parent(dentry);
+	struct inode *pinode = d_inode(parent_dentry);
+	int error = 0;
+
+	inode_lock_nested(pinode, I_MUTEX_PARENT);
+	error = vfs_rmdir(pinode, dentry);
+	inode_unlock(pinode);
+
+	dput(parent_dentry);
+	return error;
+}
+
+static void maybe_delete_incomplete_file(struct data_file *df)
+{
+	char *file_id_str;
+	struct dentry *incomplete_file_dentry;
+
+	if (atomic_read(&df->df_data_blocks_written) < df->df_data_block_count)
+		return;
+
+	/* This is best effort - there is no useful action to take on failure */
+	file_id_str = file_id_to_str(df->df_id);
+	if (!file_id_str)
+		return;
+
+	incomplete_file_dentry = incfs_lookup_dentry(
+					df->df_mount_info->mi_incomplete_dir,
+					file_id_str);
+	if (!incomplete_file_dentry || IS_ERR(incomplete_file_dentry)) {
+		incomplete_file_dentry = NULL;
+		goto out;
+	}
+
+	if (!d_really_is_positive(incomplete_file_dentry))
+		goto out;
+
+	vfs_fsync(df->df_backing_file_context->bc_file, 0);
+	incfs_unlink(incomplete_file_dentry);
+
+out:
+	dput(incomplete_file_dentry);
+	kfree(file_id_str);
+}
+
+static long ioctl_fill_blocks(struct file *f, void __user *arg)
+{
+	struct incfs_fill_blocks __user *usr_fill_blocks = arg;
+	struct incfs_fill_blocks fill_blocks;
+	struct incfs_fill_block __user *usr_fill_block_array;
+	struct data_file *df = get_incfs_data_file(f);
+	struct incfs_file_data *fd = f->private_data;
+	const ssize_t data_buf_size = 2 * INCFS_DATA_FILE_BLOCK_SIZE;
+	u8 *data_buf = NULL;
+	ssize_t error = 0;
+	int i = 0;
+
+	if (!df)
+		return -EBADF;
+
+	if (!fd || fd->fd_fill_permission != CAN_FILL)
+		return -EPERM;
+
+	if (copy_from_user(&fill_blocks, usr_fill_blocks, sizeof(fill_blocks)))
+		return -EFAULT;
+
+	usr_fill_block_array = u64_to_user_ptr(fill_blocks.fill_blocks);
+	data_buf = (u8 *)__get_free_pages(GFP_NOFS | __GFP_COMP,
+					  get_order(data_buf_size));
+	if (!data_buf)
+		return -ENOMEM;
+
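+	/*
+	 * Each fill record is staged through data_buf; INCFS_BLOCK_FLAGS_HASH
+	 * decides whether it lands in the hash tree or in the data area.
+	 */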
+	for (i = 0; i < fill_blocks.count; i++) {
+		struct incfs_fill_block fill_block = {};
+
+		if (copy_from_user(&fill_block, &usr_fill_block_array[i],
+				   sizeof(fill_block)) > 0) {
+			error = -EFAULT;
+			break;
+		}
+
+		if (fill_block.data_len > data_buf_size) {
+			error = -E2BIG;
+			break;
+		}
+
+		if (copy_from_user(data_buf, u64_to_user_ptr(fill_block.data),
+				   fill_block.data_len) > 0) {
+			error = -EFAULT;
+			break;
+		}
+		fill_block.data = 0; /* To make sure nobody uses it. */
+		if (fill_block.flags & INCFS_BLOCK_FLAGS_HASH) {
+			error = incfs_process_new_hash_block(df, &fill_block,
+							     data_buf);
+		} else {
+			error = incfs_process_new_data_block(df, &fill_block,
+							     data_buf);
+		}
+		if (error)
+			break;
+	}
+
+	if (data_buf)
+		free_pages((unsigned long)data_buf, get_order(data_buf_size));
+
+	maybe_delete_incomplete_file(df);
+
+	/*
+	 * Only report the error if no records were processed, otherwise
+	 * just return how many were processed successfully.
+	 */
+	if (i == 0)
+		return error;
+
+	return i;
+}
+
+static long ioctl_read_file_signature(struct file *f, void __user *arg)
+{
+	struct incfs_get_file_sig_args __user *args_usr_ptr = arg;
+	struct incfs_get_file_sig_args args = {};
+	u8 *sig_buffer = NULL;
+	size_t sig_buf_size = 0;
+	int error = 0;
+	int read_result = 0;
+	struct data_file *df = get_incfs_data_file(f);
+
+	if (!df)
+		return -EINVAL;
+
+	if (copy_from_user(&args, args_usr_ptr, sizeof(args)) > 0)
+		return -EINVAL;
+
+	sig_buf_size = args.file_signature_buf_size;
+	if (sig_buf_size > INCFS_MAX_SIGNATURE_SIZE)
+		return -E2BIG;
+
+	sig_buffer = kzalloc(sig_buf_size, GFP_NOFS | __GFP_COMP);
+	if (!sig_buffer)
+		return -ENOMEM;
+
+	read_result = incfs_read_file_signature(df,
+			range(sig_buffer, sig_buf_size));
+
+	if (read_result < 0) {
+		error = read_result;
+		goto out;
+	}
+
+	if (copy_to_user(u64_to_user_ptr(args.file_signature), sig_buffer,
+			read_result)) {
+		error = -EFAULT;
+		goto out;
+	}
+
+	args.file_signature_len_out = read_result;
+	if (copy_to_user(args_usr_ptr, &args, sizeof(args)))
+		error = -EFAULT;
+
+out:
+	kfree(sig_buffer);
+
+	return error;
+}
+
+static long ioctl_get_filled_blocks(struct file *f, void __user *arg)
+{
+	struct incfs_get_filled_blocks_args __user *args_usr_ptr = arg;
+	struct incfs_get_filled_blocks_args args = {};
+	struct data_file *df = get_incfs_data_file(f);
+	struct incfs_file_data *fd = f->private_data;
+	int error;
+
+	if (!df || !fd)
+		return -EINVAL;
+
+	if (fd->fd_fill_permission != CAN_FILL)
+		return -EPERM;
+
+	if (copy_from_user(&args, args_usr_ptr, sizeof(args)) > 0)
+		return -EINVAL;
+
+	error = incfs_get_filled_blocks(df, fd, &args);
+
+	if (copy_to_user(args_usr_ptr, &args, sizeof(args)))
+		return -EFAULT;
+
+	return error;
+}
+
+static long ioctl_get_block_count(struct file *f, void __user *arg)
+{
+	struct incfs_get_block_count_args __user *args_usr_ptr = arg;
+	struct incfs_get_block_count_args args = {};
+	struct data_file *df = get_incfs_data_file(f);
+
+	if (!df)
+		return -EINVAL;
+
+	args.total_data_blocks_out = df->df_data_block_count;
+	args.filled_data_blocks_out = atomic_read(&df->df_data_blocks_written);
+	args.total_hash_blocks_out = df->df_total_block_count -
+		df->df_data_block_count;
+	args.filled_hash_blocks_out = atomic_read(&df->df_hash_blocks_written);
+
+	if (copy_to_user(args_usr_ptr, &args, sizeof(args)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static long dispatch_ioctl(struct file *f, unsigned int req, unsigned long arg)
+{
+	switch (req) {
+	case INCFS_IOC_FILL_BLOCKS:
+		return ioctl_fill_blocks(f, (void __user *)arg);
+	case INCFS_IOC_READ_FILE_SIGNATURE:
+		return ioctl_read_file_signature(f, (void __user *)arg);
+	case INCFS_IOC_GET_FILLED_BLOCKS:
+		return ioctl_get_filled_blocks(f, (void __user *)arg);
+	case INCFS_IOC_GET_BLOCK_COUNT:
+		return ioctl_get_block_count(f, (void __user *)arg);
+	default:
+		return -EINVAL;
+	}
+}
+
+static struct dentry *dir_lookup(struct inode *dir_inode, struct dentry *dentry,
+				 unsigned int flags)
+{
+	struct mount_info *mi = get_mount_info(dir_inode->i_sb);
+	struct dentry *dir_dentry = NULL;
+	struct dentry *backing_dentry = NULL;
+	struct path dir_backing_path = {};
+	struct inode_info *dir_info = get_incfs_node(dir_inode);
+	int err = 0;
+
+	if (!mi || !dir_info || !dir_info->n_backing_inode)
+		return ERR_PTR(-EBADF);
+
+	if (d_inode(mi->mi_backing_dir_path.dentry) ==
+		dir_info->n_backing_inode) {
+		/* Lookup is happening in the FS root, so show the pseudo files. */
+		err = dir_lookup_pseudo_files(dir_inode->i_sb, dentry);
+		if (err != -ENOENT)
+			goto out;
+		err = 0;
+	}
+
+	dir_dentry = dget_parent(dentry);
+	get_incfs_backing_path(dir_dentry, &dir_backing_path);
+	backing_dentry = incfs_lookup_dentry(dir_backing_path.dentry,
+						dentry->d_name.name);
+
+	if (!backing_dentry || IS_ERR(backing_dentry)) {
+		err = IS_ERR(backing_dentry)
+			? PTR_ERR(backing_dentry)
+			: -EFAULT;
+		backing_dentry = NULL;
+		goto out;
+	} else {
+		struct inode *inode = NULL;
+		struct path backing_path = {
+			.mnt = dir_backing_path.mnt,
+			.dentry = backing_dentry
+		};
+
+		err = incfs_init_dentry(dentry, &backing_path);
+		if (err)
+			goto out;
+
+		if (!d_really_is_positive(backing_dentry)) {
+			/*
+			 * No such entry found in the backing dir.
+			 * Create a negative entry.
+			 */
+			d_add(dentry, NULL);
+			err = 0;
+			goto out;
+		}
+
+		if (d_inode(backing_dentry)->i_sb !=
+				dir_info->n_backing_inode->i_sb) {
+			/*
+			 * Somehow after the path lookup we ended up in a
+			 * different fs mount. If we keep going it's going
+			 * to end badly.
+			 */
+			err = -EXDEV;
+			goto out;
+		}
+
+		inode = fetch_regular_inode(dir_inode->i_sb, backing_dentry);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			goto out;
+		}
+
+		d_add(dentry, inode);
+	}
+
+out:
+	dput(dir_dentry);
+	dput(backing_dentry);
+	path_put(&dir_backing_path);
+	if (err)
+		pr_debug("incfs: %s %s %d\n", __func__,
+			 dentry->d_name.name, err);
+	return ERR_PTR(err);
+}
+
+static int dir_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+	struct mount_info *mi = get_mount_info(dir->i_sb);
+	struct inode_info *dir_node = get_incfs_node(dir);
+	struct dentry *backing_dentry = NULL;
+	struct path backing_path = {};
+	int err = 0;
+
+	if (!mi || !dir_node || !dir_node->n_backing_inode)
+		return -EBADF;
+
+	err = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+	if (err)
+		return err;
+
+	get_incfs_backing_path(dentry, &backing_path);
+	backing_dentry = backing_path.dentry;
+
+	if (!backing_dentry) {
+		err = -EBADF;
+		goto path_err;
+	}
+
+	if (backing_dentry->d_parent == mi->mi_index_dir) {
+		/* Can't create a subdir inside .index */
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (backing_dentry->d_parent == mi->mi_incomplete_dir) {
+		/* Can't create a subdir inside .incomplete */
+		err = -EBUSY;
+		goto out;
+	}
+	inode_lock_nested(dir_node->n_backing_inode, I_MUTEX_PARENT);
+	err = vfs_mkdir(dir_node->n_backing_inode, backing_dentry, mode | 0222);
+	inode_unlock(dir_node->n_backing_inode);
+	if (!err) {
+		struct inode *inode = NULL;
+
+		if (d_really_is_negative(backing_dentry) ||
+			unlikely(d_unhashed(backing_dentry))) {
+			err = -EINVAL;
+			goto out;
+		}
+
+		inode = fetch_regular_inode(dir->i_sb, backing_dentry);
+		if (IS_ERR(inode)) {
+			err = PTR_ERR(inode);
+			goto out;
+		}
+		d_instantiate(dentry, inode);
+	}
+
+out:
+	if (d_really_is_negative(dentry))
+		d_drop(dentry);
+	path_put(&backing_path);
+
+path_err:
+	mutex_unlock(&mi->mi_dir_struct_mutex);
+	if (err)
+		pr_debug("incfs: %s err:%d\n", __func__, err);
+	return err;
+}
+
+/*
+ * Delete file referenced by backing_dentry and if appropriate its hardlink
+ * from .index and .incomplete
+ */
+static int file_delete(struct mount_info *mi,
+			struct dentry *backing_dentry,
+			int nlink)
+{
+	struct dentry *index_file_dentry = NULL;
+	struct dentry *incomplete_file_dentry = NULL;
+	/* 2 chars per byte of file ID + 1 char for \0 */
+	char file_id_str[2 * sizeof(incfs_uuid_t) + 1] = {0};
+	ssize_t uuid_size = 0;
+	int error = 0;
+
+	WARN_ON(!mutex_is_locked(&mi->mi_dir_struct_mutex));
+
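+	/*
+	 * A fully tracked file has at most three links: its user-visible
+	 * name plus the hardlinks in .index and .incomplete. More links
+	 * than that means other user-created names remain, so only unlink
+	 * this one.
+	 */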
+	if (nlink > 3)
+		goto just_unlink;
+
+	uuid_size = vfs_getxattr(backing_dentry, INCFS_XATTR_ID_NAME,
+			file_id_str, 2 * sizeof(incfs_uuid_t));
+	if (uuid_size < 0) {
+		error = uuid_size;
+		goto out;
+	}
+
+	if (uuid_size != 2 * sizeof(incfs_uuid_t)) {
+		error = -EBADMSG;
+		goto out;
+	}
+
+	index_file_dentry = incfs_lookup_dentry(mi->mi_index_dir, file_id_str);
+	if (IS_ERR(index_file_dentry)) {
+		error = PTR_ERR(index_file_dentry);
+		index_file_dentry = NULL;
+		goto out;
+	}
+
+	if (d_really_is_positive(index_file_dentry) && nlink > 0)
+		nlink--;
+
+	if (nlink > 2)
+		goto just_unlink;
+
+	incomplete_file_dentry = incfs_lookup_dentry(mi->mi_incomplete_dir,
+						     file_id_str);
+	if (IS_ERR(incomplete_file_dentry)) {
+		error = PTR_ERR(incomplete_file_dentry);
+		incomplete_file_dentry = NULL;
+		goto out;
+	}
+
+	if (d_really_is_positive(incomplete_file_dentry) && nlink > 0)
+		nlink--;
+
+	if (nlink > 1)
+		goto just_unlink;
+
+	if (d_really_is_positive(index_file_dentry))
+		error = incfs_unlink(index_file_dentry);
+	if (error)
+		goto out;
+
+	if (d_really_is_positive(incomplete_file_dentry))
+		error = incfs_unlink(incomplete_file_dentry);
+	if (error)
+		goto out;
+
+just_unlink:
+	error = incfs_unlink(backing_dentry);
+
+out:
+	dput(index_file_dentry);
+	dput(incomplete_file_dentry);
+	if (error)
+		pr_debug("incfs: delete_file_from_index err:%d\n", error);
+	return error;
+}
+
+static int dir_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct mount_info *mi = get_mount_info(dir->i_sb);
+	struct path backing_path = {};
+	struct kstat stat;
+	int err = 0;
+
+	if (!mi)
+		return -EBADF;
+
+	err = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+	if (err)
+		return err;
+
+	get_incfs_backing_path(dentry, &backing_path);
+	if (!backing_path.dentry) {
+		err = -EBADF;
+		goto path_err;
+	}
+
+	if (backing_path.dentry->d_parent == mi->mi_index_dir) {
+		/* Direct unlinks from .index are not allowed. */
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (backing_path.dentry->d_parent == mi->mi_incomplete_dir) {
+		/* Direct unlinks from .incomplete are not allowed. */
+		err = -EBUSY;
+		goto out;
+	}
+
+	err = vfs_getattr(&backing_path, &stat, STATX_NLINK,
+			  AT_STATX_SYNC_AS_STAT);
+	if (err)
+		goto out;
+
+	err = file_delete(mi, backing_path.dentry, stat.nlink);
+
+	d_drop(dentry);
+out:
+	path_put(&backing_path);
+path_err:
+	if (err)
+		pr_debug("incfs: %s err:%d\n", __func__, err);
+	mutex_unlock(&mi->mi_dir_struct_mutex);
+	return err;
+}
+
+static int dir_link(struct dentry *old_dentry, struct inode *dir,
+			 struct dentry *new_dentry)
+{
+	struct mount_info *mi = get_mount_info(dir->i_sb);
+	struct path backing_old_path = {};
+	struct path backing_new_path = {};
+	int error = 0;
+
+	if (!mi)
+		return -EBADF;
+
+	error = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+	if (error)
+		return error;
+
+	get_incfs_backing_path(old_dentry, &backing_old_path);
+	get_incfs_backing_path(new_dentry, &backing_new_path);
+
+	if (backing_new_path.dentry->d_parent == mi->mi_index_dir) {
+		/* Can't link to .index */
+		error = -EBUSY;
+		goto out;
+	}
+
+	if (backing_new_path.dentry->d_parent == mi->mi_incomplete_dir) {
+		/* Can't link to .incomplete */
+		error = -EBUSY;
+		goto out;
+	}
+
+	error = incfs_link(backing_old_path.dentry, backing_new_path.dentry);
+	if (!error) {
+		struct inode *inode = NULL;
+		struct dentry *bdentry = backing_new_path.dentry;
+
+		if (d_really_is_negative(bdentry)) {
+			error = -EINVAL;
+			goto out;
+		}
+
+		inode = fetch_regular_inode(dir->i_sb, bdentry);
+		if (IS_ERR(inode)) {
+			error = PTR_ERR(inode);
+			goto out;
+		}
+		d_instantiate(new_dentry, inode);
+	}
+
+out:
+	path_put(&backing_old_path);
+	path_put(&backing_new_path);
+	if (error)
+		pr_debug("incfs: %s err:%d\n", __func__, error);
+	mutex_unlock(&mi->mi_dir_struct_mutex);
+	return error;
+}
+
+static int dir_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct mount_info *mi = get_mount_info(dir->i_sb);
+	struct path backing_path = {};
+	int err = 0;
+
+	if (!mi)
+		return -EBADF;
+
+	err = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+	if (err)
+		return err;
+
+	get_incfs_backing_path(dentry, &backing_path);
+	if (!backing_path.dentry) {
+		err = -EBADF;
+		goto path_err;
+	}
+
+	if (backing_path.dentry == mi->mi_index_dir) {
+		/* Can't delete .index */
+		err = -EBUSY;
+		goto out;
+	}
+
+	if (backing_path.dentry == mi->mi_incomplete_dir) {
+		/* Can't delete .incomplete */
+		err = -EBUSY;
+		goto out;
+	}
+
+	err = incfs_rmdir(backing_path.dentry);
+	if (!err)
+		d_drop(dentry);
+out:
+	path_put(&backing_path);
+
+path_err:
+	if (err)
+		pr_debug("incfs: %s err:%d\n", __func__, err);
+	mutex_unlock(&mi->mi_dir_struct_mutex);
+	return err;
+}
+
+static int dir_rename(struct inode *old_dir, struct dentry *old_dentry,
+		struct inode *new_dir, struct dentry *new_dentry)
+{
+	struct mount_info *mi = get_mount_info(old_dir->i_sb);
+	struct dentry *backing_old_dentry;
+	struct dentry *backing_new_dentry;
+	struct dentry *backing_old_dir_dentry;
+	struct dentry *backing_new_dir_dentry;
+	struct inode *target_inode;
+	struct dentry *trap;
+	int error = 0;
+
+	error = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+	if (error)
+		return error;
+
+	backing_old_dentry = get_incfs_dentry(old_dentry)->backing_path.dentry;
+
+	if (!backing_old_dentry || backing_old_dentry == mi->mi_index_dir ||
+	    backing_old_dentry == mi->mi_incomplete_dir) {
+		/* Renaming .index or .incomplete is not allowed */
+		error = -EBUSY;
+		goto exit;
+	}
+
+	backing_new_dentry = get_incfs_dentry(new_dentry)->backing_path.dentry;
+	dget(backing_old_dentry);
+	dget(backing_new_dentry);
+
+	backing_old_dir_dentry = dget_parent(backing_old_dentry);
+	backing_new_dir_dentry = dget_parent(backing_new_dentry);
+	target_inode = d_inode(new_dentry);
+
+	if (backing_old_dir_dentry == mi->mi_index_dir ||
+	    backing_old_dir_dentry == mi->mi_incomplete_dir) {
+		/* Direct moves from .index or .incomplete are not allowed. */
+		error = -EBUSY;
+		goto out;
+	}
+
+	trap = lock_rename(backing_old_dir_dentry, backing_new_dir_dentry);
+
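+	/*
+	 * lock_rename() returns the common ancestor when one dentry is a
+	 * descendant of the other; such renames must be rejected.
+	 */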
+	if (trap == backing_old_dentry) {
+		error = -EINVAL;
+		goto unlock_out;
+	}
+	if (trap == backing_new_dentry) {
+		error = -ENOTEMPTY;
+		goto unlock_out;
+	}
+
+	error = vfs_rename(d_inode(backing_old_dir_dentry), backing_old_dentry,
+			d_inode(backing_new_dir_dentry), backing_new_dentry,
+			NULL, 0);
+	if (error)
+		goto unlock_out;
+	if (target_inode)
+		fsstack_copy_attr_all(target_inode,
+			get_incfs_node(target_inode)->n_backing_inode);
+	fsstack_copy_attr_all(new_dir, d_inode(backing_new_dir_dentry));
+	if (new_dir != old_dir)
+		fsstack_copy_attr_all(old_dir, d_inode(backing_old_dir_dentry));
+
+unlock_out:
+	unlock_rename(backing_old_dir_dentry, backing_new_dir_dentry);
+
+out:
+	dput(backing_new_dir_dentry);
+	dput(backing_old_dir_dentry);
+	dput(backing_new_dentry);
+	dput(backing_old_dentry);
+
+exit:
+	mutex_unlock(&mi->mi_dir_struct_mutex);
+	if (error)
+		pr_debug("incfs: %s err:%d\n", __func__, error);
+	return error;
+}
+
+static int file_open(struct inode *inode, struct file *file)
+{
+	struct mount_info *mi = get_mount_info(inode->i_sb);
+	struct file *backing_file = NULL;
+	struct path backing_path = {};
+	int err = 0;
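+	/*
+	 * Backing regular files are opened read-write so incfs can fill in
+	 * data blocks on demand; backing directories only need read access.
+	 */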
+	int flags = O_NOATIME | O_LARGEFILE |
+		(S_ISDIR(inode->i_mode) ? O_RDONLY : O_RDWR);
+
+	WARN_ON(file->private_data);
+
+	if (!mi)
+		return -EBADF;
+
+	get_incfs_backing_path(file->f_path.dentry, &backing_path);
+	if (!backing_path.dentry)
+		return -EBADF;
+
+	backing_file = dentry_open(&backing_path, flags, mi->mi_owner);
+	path_put(&backing_path);
+
+	if (IS_ERR(backing_file)) {
+		err = PTR_ERR(backing_file);
+		backing_file = NULL;
+		goto out;
+	}
+
+	if (S_ISREG(inode->i_mode)) {
+		struct incfs_file_data *fd = kzalloc(sizeof(*fd), GFP_NOFS);
+
+		if (!fd) {
+			err = -ENOMEM;
+			goto out;
+		}
+
+		*fd = (struct incfs_file_data) {
+			.fd_fill_permission = CANT_FILL,
+		};
+		file->private_data = fd;
+
+		err = make_inode_ready_for_data_ops(mi, inode, backing_file);
+
+	} else if (S_ISDIR(inode->i_mode)) {
+		struct dir_file *dir = NULL;
+
+		dir = incfs_open_dir_file(mi, backing_file);
+		if (IS_ERR(dir))
+			err = PTR_ERR(dir);
+		else
+			file->private_data = dir;
+	} else {
+		err = -EBADF;
+	}
+
+out:
+	if (err) {
+		pr_debug("incfs: %s name:%s err:%d\n", __func__,
+			 file->f_path.dentry->d_name.name, err);
+		if (S_ISREG(inode->i_mode))
+			kfree(file->private_data);
+		else if (S_ISDIR(inode->i_mode))
+			incfs_free_dir_file(file->private_data);
+
+		file->private_data = NULL;
+	}
+
+	if (backing_file)
+		fput(backing_file);
+	return err;
+}
+
+static int file_release(struct inode *inode, struct file *file)
+{
+	if (S_ISREG(inode->i_mode)) {
+		kfree(file->private_data);
+		file->private_data = NULL;
+	} else if (S_ISDIR(inode->i_mode)) {
+		struct dir_file *dir = get_incfs_dir_file(file);
+
+		incfs_free_dir_file(dir);
+	}
+
+	return 0;
+}
+
+static int dentry_revalidate(struct dentry *d, unsigned int flags)
+{
+	struct path backing_path = {};
+	struct inode_info *info = get_incfs_node(d_inode(d));
+	struct inode *binode = (info == NULL) ? NULL : info->n_backing_inode;
+	struct dentry *backing_dentry = NULL;
+	int result = 0;
+
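+	/* Looking at the backing path requires refs; bail out of RCU walk */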
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	get_incfs_backing_path(d, &backing_path);
+	backing_dentry = backing_path.dentry;
+	if (!backing_dentry)
+		goto out;
+
+	if (d_inode(backing_dentry) != binode) {
+		/*
+		 * The backing inode reached via the dentry doesn't match the
+		 * one recorded in the incfs inode. Most likely the backing
+		 * directory was modified directly, bypassing the Incremental
+		 * FS interface.
+		 */
+		goto out;
+	}
+
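+	/* Defer to the backing filesystem's own revalidation, if it has one */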
+	if (backing_dentry->d_flags & DCACHE_OP_REVALIDATE) {
+		result = backing_dentry->d_op->d_revalidate(backing_dentry,
+				flags);
+	} else {
+		result = 1;
+	}
+
+out:
+	path_put(&backing_path);
+	return result;
+}
+
+static void dentry_release(struct dentry *d)
+{
+	struct dentry_info *di = get_incfs_dentry(d);
+
+	if (di)
+		path_put(&di->backing_path);
+	kfree(d->d_fsdata);
+	d->d_fsdata = NULL;
+}
+
+static struct inode *alloc_inode(struct super_block *sb)
+{
+	struct inode_info *node = kzalloc(sizeof(*node), GFP_NOFS);
+
+	/* TODO: add a slab-based cache here. */
+	if (!node)
+		return NULL;
+	inode_init_once(&node->n_vfs_inode);
+	return &node->n_vfs_inode;
+}
+
+static void free_inode(struct inode *inode)
+{
+	struct inode_info *node = get_incfs_node(inode);
+
+	kfree(node);
+}
+
+static void evict_inode(struct inode *inode)
+{
+	struct inode_info *node = get_incfs_node(inode);
+
+	if (node) {
+		if (node->n_backing_inode) {
+			iput(node->n_backing_inode);
+			node->n_backing_inode = NULL;
+		}
+		if (node->n_file) {
+			incfs_free_data_file(node->n_file);
+			node->n_file = NULL;
+		}
+	}
+
+	truncate_inode_pages(&inode->i_data, 0);
+	clear_inode(inode);
+}
+
+static int incfs_setattr(struct dentry *dentry, struct iattr *ia)
+{
+	struct dentry_info *di = get_incfs_dentry(dentry);
+	struct dentry *backing_dentry;
+	struct inode *backing_inode;
+	int error;
+
+	if (ia->ia_valid & ATTR_SIZE)
+		return -EINVAL;
+
+	if (!di)
+		return -EINVAL;
+	backing_dentry = di->backing_path.dentry;
+	if (!backing_dentry)
+		return -EINVAL;
+
+	backing_inode = d_inode(backing_dentry);
+
+	/* incfs files are read-only, but the backing files must be writable */
+	if (S_ISREG(backing_inode->i_mode)) {
+		if ((ia->ia_valid & ATTR_MODE) && (ia->ia_mode & 0222))
+			return -EINVAL;
+
+		ia->ia_mode |= 0222;
+	}
+
+	inode_lock(d_inode(backing_dentry));
+	error = notify_change(backing_dentry, ia, NULL);
+	inode_unlock(d_inode(backing_dentry));
+
+	if (error)
+		return error;
+
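+	/* Strip the forced write bits again before updating the incfs inode */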
+	if (S_ISREG(backing_inode->i_mode))
+		ia->ia_mode &= ~0222;
+
+	return simple_setattr(dentry, ia);
+}
+
+static ssize_t incfs_getxattr(struct dentry *d, const char *name,
+			void *value, size_t size)
+{
+	struct dentry_info *di = get_incfs_dentry(d);
+	struct mount_info *mi = get_mount_info(d->d_sb);
+	char *stored_value;
+	size_t stored_size;
+
+	if (di && di->backing_path.dentry)
+		return vfs_getxattr(di->backing_path.dentry, name, value, size);
+
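+	/*
+	 * Pseudo files such as .pending_reads and .log have no backing file;
+	 * the only xattr kept for them is the selinux label, stored in the
+	 * mount info.
+	 */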
+	if (strcmp(name, "security.selinux"))
+		return -ENODATA;
+
+	if (!strcmp(d->d_iname, INCFS_PENDING_READS_FILENAME)) {
+		stored_value = mi->pending_read_xattr;
+		stored_size = mi->pending_read_xattr_size;
+	} else if (!strcmp(d->d_iname, INCFS_LOG_FILENAME)) {
+		stored_value = mi->log_xattr;
+		stored_size = mi->log_xattr_size;
+	} else {
+		return -ENODATA;
+	}
+
+	if (!stored_value)
+		return -ENODATA;
+
+	if (stored_size > size)
+		return -E2BIG;
+
+	memcpy(value, stored_value, stored_size);
+	return stored_size;
+}
+
+static ssize_t incfs_setxattr(struct dentry *d, const char *name,
+			const void *value, size_t size, int flags)
+{
+	struct dentry_info *di = get_incfs_dentry(d);
+	struct mount_info *mi = get_mount_info(d->d_sb);
+	void **stored_value;
+	size_t *stored_size;
+
+	if (di && di->backing_path.dentry)
+		return vfs_setxattr(di->backing_path.dentry, name, value, size,
+				    flags);
+
+	if (strcmp(name, "security.selinux"))
+		return -ENODATA;
+
+	if (size > INCFS_MAX_FILE_ATTR_SIZE)
+		return -E2BIG;
+
+	if (!strcmp(d->d_iname, INCFS_PENDING_READS_FILENAME)) {
+		stored_value = &mi->pending_read_xattr;
+		stored_size = &mi->pending_read_xattr_size;
+	} else if (!strcmp(d->d_iname, INCFS_LOG_FILENAME)) {
+		stored_value = &mi->log_xattr;
+		stored_size = &mi->log_xattr_size;
+	} else {
+		return -ENODATA;
+	}
+
+	kfree(*stored_value);
+	*stored_value = kzalloc(size, GFP_NOFS);
+	if (!*stored_value)
+		return -ENOMEM;
+
+	memcpy(*stored_value, value, size);
+	*stored_size = size;
+	return 0;
+}
+
+static ssize_t incfs_listxattr(struct dentry *d, char *list, size_t size)
+{
+	struct dentry_info *di = get_incfs_dentry(d);
+
+	if (!di || !di->backing_path.dentry)
+		return -ENODATA;
+
+	return vfs_listxattr(di->backing_path.dentry, list, size);
+}
+
+struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
+			      const char *dev_name, void *data)
+{
+	static const char index_name[] = ".index";
+	static const char incomplete_name[] = ".incomplete";
+	struct mount_options options = {};
+	struct mount_info *mi = NULL;
+	struct path backing_dir_path = {};
+	struct dentry *index_dir = NULL;
+	struct dentry *incomplete_dir = NULL;
+	struct super_block *src_fs_sb = NULL;
+	struct inode *root_inode = NULL;
+	struct super_block *sb = sget(type, NULL, set_anon_super, flags, NULL);
+	int error = 0;
+
+	if (IS_ERR(sb))
+		return ERR_CAST(sb);
+
+	sb->s_op = &incfs_super_ops;
+	sb->s_d_op = &incfs_dentry_ops;
+	sb->s_flags |= S_NOATIME;
+	sb->s_magic = INCFS_MAGIC_NUMBER;
+	sb->s_time_gran = 1;
+	sb->s_blocksize = INCFS_DATA_FILE_BLOCK_SIZE;
+	sb->s_blocksize_bits = blksize_bits(sb->s_blocksize);
+	sb->s_xattr = incfs_xattr_ops;
+
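+	/* incfs data blocks map 1:1 onto pages; refuse to build otherwise */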
+	BUILD_BUG_ON(PAGE_SIZE != INCFS_DATA_FILE_BLOCK_SIZE);
+
+	error = parse_options(&options, (char *)data);
+	if (error != 0) {
+		pr_err("incfs: Options parsing error. %d\n", error);
+		goto err;
+	}
+
+	sb->s_bdi->ra_pages = options.readahead_pages;
+	if (!dev_name) {
+		pr_err("incfs: Backing dir is not set, filesystem can't be mounted.\n");
+		error = -ENOENT;
+		goto err;
+	}
+
+	error = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
+			&backing_dir_path);
+	if (error || backing_dir_path.dentry == NULL ||
+		!d_really_is_positive(backing_dir_path.dentry)) {
+		pr_err("incfs: Error accessing: %s.\n",
+			dev_name);
+		goto err;
+	}
+	src_fs_sb = backing_dir_path.dentry->d_sb;
+	sb->s_maxbytes = src_fs_sb->s_maxbytes;
+
+	mi = incfs_alloc_mount_info(sb, &options, &backing_dir_path);
+
+	if (IS_ERR_OR_NULL(mi)) {
+		error = PTR_ERR(mi);
+		pr_err("incfs: Error allocating mount info. %d\n", error);
+		mi = NULL;
+		goto err;
+	}
+
+	index_dir = open_or_create_special_dir(backing_dir_path.dentry,
+					       index_name);
+	if (IS_ERR_OR_NULL(index_dir)) {
+		error = PTR_ERR(index_dir);
+		pr_err("incfs: Can't find or create .index dir in %s\n",
+			dev_name);
+		/* No need to null index_dir since we don't put it */
+		goto err;
+	}
+	mi->mi_index_dir = index_dir;
+
+	incomplete_dir = open_or_create_special_dir(backing_dir_path.dentry,
+						    incomplete_name);
+	if (IS_ERR_OR_NULL(incomplete_dir)) {
+		error = PTR_ERR(incomplete_dir);
+		pr_err("incfs: Can't find or create .incomplete dir in %s\n",
+			dev_name);
+		/* No need to null incomplete_dir since we don't put it */
+		goto err;
+	}
+	mi->mi_incomplete_dir = incomplete_dir;
+
+	sb->s_fs_info = mi;
+	root_inode = fetch_regular_inode(sb, backing_dir_path.dentry);
+	if (IS_ERR(root_inode)) {
+		error = PTR_ERR(root_inode);
+		goto err;
+	}
+
+	sb->s_root = d_make_root(root_inode);
+	if (!sb->s_root) {
+		error = -ENOMEM;
+		goto err;
+	}
+	error = incfs_init_dentry(sb->s_root, &backing_dir_path);
+	if (error)
+		goto err;
+
+	path_put(&backing_dir_path);
+	sb->s_flags |= SB_ACTIVE;
+
+	pr_debug("incfs: mount\n");
+	return dget(sb->s_root);
+err:
+	sb->s_fs_info = NULL;
+	path_put(&backing_dir_path);
+	incfs_free_mount_info(mi);
+	deactivate_locked_super(sb);
+	return ERR_PTR(error);
+}
+
+static int incfs_remount_fs(struct super_block *sb, int *flags, char *data)
+{
+	struct mount_options options;
+	struct mount_info *mi = get_mount_info(sb);
+	int err = 0;
+
+	sync_filesystem(sb);
+	err = parse_options(&options, (char *)data);
+	if (err)
+		return err;
+
+	if (options.report_uid != mi->mi_options.report_uid) {
+		pr_err("incfs: Can't change report_uid mount option on remount\n");
+		return -EOPNOTSUPP;
+	}
+
+	err = incfs_realloc_mount_info(mi, &options);
+	if (err)
+		return err;
+
+	pr_debug("incfs: remount\n");
+	return 0;
+}
+
+void incfs_kill_sb(struct super_block *sb)
+{
+	struct mount_info *mi = sb->s_fs_info;
+
+	pr_debug("incfs: unmount\n");
+	incfs_free_mount_info(mi);
+	generic_shutdown_super(sb);
+}
+
+static int show_options(struct seq_file *m, struct dentry *root)
+{
+	struct mount_info *mi = get_mount_info(root->d_sb);
+
+	seq_printf(m, ",read_timeout_ms=%u", mi->mi_options.read_timeout_ms);
+	seq_printf(m, ",readahead=%u", mi->mi_options.readahead_pages);
+	if (mi->mi_options.read_log_pages != 0) {
+		seq_printf(m, ",rlog_pages=%u", mi->mi_options.read_log_pages);
+		seq_printf(m, ",rlog_wakeup_cnt=%u",
+			   mi->mi_options.read_log_wakeup_count);
+	}
+	if (mi->mi_options.report_uid)
+		seq_puts(m, ",report_uid");
+	return 0;
+}
diff --git a/fs/incfs/vfs.h b/fs/incfs/vfs.h
new file mode 100644
index 0000000..79fdf24
--- /dev/null
+++ b/fs/incfs/vfs.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018 Google LLC
+ */
+
+#ifndef _INCFS_VFS_H
+#define _INCFS_VFS_H
+
+extern const struct file_operations incfs_file_ops;
+extern const struct inode_operations incfs_file_inode_ops;
+
+void incfs_kill_sb(struct super_block *sb);
+struct dentry *incfs_mount_fs(struct file_system_type *type, int flags,
+			      const char *dev_name, void *data);
+int incfs_link(struct dentry *what, struct dentry *where);
+int incfs_unlink(struct dentry *dentry);
+
+static inline struct mount_info *get_mount_info(struct super_block *sb)
+{
+	struct mount_info *result = sb->s_fs_info;
+
+	WARN_ON(!result);
+	return result;
+}
+
+static inline struct super_block *file_superblock(struct file *f)
+{
+	struct inode *inode = file_inode(f);
+
+	return inode->i_sb;
+}
+
+#endif
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 933f234..7048890 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -6,6 +6,7 @@
 #include <linux/module.h>
 #include <linux/compiler.h>
 #include <linux/fs.h>
+#include <linux/fscrypt.h>
 #include <linux/iomap.h>
 #include <linux/backing-dev.h>
 #include <linux/uio.h>
@@ -185,11 +186,14 @@
 iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
 		unsigned len)
 {
+	struct inode *inode = file_inode(dio->iocb->ki_filp);
 	struct page *page = ZERO_PAGE(0);
 	int flags = REQ_SYNC | REQ_IDLE;
 	struct bio *bio;
 
 	bio = bio_alloc(GFP_KERNEL, 1);
+	fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
+				  GFP_KERNEL);
 	bio_set_dev(bio, iomap->bdev);
 	bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
 	bio->bi_private = dio;
@@ -255,6 +259,7 @@
 		ret = nr_pages;
 		goto out;
 	}
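+	/* Keep the bio's crypto data-unit numbers (DUNs) contiguous */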
+	nr_pages = fscrypt_limit_dio_pages(inode, pos, nr_pages);
 
 	if (need_zeroout) {
 		/* zero out from the start of the block to the write offset */
@@ -272,6 +277,8 @@
 		}
 
 		bio = bio_alloc(GFP_KERNEL, nr_pages);
+		fscrypt_set_bio_crypt_ctx(bio, inode, pos >> inode->i_blkbits,
+					  GFP_KERNEL);
 		bio_set_dev(bio, iomap->bdev);
 		bio->bi_iter.bi_sector = iomap_sector(iomap, pos);
 		bio->bi_write_hint = dio->iocb->ki_hint;
@@ -309,6 +316,7 @@
 		copied += n;
 
 		nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);
+		nr_pages = fscrypt_limit_dio_pages(inode, pos, nr_pages);
 		iomap_dio_submit_bio(dio, iomap, bio, pos);
 		pos += n;
 	} while (nr_pages);
diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c
index c2332e3..e6f42fe4 100644
--- a/fs/jffs2/security.c
+++ b/fs/jffs2/security.c
@@ -50,7 +50,8 @@
 /* ---- XATTR Handler for "security.*" ----------------- */
 static int jffs2_security_getxattr(const struct xattr_handler *handler,
 				   struct dentry *unused, struct inode *inode,
-				   const char *name, void *buffer, size_t size)
+				   const char *name, void *buffer, size_t size,
+				   int flags)
 {
 	return do_jffs2_getxattr(inode, JFFS2_XPREFIX_SECURITY,
 				 name, buffer, size);
diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c
index 5d60308..9dccaae 100644
--- a/fs/jffs2/xattr_trusted.c
+++ b/fs/jffs2/xattr_trusted.c
@@ -18,7 +18,8 @@
 
 static int jffs2_trusted_getxattr(const struct xattr_handler *handler,
 				  struct dentry *unused, struct inode *inode,
-				  const char *name, void *buffer, size_t size)
+				  const char *name, void *buffer, size_t size,
+				  int flags)
 {
 	return do_jffs2_getxattr(inode, JFFS2_XPREFIX_TRUSTED,
 				 name, buffer, size);
diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c
index 9d027b4..c0983a3 100644
--- a/fs/jffs2/xattr_user.c
+++ b/fs/jffs2/xattr_user.c
@@ -18,7 +18,8 @@
 
 static int jffs2_user_getxattr(const struct xattr_handler *handler,
 			       struct dentry *unused, struct inode *inode,
-			       const char *name, void *buffer, size_t size)
+			       const char *name, void *buffer, size_t size,
+			       int flags)
 {
 	return do_jffs2_getxattr(inode, JFFS2_XPREFIX_USER,
 				 name, buffer, size);
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index db41e78..5c79a35 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -925,7 +925,7 @@
 
 static int jfs_xattr_get(const struct xattr_handler *handler,
 			 struct dentry *unused, struct inode *inode,
-			 const char *name, void *value, size_t size)
+			 const char *name, void *value, size_t size, int flags)
 {
 	name = xattr_full_name(handler, name);
 	return __jfs_getxattr(inode, name, value, size);
@@ -942,7 +942,8 @@
 
 static int jfs_xattr_get_os2(const struct xattr_handler *handler,
 			     struct dentry *unused, struct inode *inode,
-			     const char *name, void *value, size_t size)
+			     const char *name, void *value, size_t size,
+			     int flags)
 {
 	if (is_known_namespace(name))
 		return -EOPNOTSUPP;
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c
index fc2469a..1c9e4ec 100644
--- a/fs/kernfs/inode.c
+++ b/fs/kernfs/inode.c
@@ -310,7 +310,8 @@
 
 static int kernfs_vfs_xattr_get(const struct xattr_handler *handler,
 				struct dentry *unused, struct inode *inode,
-				const char *suffix, void *value, size_t size)
+				const char *suffix, void *value, size_t size,
+				int flags)
 {
 	const char *name = xattr_full_name(handler, suffix);
 	struct kernfs_node *kn = inode->i_private;
diff --git a/fs/libfs.c b/fs/libfs.c
index 7124c2e..d1c3bad 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -1451,4 +1451,74 @@
 	return 0;
 }
 EXPORT_SYMBOL(generic_ci_d_hash);
+
+static const struct dentry_operations generic_ci_dentry_ops = {
+	.d_hash = generic_ci_d_hash,
+	.d_compare = generic_ci_d_compare,
+};
 #endif
+
+#ifdef CONFIG_FS_ENCRYPTION
+static const struct dentry_operations generic_encrypted_dentry_ops = {
+	.d_revalidate = fscrypt_d_revalidate,
+};
+#endif
+
+#if defined(CONFIG_FS_ENCRYPTION) && defined(CONFIG_UNICODE)
+static const struct dentry_operations generic_encrypted_ci_dentry_ops = {
+	.d_hash = generic_ci_d_hash,
+	.d_compare = generic_ci_d_compare,
+	.d_revalidate = fscrypt_d_revalidate,
+};
+#endif
+
+/**
+ * generic_set_encrypted_ci_d_ops - helper for setting d_ops for given dentry
+ * @dentry:	dentry to set ops on
+ *
+ * Casefolded directories need d_hash and d_compare set, so that the dentries
+ * contained in them are handled case-insensitively.  Note that these operations
+ * are needed on the parent directory rather than on the dentries in it, and
+ * while the casefolding flag can be toggled on and off on an empty directory,
+ * dentry_operations can't be changed later.  As a result, if the filesystem has
+ * casefolding support enabled at all, we have to give all dentries the
+ * casefolding operations even if their inode doesn't have the casefolding flag
+ * currently (and thus the casefolding ops would be no-ops for now).
+ *
+ * Encryption works differently in that the only dentry operation it needs is
+ * d_revalidate, which it only needs on dentries that have the no-key name flag.
+ * The no-key flag can't be set "later", so we don't have to worry about that.
+ *
+ * Finally, to maximize compatibility with overlayfs (which isn't compatible
+ * with certain dentry operations) and to avoid taking an unnecessary
+ * performance hit, we use custom dentry_operations for each possible
+ * combination rather than always installing all operations.
+ */
+void generic_set_encrypted_ci_d_ops(struct dentry *dentry)
+{
+#ifdef CONFIG_FS_ENCRYPTION
+	bool needs_encrypt_ops = dentry->d_flags & DCACHE_NOKEY_NAME;
+#endif
+#ifdef CONFIG_UNICODE
+	bool needs_ci_ops = dentry->d_sb->s_encoding;
+#endif
+#if defined(CONFIG_FS_ENCRYPTION) && defined(CONFIG_UNICODE)
+	if (needs_encrypt_ops && needs_ci_ops) {
+		d_set_d_op(dentry, &generic_encrypted_ci_dentry_ops);
+		return;
+	}
+#endif
+#ifdef CONFIG_FS_ENCRYPTION
+	if (needs_encrypt_ops) {
+		d_set_d_op(dentry, &generic_encrypted_dentry_ops);
+		return;
+	}
+#endif
+#ifdef CONFIG_UNICODE
+	if (needs_ci_ops) {
+		d_set_d_op(dentry, &generic_ci_dentry_ops);
+		return;
+	}
+#endif
+}
+EXPORT_SYMBOL(generic_set_encrypted_ci_d_ops);
diff --git a/fs/mpage.c b/fs/mpage.c
index 830e6cc..6bdb8dc 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -32,6 +32,14 @@
 #include <linux/cleancache.h>
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/android_fs.h>
+
+EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_start);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_end);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_start);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_end);
+
 /*
  * I/O completion handler for multipage BIOs.
  *
@@ -49,6 +57,16 @@
 	struct bio_vec *bv;
 	struct bvec_iter_all iter_all;
 
+	if (trace_android_fs_dataread_end_enabled() &&
+	    (bio_data_dir(bio) == READ)) {
+		struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+		if (first_page != NULL)
+			trace_android_fs_dataread_end(first_page->mapping->host,
+						      page_offset(first_page),
+						      bio->bi_iter.bi_size);
+	}
+
 	bio_for_each_segment_all(bv, bio, iter_all) {
 		struct page *page = bv->bv_page;
 		page_endio(page, bio_op(bio),
@@ -60,6 +78,24 @@
 
 static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
 {
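+	/*
+	 * Emit the Android read-start tracepoint, identifying the file by
+	 * the bio's first page, before the bio is sent down.
+	 */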
+	if (trace_android_fs_dataread_start_enabled() && (op == REQ_OP_READ)) {
+		struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+		if (first_page != NULL) {
+			char *path, pathbuf[MAX_TRACE_PATHBUF_LEN];
+
+			path = android_fstrace_get_pathname(pathbuf,
+						    MAX_TRACE_PATHBUF_LEN,
+						    first_page->mapping->host);
+			trace_android_fs_dataread_start(
+				first_page->mapping->host,
+				page_offset(first_page),
+				bio->bi_iter.bi_size,
+				current->pid,
+				path,
+				current->comm);
+		}
+	}
 	bio->bi_end_io = mpage_end_io;
 	bio_set_op_attrs(bio, op, op_flags);
 	guard_bio_eod(bio);
diff --git a/fs/namei.c b/fs/namei.c
index d4a6dd7..71e4d89 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -43,6 +43,9 @@
 #include "internal.h"
 #include "mount.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/namei.h>
+
 /* [Feb-1997 T. Schoebel-Theuer]
  * Fundamental changes in the pathname lookup mechanisms (namei)
  * were necessary because of omirr.  The reason is that omirr needs
@@ -770,6 +773,81 @@
 		return 1;
 }
 
+#define INIT_PATH_SIZE 64
+
+static void success_walk_trace(struct nameidata *nd)
+{
+	struct path *pt = &nd->path;
+	struct inode *i = nd->inode;
+	char buf[INIT_PATH_SIZE], *try_buf;
+	int cur_path_size;
+	char *p;
+
+	/* When eBPF/tracepoint is disabled, keep overhead low. */
+	if (!trace_inodepath_enabled())
+		return;
+
+	/* First try stack allocated buffer. */
+	try_buf = buf;
+	cur_path_size = INIT_PATH_SIZE;
+
+	while (cur_path_size <= PATH_MAX) {
+		/* Free previous heap allocation if we are now trying
+		 * a second or later heap allocation.
+		 */
+		if (try_buf != buf)
+			kfree(try_buf);
+
+		/* All but the first alloc are on the heap. */
+		if (cur_path_size != INIT_PATH_SIZE) {
+			try_buf = kmalloc(cur_path_size, GFP_KERNEL);
+			if (!try_buf) {
+				try_buf = buf;
+				sprintf(try_buf, "error:buf_alloc_failed");
+				break;
+			}
+		}
+
+		p = d_path(pt, try_buf, cur_path_size);
+
+		if (!IS_ERR(p)) {
+			char *end = mangle_path(try_buf, p, "\n");
+
+			if (end) {
+				try_buf[end - try_buf] = 0;
+				break;
+			} else {
+				/* On mangle errors, double path size
+				 * till PATH_MAX.
+				 */
+				cur_path_size = cur_path_size << 1;
+				continue;
+			}
+		}
+
+		if (PTR_ERR(p) == -ENAMETOOLONG) {
+			/* If d_path complains that name is too long,
+			 * then double path size till PATH_MAX.
+			 */
+			cur_path_size = cur_path_size << 1;
+			continue;
+		}
+
+		sprintf(try_buf, "error:d_path_failed_%lu",
+			-1 * PTR_ERR(p));
+		break;
+	}
+
+	if (cur_path_size > PATH_MAX)
+		sprintf(try_buf, "error:d_path_name_too_long");
+
+	trace_inodepath(i, try_buf);
+
+	if (try_buf != buf)
+		kfree(try_buf);
+}
+
 /**
  * complete_walk - successful completion of path walk
  * @nd:  pointer nameidata
@@ -817,15 +895,21 @@
 			return -EXDEV;
 	}
 
-	if (likely(!(nd->flags & LOOKUP_JUMPED)))
+	if (likely(!(nd->flags & LOOKUP_JUMPED))) {
+		success_walk_trace(nd);
 		return 0;
+	}
 
-	if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
+	if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE))) {
+		success_walk_trace(nd);
 		return 0;
+	}
 
 	status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
-	if (status > 0)
+	if (status > 0) {
+		success_walk_trace(nd);
 		return 0;
+	}
 
 	if (!status)
 		status = -ESTALE;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 9e0ca9b..9648203 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -7495,7 +7495,8 @@
 
 static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler,
 				   struct dentry *unused, struct inode *inode,
-				   const char *key, void *buf, size_t buflen)
+				   const char *key, void *buf, size_t buflen,
+				   int flags)
 {
 	return nfs4_proc_get_acl(inode, buf, buflen);
 }
@@ -7520,7 +7521,8 @@
 
 static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler,
 				     struct dentry *unused, struct inode *inode,
-				     const char *key, void *buf, size_t buflen)
+				     const char *key, void *buf, size_t buflen,
+				     int flags)
 {
 	if (security_ismaclabel(key))
 		return nfs4_get_security_label(inode, buf, buflen);
@@ -7597,7 +7599,8 @@
 
 static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler,
 				    struct dentry *unused, struct inode *inode,
-				    const char *key, void *buf, size_t buflen)
+				    const char *key, void *buf, size_t buflen,
+				    int flags)
 {
 	struct nfs_access_entry cache;
 	ssize_t ret;
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 186722b..8755e49 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -703,6 +703,8 @@
 	struct fsnotify_group *group;
 	struct inode *inode;
 	struct path path;
+	struct path alteredpath;
+	struct path *canonical_path = &path;
 	struct fd f;
 	int ret;
 	unsigned flags = 0;
@@ -749,13 +751,23 @@
 	if (ret)
 		goto fput_and_out;
 
+	/* Support stacked filesystems: watch the canonical (lower) path */
+	if (path.dentry && path.dentry->d_op) {
+		if (path.dentry->d_op->d_canonical_path) {
+			path.dentry->d_op->d_canonical_path(&path,
+							    &alteredpath);
+			canonical_path = &alteredpath;
+			path_put(&path);
+		}
+	}
+
 	/* inode held in place by reference to path; group by fget on fd */
-	inode = path.dentry->d_inode;
+	inode = canonical_path->dentry->d_inode;
 	group = f.file->private_data;
 
 	/* create/update an inode mark */
 	ret = inotify_update_watch(group, inode, mask);
-	path_put(&path);
+	path_put(canonical_path);
 fput_and_out:
 	fdput(f);
 	return ret;
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index 9ccd19d..0ed0a61 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -7242,7 +7242,8 @@
  */
 static int ocfs2_xattr_security_get(const struct xattr_handler *handler,
 				    struct dentry *unused, struct inode *inode,
-				    const char *name, void *buffer, size_t size)
+				    const char *name, void *buffer, size_t size,
+				    int flags)
 {
 	return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_SECURITY,
 			       name, buffer, size);
@@ -7314,7 +7315,8 @@
  */
 static int ocfs2_xattr_trusted_get(const struct xattr_handler *handler,
 				   struct dentry *unused, struct inode *inode,
-				   const char *name, void *buffer, size_t size)
+				   const char *name, void *buffer, size_t size,
+				   int flags)
 {
 	return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_TRUSTED,
 			       name, buffer, size);
@@ -7340,7 +7342,8 @@
  */
 static int ocfs2_xattr_user_get(const struct xattr_handler *handler,
 				struct dentry *unused, struct inode *inode,
-				const char *name, void *buffer, size_t size)
+				const char *name, void *buffer, size_t size,
+				int flags)
 {
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 
diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c
index bdc285a..ef4180b 100644
--- a/fs/orangefs/xattr.c
+++ b/fs/orangefs/xattr.c
@@ -541,7 +541,8 @@
 				      struct inode *inode,
 				      const char *name,
 				      void *buffer,
-				      size_t size)
+				      size_t size,
+				      int flags)
 {
 	return orangefs_inode_getxattr(inode, name, buffer, size);
 
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 955ecd4..b9d97e7 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -962,7 +962,7 @@
 		dput(parent);
 		dput(next);
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(dentry->d_sb, old_cred);
 
 	return err;
 }
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index 28a075b..3c6f5f6 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -565,7 +565,7 @@
 			      struct ovl_cattr *attr, bool origin)
 {
 	int err;
-	const struct cred *old_cred;
+	const struct cred *old_cred, *hold_cred = NULL;
 	struct cred *override_cred;
 	struct dentry *parent = dentry->d_parent;
 
@@ -592,14 +592,15 @@
 		override_cred->fsgid = inode->i_gid;
 		if (!attr->hardlink) {
 			err = security_dentry_create_files_as(dentry,
-					attr->mode, &dentry->d_name, old_cred,
+					attr->mode, &dentry->d_name,
+					old_cred ? old_cred : current_cred(),
 					override_cred);
 			if (err) {
 				put_cred(override_cred);
 				goto out_revert_creds;
 			}
 		}
-		put_cred(override_creds(override_cred));
+		hold_cred = override_creds(override_cred);
 		put_cred(override_cred);
 
 		if (!ovl_dentry_is_whiteout(dentry))
@@ -608,7 +609,9 @@
 			err = ovl_create_over_whiteout(dentry, inode, attr);
 	}
 out_revert_creds:
-	revert_creds(old_cred);
+	ovl_revert_creds(dentry->d_sb, old_cred ?: hold_cred);
+	if (old_cred && hold_cred)
+		put_cred(hold_cred);
 	return err;
 }
 
@@ -684,7 +687,7 @@
 
 	old_cred = ovl_override_creds(dentry->d_sb);
 	err = ovl_set_redirect(dentry, false);
-	revert_creds(old_cred);
+	ovl_revert_creds(dentry->d_sb, old_cred);
 
 	return err;
 }
@@ -903,7 +906,7 @@
 		err = ovl_remove_upper(dentry, is_dir, &list);
 	else
 		err = ovl_remove_and_whiteout(dentry, &list);
-	revert_creds(old_cred);
+	ovl_revert_creds(dentry->d_sb, old_cred);
 	if (!err) {
 		if (is_dir)
 			clear_nlink(dentry->d_inode);
@@ -1273,7 +1276,7 @@
 out_unlock:
 	unlock_rename(new_upperdir, old_upperdir);
 out_revert_creds:
-	revert_creds(old_cred);
+	ovl_revert_creds(old->d_sb, old_cred);
 	if (update_nlink)
 		ovl_nlink_end(new);
 out_drop_write:
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index efccb7c..83d95fb 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -53,13 +53,13 @@
 	err = inode_permission(realinode, MAY_OPEN | acc_mode);
 	if (err) {
 		realfile = ERR_PTR(err);
-	} else if (!inode_owner_or_capable(realinode)) {
+	} else if (old_cred && !inode_owner_or_capable(realinode)) {
 		realfile = ERR_PTR(-EPERM);
 	} else {
 		realfile = open_with_fake_path(&file->f_path, flags, realinode,
 					       current_cred());
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(inode->i_sb, old_cred);
 
 	pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n",
 		 file, file, ovl_whatisit(inode, realinode), file->f_flags,
@@ -209,7 +209,7 @@
 
 	old_cred = ovl_override_creds(inode->i_sb);
 	ret = vfs_llseek(real.file, offset, whence);
-	revert_creds(old_cred);
+	ovl_revert_creds(inode->i_sb, old_cred);
 
 	file->f_pos = real.file->f_pos;
 	ovl_inode_unlock(inode);
@@ -323,7 +323,8 @@
 			ovl_aio_cleanup_handler(aio_req);
 	}
 out:
-	revert_creds(old_cred);
+	ovl_revert_creds(file_inode(file)->i_sb, old_cred);
+
 	ovl_file_accessed(file);
 
 	fdput(real);
@@ -388,7 +389,7 @@
 			ovl_aio_cleanup_handler(aio_req);
 	}
 out:
-	revert_creds(old_cred);
+	ovl_revert_creds(file_inode(file)->i_sb, old_cred);
 	fdput(real);
 
 out_unlock:
@@ -411,7 +412,7 @@
 
 	old_cred = ovl_override_creds(file_inode(in)->i_sb);
 	ret = generic_file_splice_read(real.file, ppos, pipe, len, flags);
-	revert_creds(old_cred);
+	ovl_revert_creds(file_inode(in)->i_sb, old_cred);
 
 	ovl_file_accessed(in);
 	fdput(real);
@@ -432,7 +433,7 @@
 
 	old_cred = ovl_override_creds(file_inode(out)->i_sb);
 	ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
-	revert_creds(old_cred);
+	ovl_revert_creds(file_inode(out)->i_sb, old_cred);
 
 	ovl_file_accessed(out);
 	fdput(real);
@@ -456,7 +457,7 @@
 	if (file_inode(real.file) == ovl_inode_upper(file_inode(file))) {
 		old_cred = ovl_override_creds(file_inode(file)->i_sb);
 		ret = vfs_fsync_range(real.file, start, end, datasync);
-		revert_creds(old_cred);
+		ovl_revert_creds(file_inode(file)->i_sb, old_cred);
 	}
 
 	fdput(real);
@@ -480,7 +481,7 @@
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	ret = call_mmap(vma->vm_file, vma);
-	revert_creds(old_cred);
+	ovl_revert_creds(file_inode(file)->i_sb, old_cred);
 
 	if (ret) {
 		/* Drop reference count from new vm_file value */
@@ -508,7 +509,7 @@
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	ret = vfs_fallocate(real.file, mode, offset, len);
-	revert_creds(old_cred);
+	ovl_revert_creds(file_inode(file)->i_sb, old_cred);
 
 	/* Update size */
 	ovl_copyattr(ovl_inode_real(inode), inode);
@@ -530,7 +531,7 @@
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	ret = vfs_fadvise(real.file, offset, len, advice);
-	revert_creds(old_cred);
+	ovl_revert_creds(file_inode(file)->i_sb, old_cred);
 
 	fdput(real);
 
@@ -552,7 +553,7 @@
 	ret = security_file_ioctl(real.file, cmd, arg);
 	if (!ret)
 		ret = vfs_ioctl(real.file, cmd, arg);
-	revert_creds(old_cred);
+	ovl_revert_creds(file_inode(file)->i_sb, old_cred);
 
 	fdput(real);
 
@@ -741,7 +742,7 @@
 						flags);
 		break;
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(file_inode(file_out)->i_sb, old_cred);
 
 	/* Update size */
 	ovl_copyattr(ovl_inode_real(inode_out), inode_out);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index b584dca..fb0ec01 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -80,7 +80,7 @@
 		inode_lock(upperdentry->d_inode);
 		old_cred = ovl_override_creds(dentry->d_sb);
 		err = notify_change(upperdentry, attr, NULL);
-		revert_creds(old_cred);
+		ovl_revert_creds(dentry->d_sb, old_cred);
 		if (!err)
 			ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
 		inode_unlock(upperdentry->d_inode);
@@ -272,7 +272,7 @@
 		stat->nlink = dentry->d_inode->i_nlink;
 
 out:
-	revert_creds(old_cred);
+	ovl_revert_creds(dentry->d_sb, old_cred);
 
 	return err;
 }
@@ -306,7 +306,7 @@
 		mask |= MAY_READ;
 	}
 	err = inode_permission(realinode, mask);
-	revert_creds(old_cred);
+	ovl_revert_creds(inode->i_sb, old_cred);
 
 	return err;
 }
@@ -323,7 +323,7 @@
 
 	old_cred = ovl_override_creds(dentry->d_sb);
 	p = vfs_get_link(ovl_dentry_real(dentry), done);
-	revert_creds(old_cred);
+	ovl_revert_creds(dentry->d_sb, old_cred);
 	return p;
 }
 
@@ -366,7 +366,7 @@
 		WARN_ON(flags != XATTR_REPLACE);
 		err = vfs_removexattr(realdentry, name);
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(dentry->d_sb, old_cred);
 
 	/* copy c/mtime */
 	ovl_copyattr(d_inode(realdentry), inode);
@@ -378,7 +378,7 @@
 }
 
 int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
-		  void *value, size_t size)
+		  void *value, size_t size, int flags)
 {
 	ssize_t res;
 	const struct cred *old_cred;
@@ -386,8 +386,9 @@
 		ovl_i_dentry_upper(inode) ?: ovl_dentry_lower(dentry);
 
 	old_cred = ovl_override_creds(dentry->d_sb);
-	res = vfs_getxattr(realdentry, name, value, size);
-	revert_creds(old_cred);
+	res = __vfs_getxattr(realdentry, d_inode(realdentry), name,
+			     value, size, flags);
+	ovl_revert_creds(dentry->d_sb, old_cred);
 	return res;
 }
 
@@ -415,7 +416,7 @@
 
 	old_cred = ovl_override_creds(dentry->d_sb);
 	res = vfs_listxattr(realdentry, list, size);
-	revert_creds(old_cred);
+	ovl_revert_creds(dentry->d_sb, old_cred);
 	if (res <= 0 || size == 0)
 		return res;
 
@@ -450,7 +451,7 @@
 
 	old_cred = ovl_override_creds(inode->i_sb);
 	acl = get_acl(realinode, type);
-	revert_creds(old_cred);
+	ovl_revert_creds(inode->i_sb, old_cred);
 
 	return acl;
 }
@@ -484,7 +485,7 @@
 
 	old_cred = ovl_override_creds(inode->i_sb);
 	err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
-	revert_creds(old_cred);
+	ovl_revert_creds(inode->i_sb, old_cred);
 
 	return err;
 }
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index a6162c4..0bf283e 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -108,7 +108,8 @@
 static struct ovl_fh *ovl_get_fh(struct ovl_fs *ofs, struct dentry *dentry,
 				 enum ovl_xattr ox)
 {
-	int res, err;
+	ssize_t res;
+	int err;
 	struct ovl_fh *fh = NULL;
 
 	res = ovl_do_getxattr(ofs, dentry, ox, NULL, 0);
@@ -143,10 +144,10 @@
 	return NULL;
 
 fail:
-	pr_warn_ratelimited("failed to get origin (%i)\n", res);
+	pr_warn_ratelimited("failed to get origin (%zi)\n", res);
 	goto out;
 invalid:
-	pr_warn_ratelimited("invalid origin (%*phN)\n", res, fh);
+	pr_warn_ratelimited("invalid origin (%*phN)\n", (int)res, fh);
 	goto out;
 }
 
@@ -1097,7 +1098,7 @@
 	ovl_dentry_update_reval(dentry, upperdentry,
 			DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE);
 
-	revert_creds(old_cred);
+	ovl_revert_creds(dentry->d_sb, old_cred);
 	if (origin_path) {
 		dput(origin_path->dentry);
 		kfree(origin_path);
@@ -1124,7 +1125,7 @@
 	kfree(upperredirect);
 out:
 	kfree(d.redirect);
-	revert_creds(old_cred);
+	ovl_revert_creds(dentry->d_sb, old_cred);
 	return ERR_PTR(err);
 }
 
@@ -1176,7 +1177,7 @@
 			dput(this);
 		}
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(dentry->d_sb, old_cred);
 
 	return positive;
 }
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index f8880aa..da4a25a 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -184,7 +184,9 @@
 				      size_t size)
 {
 	const char *name = ovl_xattr(ofs, ox);
-	return vfs_getxattr(dentry, name, value, size);
+	struct inode *ip = d_inode(dentry);
+
+	return __vfs_getxattr(dentry, ip, name, value, size, XATTR_NOSECURITY);
 }
 
 static inline int ovl_do_setxattr(struct ovl_fs *ofs, struct dentry *dentry,
@@ -251,6 +253,7 @@
 void ovl_drop_write(struct dentry *dentry);
 struct dentry *ovl_workdir(struct dentry *dentry);
 const struct cred *ovl_override_creds(struct super_block *sb);
+void ovl_revert_creds(struct super_block *sb, const struct cred *oldcred);
 int ovl_can_decode_fh(struct super_block *sb);
 struct dentry *ovl_indexdir(struct super_block *sb);
 bool ovl_index_all(struct super_block *sb);
@@ -439,7 +442,7 @@
 int ovl_xattr_set(struct dentry *dentry, struct inode *inode, const char *name,
 		  const void *value, size_t size, int flags);
 int ovl_xattr_get(struct dentry *dentry, struct inode *inode, const char *name,
-		  void *value, size_t size);
+		  void *value, size_t size, int flags);
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
 struct posix_acl *ovl_get_acl(struct inode *inode, int type);
 int ovl_update_time(struct inode *inode, struct timespec64 *ts, int flags);
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index 1b5a2094..597f009 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -18,6 +18,7 @@
 	int xino;
 	bool metacopy;
 	bool ovl_volatile;
+	bool override_creds;
 };
 
 struct ovl_sb {
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index 01620eb..38d5b85 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -286,7 +286,7 @@
 		}
 		inode_unlock(dir->d_inode);
 	}
-	revert_creds(old_cred);
+	ovl_revert_creds(rdd->dentry->d_sb, old_cred);
 
 	return err;
 }
@@ -796,7 +796,7 @@
 	}
 	err = 0;
 out:
-	revert_creds(old_cred);
+	ovl_revert_creds(dentry->d_sb, old_cred);
 	return err;
 }
 
@@ -848,7 +848,7 @@
 
 	old_cred = ovl_override_creds(file_inode(file)->i_sb);
 	res = ovl_path_open(realpath, O_RDONLY | (file->f_flags & O_LARGEFILE));
-	revert_creds(old_cred);
+	ovl_revert_creds(file_inode(file)->i_sb, old_cred);
 
 	return res;
 }
@@ -986,7 +986,7 @@
 
 	old_cred = ovl_override_creds(dentry->d_sb);
 	err = ovl_dir_read_merged(dentry, list, &root);
-	revert_creds(old_cred);
+	ovl_revert_creds(dentry->d_sb, old_cred);
 	if (err)
 		return err;
 
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 290983b..28c5d29 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -53,6 +53,11 @@
 MODULE_PARM_DESC(xino_auto,
 		 "Auto enable xino feature");
 
+static bool __read_mostly ovl_override_creds_def = true;
+module_param_named(override_creds, ovl_override_creds_def, bool, 0644);
+MODULE_PARM_DESC(override_creds,
+		 "Use mounter's credentials for accesses");
+
 static void ovl_entry_stack_free(struct ovl_entry *oe)
 {
 	unsigned int i;
@@ -366,6 +371,9 @@
 			   ofs->config.metacopy ? "on" : "off");
 	if (ofs->config.ovl_volatile)
 		seq_puts(m, ",volatile");
+	if (ofs->config.override_creds != ovl_override_creds_def)
+		seq_show_option(m, "override_creds",
+				ofs->config.override_creds ? "on" : "off");
 	return 0;
 }
 
@@ -418,6 +426,8 @@
 	OPT_METACOPY_ON,
 	OPT_METACOPY_OFF,
 	OPT_VOLATILE,
+	OPT_OVERRIDE_CREDS_ON,
+	OPT_OVERRIDE_CREDS_OFF,
 	OPT_ERR,
 };
 
@@ -437,6 +447,8 @@
 	{OPT_METACOPY_ON,		"metacopy=on"},
 	{OPT_METACOPY_OFF,		"metacopy=off"},
 	{OPT_VOLATILE,			"volatile"},
+	{OPT_OVERRIDE_CREDS_ON,		"override_creds=on"},
+	{OPT_OVERRIDE_CREDS_OFF,	"override_creds=off"},
 	{OPT_ERR,			NULL}
 };
 
@@ -496,6 +508,7 @@
 	config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
 	if (!config->redirect_mode)
 		return -ENOMEM;
+	config->override_creds = ovl_override_creds_def;
 
 	while ((p = ovl_next_opt(&opt)) != NULL) {
 		int token;
@@ -585,6 +598,14 @@
 			config->ovl_volatile = true;
 			break;
 
+		case OPT_OVERRIDE_CREDS_ON:
+			config->override_creds = true;
+			break;
+
+		case OPT_OVERRIDE_CREDS_OFF:
+			config->override_creds = false;
+			break;
+
 		default:
 			pr_err("unrecognized mount option \"%s\" or missing value\n",
 					p);
@@ -928,9 +949,9 @@
 static int __maybe_unused
 ovl_posix_acl_xattr_get(const struct xattr_handler *handler,
 			struct dentry *dentry, struct inode *inode,
-			const char *name, void *buffer, size_t size)
+			const char *name, void *buffer, size_t size, int flags)
 {
-	return ovl_xattr_get(dentry, inode, handler->name, buffer, size);
+	return ovl_xattr_get(dentry, inode, handler->name, buffer, size, flags);
 }
 
 static int __maybe_unused
@@ -993,7 +1014,8 @@
 
 static int ovl_own_xattr_get(const struct xattr_handler *handler,
 			     struct dentry *dentry, struct inode *inode,
-			     const char *name, void *buffer, size_t size)
+			     const char *name, void *buffer, size_t size,
+			     int flags)
 {
 	return -EOPNOTSUPP;
 }
@@ -1008,9 +1030,10 @@
 
 static int ovl_other_xattr_get(const struct xattr_handler *handler,
 			       struct dentry *dentry, struct inode *inode,
-			       const char *name, void *buffer, size_t size)
+			       const char *name, void *buffer, size_t size,
+			       int flags)
 {
-	return ovl_xattr_get(dentry, inode, name, buffer, size);
+	return ovl_xattr_get(dentry, inode, name, buffer, size, flags);
 }
 
 static int ovl_other_xattr_set(const struct xattr_handler *handler,
@@ -2005,7 +2028,6 @@
 	kfree(splitlower);
 
 	sb->s_root = root_dentry;
-
 	return 0;
 
 out_free_oe:
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index 23f4756..f87f228 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -37,9 +37,17 @@
 {
 	struct ovl_fs *ofs = sb->s_fs_info;
 
+	if (!ofs->config.override_creds)
+		return NULL;
 	return override_creds(ofs->creator_cred);
 }
 
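+/*
+ * A NULL old_cred means ovl_override_creds() was skipped because
+ * override_creds=off, so there is nothing to revert.
+ */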
+void ovl_revert_creds(struct super_block *sb, const struct cred *old_cred)
+{
+	if (old_cred)
+		revert_creds(old_cred);
+}
+
 /*
  * Check if underlying fs supports file handles and try to determine encoding
  * type, in order to deduce maximum inode number used by fs.
@@ -546,7 +554,7 @@
 
 bool ovl_check_origin_xattr(struct ovl_fs *ofs, struct dentry *dentry)
 {
-	int res;
+	ssize_t res;
 
 	res = ovl_do_getxattr(ofs, dentry, OVL_XATTR_ORIGIN, NULL, 0);
 
@@ -560,7 +568,7 @@
 bool ovl_check_dir_xattr(struct super_block *sb, struct dentry *dentry,
 			 enum ovl_xattr ox)
 {
-	int res;
+	ssize_t res;
 	char val;
 
 	if (!d_is_dir(dentry))
@@ -823,7 +831,7 @@
 	 * value relative to the upper inode nlink in an upper inode xattr.
 	 */
 	err = ovl_set_nlink_upper(dentry);
-	revert_creds(old_cred);
+	ovl_revert_creds(dentry->d_sb, old_cred);
 
 out:
 	if (err)
@@ -841,7 +849,7 @@
 
 		old_cred = ovl_override_creds(dentry->d_sb);
 		ovl_cleanup_index(dentry);
-		revert_creds(old_cred);
+		ovl_revert_creds(dentry->d_sb, old_cred);
 	}
 
 	ovl_inode_unlock(inode);
@@ -869,7 +877,7 @@
 /* err < 0, 0 if no metacopy xattr, 1 if metacopy xattr found */
 int ovl_check_metacopy_xattr(struct ovl_fs *ofs, struct dentry *dentry)
 {
-	int res;
+	ssize_t res;
 
 	/* Only regular files can have metacopy xattr */
 	if (!S_ISREG(d_inode(dentry)->i_mode))
@@ -884,7 +892,7 @@
 
 	return 1;
 out:
-	pr_warn_ratelimited("failed to get metacopy (%i)\n", res);
+	pr_warn_ratelimited("failed to get metacopy (%zi)\n", res);
 	return res;
 }
 
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 95882b3..f3fbc26 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -835,7 +835,7 @@
 static int
 posix_acl_xattr_get(const struct xattr_handler *handler,
 		    struct dentry *unused, struct inode *inode,
-		    const char *name, void *value, size_t size)
+		    const char *name, void *value, size_t size, int flags)
 {
 	struct posix_acl *acl;
 	int error;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b362523..896b8c91 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -96,6 +96,7 @@
 #include <linux/posix-timers.h>
 #include <linux/time_namespace.h>
 #include <linux/resctrl.h>
+#include <linux/cpufreq_times.h>
 #include <trace/events/oom.h>
 #include "internal.h"
 #include "fd.h"
@@ -3257,6 +3258,9 @@
 #ifdef CONFIG_LIVEPATCH
 	ONE("patch_state",  S_IRUSR, proc_pid_patch_state),
 #endif
+#ifdef CONFIG_CPU_FREQ_TIMES
+	ONE("time_in_state", 0444, proc_time_in_state_show),
+#endif
 #ifdef CONFIG_STACKLEAK_METRICS
 	ONE("stack_depth", S_IRUGO, proc_stack_depth),
 #endif
@@ -3592,6 +3596,9 @@
 #ifdef CONFIG_PROC_PID_ARCH_STATUS
 	ONE("arch_status", S_IRUGO, proc_pid_arch_status),
 #endif
+#ifdef CONFIG_CPU_FREQ_TIMES
+	ONE("time_in_state", 0444, proc_time_in_state_show),
+#endif
 };
 
 static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 217aa27..e653427 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -123,6 +123,56 @@
 }
 #endif
 
+static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
+{
+	const char __user *name = vma_get_anon_name(vma);
+	struct mm_struct *mm = vma->vm_mm;
+
+	unsigned long page_start_vaddr;
+	unsigned long page_offset;
+	unsigned long num_pages;
+	unsigned long max_len = NAME_MAX;
+	int i;
+
+	page_start_vaddr = (unsigned long)name & PAGE_MASK;
+	page_offset = (unsigned long)name - page_start_vaddr;
+	num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);
+
+	seq_puts(m, "[anon:");
+
+	for (i = 0; i < num_pages; i++) {
+		int len;
+		int write_len;
+		const char *kaddr;
+		long pages_pinned;
+		struct page *page;
+
+		pages_pinned = get_user_pages_remote(mm, page_start_vaddr, 1, 0,
+						     &page, NULL, NULL);
+		if (pages_pinned < 1) {
+			seq_puts(m, "<fault>]");
+			return;
+		}
+
+		kaddr = (const char *)kmap(page);
+		len = min(max_len, PAGE_SIZE - page_offset);
+		write_len = strnlen(kaddr + page_offset, len);
+		seq_write(m, kaddr + page_offset, write_len);
+		kunmap(page);
+		put_page(page);
+
+		/* if strnlen hit a null terminator then we're done */
+		if (write_len != len)
+			break;
+
+		max_len -= len;
+		page_offset = 0;
+		page_start_vaddr += PAGE_SIZE;
+	}
+
+	seq_putc(m, ']');
+}
+
 static void *m_start(struct seq_file *m, loff_t *ppos)
 {
 	struct proc_maps_private *priv = m->private;
@@ -319,8 +369,15 @@
 			goto done;
 		}
 
-		if (is_stack(vma))
+		if (is_stack(vma)) {
 			name = "[stack]";
+			goto done;
+		}
+
+		if (vma_get_anon_name(vma)) {
+			seq_pad(m, ' ');
+			seq_print_vma_name(m, vma);
+		}
 	}
 
 done:
@@ -820,6 +877,11 @@
 	smap_gather_stats(vma, &mss, 0);
 
 	show_map_vma(m, vma);
+	if (vma_get_anon_name(vma)) {
+		seq_puts(m, "Name:           ");
+		seq_print_vma_name(m, vma);
+		seq_putc(m, '\n');
+	}
 
 	SEQ_PUT_DEC("Size:           ", vma->vm_end - vma->vm_start);
 	SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma));
diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c
index 20be9a0..eedfa07 100644
--- a/fs/reiserfs/xattr_security.c
+++ b/fs/reiserfs/xattr_security.c
@@ -11,7 +11,8 @@
 
 static int
 security_get(const struct xattr_handler *handler, struct dentry *unused,
-	     struct inode *inode, const char *name, void *buffer, size_t size)
+	     struct inode *inode, const char *name, void *buffer, size_t size,
+	     int flags)
 {
 	if (IS_PRIVATE(inode))
 		return -EPERM;
diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c
index 5ed48da..2d11d98 100644
--- a/fs/reiserfs/xattr_trusted.c
+++ b/fs/reiserfs/xattr_trusted.c
@@ -10,7 +10,8 @@
 
 static int
 trusted_get(const struct xattr_handler *handler, struct dentry *unused,
-	    struct inode *inode, const char *name, void *buffer, size_t size)
+	    struct inode *inode, const char *name, void *buffer, size_t size,
+	    int flags)
 {
 	if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode))
 		return -EPERM;
diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c
index a573ca4..2a59d85 100644
--- a/fs/reiserfs/xattr_user.c
+++ b/fs/reiserfs/xattr_user.c
@@ -9,7 +9,8 @@
 
 static int
 user_get(const struct xattr_handler *handler, struct dentry *unused,
-	 struct inode *inode, const char *name, void *buffer, size_t size)
+	 struct inode *inode, const char *name, void *buffer, size_t size,
+	 int flags)
 {
 	if (!reiserfs_xattrs_user(inode->i_sb))
 		return -EOPNOTSUPP;
diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c
index e1e3f3d..d8d58c9 100644
--- a/fs/squashfs/xattr.c
+++ b/fs/squashfs/xattr.c
@@ -204,7 +204,7 @@
 				      struct dentry *unused,
 				      struct inode *inode,
 				      const char *name,
-				      void *buffer, size_t size)
+				      void *buffer, size_t size, int flags)
 {
 	return squashfs_xattr_get(inode, handler->flags, name,
 		buffer, size);
diff --git a/fs/sync.c b/fs/sync.c
index 1373a61..8e1c227 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -9,7 +9,7 @@
 #include <linux/slab.h>
 #include <linux/export.h>
 #include <linux/namei.h>
-#include <linux/sched.h>
+#include <linux/sched/xacct.h>
 #include <linux/writeback.h>
 #include <linux/syscalls.h>
 #include <linux/linkage.h>
@@ -223,6 +223,7 @@
 	if (f.file) {
 		ret = vfs_fsync(f.file, datasync);
 		fdput(f);
+		inc_syscfs(current);
 	}
 	return ret;
 }
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 155521e..7a92043 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -203,6 +203,7 @@
 	dbg_gen("'%pd' in dir ino %lu", dentry, dir->i_ino);
 
 	err = fscrypt_prepare_lookup(dir, dentry, &nm);
+	generic_set_encrypted_ci_d_ops(dentry);
 	if (err == -ENOENT)
 		return d_splice_alias(NULL, dentry);
 	if (err)
diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c
index a0b9b34..1afb628 100644
--- a/fs/ubifs/xattr.c
+++ b/fs/ubifs/xattr.c
@@ -671,7 +671,8 @@
 
 static int xattr_get(const struct xattr_handler *handler,
 			   struct dentry *dentry, struct inode *inode,
-			   const char *name, void *buffer, size_t size)
+			   const char *name, void *buffer, size_t size,
+			   int flags)
 {
 	dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name,
 		inode->i_ino, dentry, size);
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 000b457..d22b9ec 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -869,7 +869,8 @@
 				 new_flags, vma->anon_vma,
 				 vma->vm_file, vma->vm_pgoff,
 				 vma_policy(vma),
-				 NULL_VM_UFFD_CTX);
+				 NULL_VM_UFFD_CTX,
+				 vma_get_anon_name(vma));
 		if (prev)
 			vma = prev;
 		else
@@ -1417,7 +1418,8 @@
 		prev = vma_merge(mm, prev, start, vma_end, new_flags,
 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
 				 vma_policy(vma),
-				 ((struct vm_userfaultfd_ctx){ ctx }));
+				 ((struct vm_userfaultfd_ctx){ ctx }),
+				 vma_get_anon_name(vma));
 		if (prev) {
 			vma = prev;
 			goto next;
@@ -1587,7 +1589,8 @@
 		prev = vma_merge(mm, prev, start, vma_end, new_flags,
 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
 				 vma_policy(vma),
-				 NULL_VM_UFFD_CTX);
+				 NULL_VM_UFFD_CTX,
+				 vma_get_anon_name(vma));
 		if (prev) {
 			vma = prev;
 			goto next;
diff --git a/fs/xattr.c b/fs/xattr.c
index cd7a563..d6bf5a7 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -345,7 +345,7 @@
 		return PTR_ERR(handler);
 	if (!handler->get)
 		return -EOPNOTSUPP;
-	error = handler->get(handler, dentry, inode, name, NULL, 0);
+	error = handler->get(handler, dentry, inode, name, NULL, 0, 0);
 	if (error < 0)
 		return error;
 
@@ -356,32 +356,20 @@
 		memset(value, 0, error + 1);
 	}
 
-	error = handler->get(handler, dentry, inode, name, value, error);
+	error = handler->get(handler, dentry, inode, name, value, error, 0);
 	*xattr_value = value;
 	return error;
 }
 
 ssize_t
 __vfs_getxattr(struct dentry *dentry, struct inode *inode, const char *name,
-	       void *value, size_t size)
+	       void *value, size_t size, int flags)
 {
 	const struct xattr_handler *handler;
-
-	handler = xattr_resolve_name(inode, &name);
-	if (IS_ERR(handler))
-		return PTR_ERR(handler);
-	if (!handler->get)
-		return -EOPNOTSUPP;
-	return handler->get(handler, dentry, inode, name, value, size);
-}
-EXPORT_SYMBOL(__vfs_getxattr);
-
-ssize_t
-vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
-{
-	struct inode *inode = dentry->d_inode;
 	int error;
 
+	if (flags & XATTR_NOSECURITY)
+		goto nolsm;
 	error = xattr_permission(inode, name, MAY_READ);
 	if (error)
 		return error;
@@ -403,7 +391,19 @@
 		return ret;
 	}
 nolsm:
-	return __vfs_getxattr(dentry, inode, name, value, size);
+	handler = xattr_resolve_name(inode, &name);
+	if (IS_ERR(handler))
+		return PTR_ERR(handler);
+	if (!handler->get)
+		return -EOPNOTSUPP;
+	return handler->get(handler, dentry, inode, name, value, size, flags);
+}
+EXPORT_SYMBOL(__vfs_getxattr);
+
+ssize_t
+vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
+{
+	return __vfs_getxattr(dentry, dentry->d_inode, name, value, size, 0);
 }
 EXPORT_SYMBOL_GPL(vfs_getxattr);
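
For reference, a minimal sketch of a caller using the reworked __vfs_getxattr(): the helper name and buffer handling are hypothetical, and XATTR_NOSECURITY is the flag tested in the nolsm path above (its definition lives in a uapi hunk not shown here).

/* Hypothetical in-kernel caller: read an xattr while bypassing the
 * LSM hooks, e.g. to avoid recursion when called from within an LSM.
 */
static ssize_t example_read_label(struct dentry *dentry, char *buf, size_t len)
{
	/* XATTR_NOSECURITY skips xattr_permission() and the LSM hook
	 * and dispatches straight to the xattr handler.
	 */
	return __vfs_getxattr(dentry, d_inode(dentry), "security.selinux",
			      buf, len, XATTR_NOSECURITY);
}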
 
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index bca48b3..dd81d10 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -19,7 +19,8 @@
 
 static int
 xfs_xattr_get(const struct xattr_handler *handler, struct dentry *unused,
-		struct inode *inode, const char *name, void *value, size_t size)
+		struct inode *inode, const char *name, void *value, size_t size,
+		int flags)
 {
 	struct xfs_da_args	args = {
 		.dp		= XFS_I(inode),
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index e57d044..e395d1f 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -29,6 +29,7 @@
 
 #include <linux/list.h>
 #include <linux/irqreturn.h>
+#include <linux/uuid.h>
 
 #include <drm/drm_device.h>
 
@@ -461,6 +462,15 @@
 				struct vm_area_struct *vma);
 
 	/**
+	 * @gem_prime_get_uuid:
+	 *
+	 * get_uuid hook for GEM drivers. Retrieves the virtio UUID of the
+	 * given GEM buffer.
+	 */
+	int (*gem_prime_get_uuid)(struct drm_gem_object *obj,
+				  uuid_t *uuid);
+
+	/**
 	 * @dumb_create:
 	 *
 	 * This creates a new dumb buffer in the driver's backing storage manager (GEM,
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 360e637..952f475 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -19,12 +19,18 @@
 #define MIPI_DSI_MSG_REQ_ACK	BIT(0)
 /* use Low Power Mode to transmit message */
 #define MIPI_DSI_MSG_USE_LPM	BIT(1)
+/* read mipi_dsi_msg.ctrl and unicast only to that ctrl */
+#define MIPI_DSI_MSG_UNICAST	BIT(2)
+/* Stack all commands until the last-command bit is set, then trigger them in one go */
+#define MIPI_DSI_MSG_LASTCOMMAND BIT(3)
 
 /**
  * struct mipi_dsi_msg - read/write DSI buffer
  * @channel: virtual channel id
  * @type: payload data type
  * @flags: flags controlling this message transmission
+ * @ctrl: ctrl index to transmit on
+ * @wait_ms: duration in ms to wait after message transmission
  * @tx_len: length of @tx_buf
  * @tx_buf: data to be written
  * @rx_len: length of @rx_buf
@@ -34,6 +40,8 @@
 	u8 channel;
 	u8 type;
 	u16 flags;
+	u32 ctrl;
+	u32 wait_ms;
 
 	size_t tx_len;
 	const void *tx_buf;
@@ -132,6 +140,10 @@
 #define MIPI_DSI_CLOCK_NON_CONTINUOUS	BIT(10)
 /* transmit data in low power */
 #define MIPI_DSI_MODE_LPM		BIT(11)
+/* disable BLLP area */
+#define MIPI_DSI_MODE_VIDEO_BLLP	BIT(12)
+/* disable EOF BLLP area */
+#define MIPI_DSI_MODE_VIDEO_EOF_BLLP	BIT(13)
 
 enum mipi_dsi_pixel_format {
 	MIPI_DSI_FMT_RGB888,
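
A sketch of how a panel driver might use the new ctrl/wait_ms fields together with the MIPI_DSI_MSG_UNICAST flag added above; the DCS payload and controller index are illustrative, not taken from this patch.

static ssize_t example_send_unicast(struct mipi_dsi_host *host)
{
	u8 payload[2] = { 0x51, 0xff };	/* hypothetical DCS brightness write */
	struct mipi_dsi_msg msg = {
		.channel = 0,
		.type	 = MIPI_DSI_DCS_SHORT_WRITE_PARAM,
		.flags	 = MIPI_DSI_MSG_UNICAST,
		.ctrl	 = 0,		/* unicast target controller */
		.wait_ms = 10,		/* settle time after the transfer */
		.tx_len	 = sizeof(payload),
		.tx_buf	 = payload,
	};

	return host->ops->transfer(host, &msg);
}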
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index c34a3e8..6292fa6 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -60,7 +60,7 @@
 	void (*free_cb)(struct kref *kref);
 };
 
-#define DRM_OBJECT_MAX_PROPERTY 24
+#define DRM_OBJECT_MAX_PROPERTY 64
 /**
  * struct drm_object_properties - property tracking for &drm_mode_object
  */
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 0f69f9f..a9d0c6b 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -107,5 +107,6 @@
 int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
 				     dma_addr_t *addrs, int max_pages);
 
+int drm_gem_dmabuf_get_uuid(struct dma_buf *dma_buf, uuid_t *uuid);
 
 #endif /* __DRM_PRIME_H__ */
diff --git a/include/linux/android_vendor.h b/include/linux/android_vendor.h
new file mode 100644
index 0000000..c0d3abb
--- /dev/null
+++ b/include/linux/android_vendor.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * android_vendor.h - Android vendor data
+ *
+ * Copyright 2020 Google LLC
+ *
+ * These macros are to be used to reserve space in kernel data structures
+ * for use by vendor modules.
+ *
+ * These macros should be used before the kernel ABI is "frozen".
+ * Fields can be added to various kernel structures that need space
+ * for functionality implemented in vendor modules. The use of
+ * these fields is vendor specific.
+ */
+#ifndef _ANDROID_VENDOR_H
+#define _ANDROID_VENDOR_H
+
+/*
+ * ANDROID_VENDOR_DATA
+ *   Reserve some "padding" in a structure for potential future use.
+ *   This is normally placed at the end of a structure.
+ *   number: the "number" of the padding variable in the structure.  Start with
+ *   1 and go up.
+ *
+ * ANDROID_VENDOR_DATA_ARRAY
+ *   Same as ANDROID_VENDOR_DATA but allocates an array of u64 with
+ *   the specified size
+ */
+#define ANDROID_VENDOR_DATA(n)		u64 android_vendor_data##n
+#define ANDROID_VENDOR_DATA_ARRAY(n, s)	u64 android_vendor_data##n[s]
+
+#endif /* _ANDROID_VENDOR_H */
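
A sketch of the intended use, showing what the macros expand to; the structure itself is hypothetical.

struct example_driver_data {
	int		state;
	spinlock_t	lock;

	/* reserved for vendor modules, placed at the end */
	ANDROID_VENDOR_DATA(1);			/* u64 android_vendor_data1 */
	ANDROID_VENDOR_DATA_ARRAY(2, 4);	/* u64 android_vendor_data2[4] */
};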
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 0f6cd6b..3f33dfe7 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -7,6 +7,7 @@
 
 #include <linux/types.h>
 #include <linux/percpu.h>
+#include <linux/android_vendor.h>
 
 void topology_normalize_cpu_scale(void);
 int topology_update_cpu_topology(void);
@@ -54,6 +55,8 @@
 	cpumask_t thread_sibling;
 	cpumask_t core_sibling;
 	cpumask_t llc_sibling;
+
+	cpumask_t android_vendor_data1;
 };
 
 #ifdef CONFIG_GENERIC_ARCH_TOPOLOGY
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
index 69b24fe..c495572 100644
--- a/include/linux/blk-crypto.h
+++ b/include/linux/blk-crypto.h
@@ -17,6 +17,8 @@
 };
 
 #define BLK_CRYPTO_MAX_KEY_SIZE		64
+#define BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE	128
+
 /**
  * struct blk_crypto_config - an inline encryption key's crypto configuration
  * @crypto_mode: encryption algorithm this key is for
@@ -25,11 +27,14 @@
  *	ciphertext.  This is always a power of 2.  It might be e.g. the
  *	filesystem block size or the disk sector size.
  * @dun_bytes: the maximum number of bytes of DUN used when using this key
+ * @is_hw_wrapped: @raw points to a wrapped key to be used by inline
+ *	encryption hardware that accepts wrapped keys.
  */
 struct blk_crypto_config {
 	enum blk_crypto_mode_num crypto_mode;
 	unsigned int data_unit_size;
 	unsigned int dun_bytes;
+	bool is_hw_wrapped;
 };
 
 /**
@@ -48,7 +53,7 @@
 	struct blk_crypto_config crypto_cfg;
 	unsigned int data_unit_size_bits;
 	unsigned int size;
-	u8 raw[BLK_CRYPTO_MAX_KEY_SIZE];
+	u8 raw[BLK_CRYPTO_MAX_WRAPPED_KEY_SIZE];
 };
 
 #define BLK_CRYPTO_MAX_IV_SIZE		32
@@ -89,7 +94,9 @@
 				 unsigned int bytes,
 				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]);
 
-int blk_crypto_init_key(struct blk_crypto_key *blk_key, const u8 *raw_key,
+int blk_crypto_init_key(struct blk_crypto_key *blk_key,
+			const u8 *raw_key, unsigned int raw_key_size,
+			bool is_hw_wrapped,
 			enum blk_crypto_mode_num crypto_mode,
 			unsigned int dun_bytes,
 			unsigned int data_unit_size);
@@ -112,6 +119,9 @@
 
 #endif /* CONFIG_BLK_INLINE_ENCRYPTION */
 
+static inline void bio_clone_skip_dm_default_key(struct bio *dst,
+						 const struct bio *src);
+
 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
 /**
  * bio_crypt_clone - clone bio encryption context
@@ -127,9 +137,42 @@
 static inline int bio_crypt_clone(struct bio *dst, struct bio *src,
 				  gfp_t gfp_mask)
 {
+	bio_clone_skip_dm_default_key(dst, src);
 	if (bio_has_crypt_ctx(src))
 		return __bio_crypt_clone(dst, src, gfp_mask);
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
+static inline void bio_set_skip_dm_default_key(struct bio *bio)
+{
+	bio->bi_skip_dm_default_key = true;
+}
+
+static inline bool bio_should_skip_dm_default_key(const struct bio *bio)
+{
+	return bio->bi_skip_dm_default_key;
+}
+
+static inline void bio_clone_skip_dm_default_key(struct bio *dst,
+						 const struct bio *src)
+{
+	dst->bi_skip_dm_default_key = src->bi_skip_dm_default_key;
+}
+#else /* CONFIG_DM_DEFAULT_KEY */
+static inline void bio_set_skip_dm_default_key(struct bio *bio)
+{
+}
+
+static inline bool bio_should_skip_dm_default_key(const struct bio *bio)
+{
+	return false;
+}
+
+static inline void bio_clone_skip_dm_default_key(struct bio *dst,
+						 const struct bio *src)
+{
+}
+#endif /* !CONFIG_DM_DEFAULT_KEY */
+
 #endif /* __LINUX_BLK_CRYPTO_H */
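
A sketch of initializing a hardware-wrapped key through the extended blk_crypto_init_key() signature; the mode, DUN size, and data-unit size below are illustrative.

static int example_init_wrapped_key(struct blk_crypto_key *key,
				    const u8 *wrapped, unsigned int wrapped_size)
{
	return blk_crypto_init_key(key, wrapped, wrapped_size,
				   true,	/* is_hw_wrapped */
				   BLK_ENCRYPTION_MODE_AES_256_XTS,
				   8,		/* dun_bytes */
				   4096);	/* data_unit_size */
}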
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d9b69bb..8bac50d 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -235,6 +235,9 @@
 
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	struct bio_crypt_ctx	*bi_crypt_context;
+#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
+	bool			bi_skip_dm_default_key;
+#endif
 #endif
 
 	union {
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 03a5de5..1025a46 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -32,6 +32,7 @@
 #define CLK_OPS_PARENT_ENABLE	BIT(12)
 /* duty cycle call may be forwarded to the parent clock */
 #define CLK_DUTY_CYCLE_PARENT	BIT(13)
+/* don't hold state */
+#define CLK_DONT_HOLD_STATE	BIT(14)
 
 struct clk;
 struct clk_hw;
@@ -205,6 +206,13 @@
  *		directory is provided as an argument.  Called with
  *		prepare_lock held.  Returns 0 on success, -EERROR otherwise.
  *
+ * @pre_rate_change: Optional callback for a clock to fulfill its rate
+ *		change requirements before any rate change has occurred in
+ *		its clock tree. Returns 0 on success, -EERROR otherwise.
+ *
+ * @post_rate_change: Optional callback for a clock to clean up any
+ *		requirements that were needed while the clock and its tree
+ *		were changing states. Returns 0 on success, -EERROR otherwise.
  *
  * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
  * implementations to split any work between atomic (enable) and sleepable
@@ -252,6 +260,12 @@
 	int		(*init)(struct clk_hw *hw);
 	void		(*terminate)(struct clk_hw *hw);
 	void		(*debug_init)(struct clk_hw *hw, struct dentry *dentry);
+	int		(*pre_rate_change)(struct clk_hw *hw,
+					   unsigned long rate,
+					   unsigned long new_rate);
+	int		(*post_rate_change)(struct clk_hw *hw,
+					    unsigned long old_rate,
+					    unsigned long rate);
 };
 
 /**
@@ -1076,6 +1090,7 @@
 
 void clk_hw_unregister(struct clk_hw *hw);
 void devm_clk_hw_unregister(struct device *dev, struct clk_hw *hw);
+void clk_sync_state(struct device *dev);
 
 /* helper functions */
 const char *__clk_get_name(const struct clk *clk);
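
A sketch of a clock driver wiring up the new rate-change hooks; all callback implementations here are hypothetical.

static const struct clk_ops example_clk_ops = {
	.recalc_rate	  = example_recalc_rate,
	.round_rate	  = example_round_rate,
	.set_rate	  = example_set_rate,
	/* invoked before/after any rate change in this clock's tree */
	.pre_rate_change  = example_pre_rate_change,
	.post_rate_change = example_post_rate_change,
};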
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index acbad3b..54ee250 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1023,14 +1023,6 @@
 }
 #endif
 
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
-			struct cpufreq_governor *old_gov);
-#else
-static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
-			struct cpufreq_governor *old_gov) { }
-#endif
-
 extern void arch_freq_prepare_all(void);
 extern unsigned int arch_freq_get_on_cpu(int cpu);
 
diff --git a/include/linux/cpufreq_times.h b/include/linux/cpufreq_times.h
new file mode 100644
index 0000000..38272a5
--- /dev/null
+++ b/include/linux/cpufreq_times.h
@@ -0,0 +1,42 @@
+/* drivers/cpufreq/cpufreq_times.c
+ *
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_CPUFREQ_TIMES_H
+#define _LINUX_CPUFREQ_TIMES_H
+
+#include <linux/cpufreq.h>
+#include <linux/pid.h>
+
+#ifdef CONFIG_CPU_FREQ_TIMES
+void cpufreq_task_times_init(struct task_struct *p);
+void cpufreq_task_times_alloc(struct task_struct *p);
+void cpufreq_task_times_exit(struct task_struct *p);
+int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
+			    struct pid *pid, struct task_struct *p);
+void cpufreq_acct_update_power(struct task_struct *p, u64 cputime);
+void cpufreq_times_create_policy(struct cpufreq_policy *policy);
+void cpufreq_times_record_transition(struct cpufreq_policy *policy,
+                                     unsigned int new_freq);
+#else
+static inline void cpufreq_task_times_init(struct task_struct *p) {}
+static inline void cpufreq_task_times_alloc(struct task_struct *p) {}
+static inline void cpufreq_task_times_exit(struct task_struct *p) {}
+static inline void cpufreq_acct_update_power(struct task_struct *p,
+					     u64 cputime) {}
+static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {}
+static inline void cpufreq_times_record_transition(
+	struct cpufreq_policy *policy, unsigned int new_freq) {}
+#endif /* CONFIG_CPU_FREQ_TIMES */
+#endif /* _LINUX_CPUFREQ_TIMES_H */
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 04c20de66..7f1478c 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -55,8 +55,6 @@
 extern void cpuset_force_rebuild(void);
 extern void cpuset_update_active_cpus(void);
 extern void cpuset_wait_for_hotplug(void);
-extern void cpuset_read_lock(void);
-extern void cpuset_read_unlock(void);
 extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
 extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
 extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -178,9 +176,6 @@
 
 static inline void cpuset_wait_for_hotplug(void) { }
 
-static inline void cpuset_read_lock(void) { }
-static inline void cpuset_read_unlock(void) { }
-
 static inline void cpuset_cpus_allowed(struct task_struct *p,
 				       struct cpumask *mask)
 {
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 6f95c33..49d0b6f 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -147,6 +147,7 @@
 	struct vfsmount *(*d_automount)(struct path *);
 	int (*d_manage)(const struct path *, bool);
 	struct dentry *(*d_real)(struct dentry *, const struct inode *);
+	void (*d_canonical_path)(const struct path *, struct path *);
 } ____cacheline_aligned;
 
 /*
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 61a66fb..6d05a6a 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -325,6 +325,12 @@
 	 * whether or not its underlying devices have support.
 	 */
 	bool discards_supported:1;
+
+	/*
+	 * Set if inline crypto capabilities from this target's underlying
+	 * device(s) can be exposed via the device-mapper device.
+	 */
+	bool may_passthrough_inline_crypto:1;
 };
 
 void *dm_per_bio_data(struct bio *bio, size_t data_size);
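
A sketch of a target opting in to inline-crypto passthrough, assuming the new bit lives on struct dm_target alongside discards_supported; the target and its ctr method are hypothetical.

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* expose the underlying device's inline crypto capabilities
	 * through the mapped device
	 */
	ti->may_passthrough_inline_crypto = true;
	return 0;
}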
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 957b398..89e3a53 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -210,6 +210,41 @@
 	int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
 
 	/**
+	 * @begin_cpu_access_partial:
+	 *
+	 * This is called from dma_buf_begin_cpu_access_partial() and allows the
+	 * exporter to ensure that the memory specified in the range is
+	 * available for cpu access - the exporter might need to allocate or
+	 * swap-in and pin the backing storage.
+	 * The exporter also needs to ensure that cpu access is
+	 * coherent for the access direction. The direction can be used by the
+	 * exporter to optimize the cache flushing, i.e. access with a different
+	 * direction (read instead of write) might return stale or even bogus
+	 * data (e.g. when the exporter needs to copy the data to temporary
+	 * storage).
+	 *
+	 * This callback is optional.
+	 *
+	 * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
+	 * from userspace (where storage shouldn't be pinned to avoid handing
+	 * de facto mlock rights to userspace) and for the kernel-internal
+	 * users of the various kmap interfaces, where the backing storage must
+	 * be pinned to guarantee that the atomic kmap calls can succeed. Since
+	 * there are no in-kernel users of the kmap interfaces yet, this isn't
+	 * a real problem.
+	 *
+	 * Returns:
+	 *
+	 * 0 on success or a negative error code on failure. This can for
+	 * example fail when the backing storage can't be allocated. Can also
+	 * return -ERESTARTSYS or -EINTR when the call has been interrupted and
+	 * needs to be restarted.
+	 */
+	int (*begin_cpu_access_partial)(struct dma_buf *dmabuf,
+					enum dma_data_direction,
+					unsigned int offset, unsigned int len);
+
+	/**
 	 * @end_cpu_access:
 	 *
 	 * This is called from dma_buf_end_cpu_access() when the importer is
@@ -229,6 +264,28 @@
 	int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
 
 	/**
+	 * @end_cpu_access_partial:
+	 *
+	 * This is called from dma_buf_end_cpu_access_partial() when the
+	 * importer is done accessing the CPU. The exporter can use this to
+	 * limit cache flushing to only the range specified and to unpin any
+	 * resources pinned in @begin_cpu_access_partial.
+	 * The result of any dma_buf kmap calls after end_cpu_access_partial is
+	 * undefined.
+	 *
+	 * This callback is optional.
+	 *
+	 * Returns:
+	 *
+	 * 0 on success or a negative error code on failure. Can return
+	 * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
+	 * to be restarted.
+	 */
+	int (*end_cpu_access_partial)(struct dma_buf *dmabuf,
+				      enum dma_data_direction,
+				      unsigned int offset, unsigned int len);
+
+	/**
 	 * @mmap:
 	 *
 	 * This callback is used by the dma_buf_mmap() function
@@ -267,6 +324,35 @@
 
 	void *(*vmap)(struct dma_buf *);
 	void (*vunmap)(struct dma_buf *, void *vaddr);
+
+	/**
+	 * @get_uuid:
+	 *
+	 * This is called by dma_buf_get_uuid to get the UUID which identifies
+	 * the buffer to virtio devices.
+	 *
+	 * This callback is optional.
+	 *
+	 * Returns:
+	 *
+	 * 0 on success or a negative error code on failure. On success uuid
+	 * will be populated with the buffer's UUID.
+	 */
+	int (*get_uuid)(struct dma_buf *dmabuf, uuid_t *uuid);
+
+	/**
+	 * @get_flags:
+	 *
+	 * This is called by dma_buf_get_flags and is used to get the buffer's
+	 * flags.
+	 * This callback is optional.
+	 *
+	 * Returns:
+	 *
+	 * 0 on success or a negative error code on failure. On success flags
+	 * will be populated with the buffer's flags.
+	 */
+	int (*get_flags)(struct dma_buf *dmabuf, unsigned long *flags);
 };
 
 /**
@@ -376,6 +462,8 @@
  * @importer_ops: importer operations for this attachment, if provided
  * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
  * @importer_priv: importer specific attachment data.
+ * @dma_map_attrs: DMA attributes to be used when the exporter maps the buffer
+ * through dma_buf_map_attachment.
  *
  * This structure holds the attachment information between the dma_buf buffer
  * and its user device(s). The list contains one attachment struct per device
@@ -396,6 +484,7 @@
 	const struct dma_buf_attach_ops *importer_ops;
 	void *importer_priv;
 	void *priv;
+	unsigned long dma_map_attrs;
 };
 
 /**
@@ -497,11 +586,20 @@
 void dma_buf_move_notify(struct dma_buf *dma_buf);
 int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
 			     enum dma_data_direction dir);
+int dma_buf_begin_cpu_access_partial(struct dma_buf *dma_buf,
+				     enum dma_data_direction dir,
+				     unsigned int offset, unsigned int len);
 int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
 			   enum dma_data_direction dir);
+int dma_buf_end_cpu_access_partial(struct dma_buf *dma_buf,
+				     enum dma_data_direction dir,
+				     unsigned int offset, unsigned int len);
 
 int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
 		 unsigned long);
 void *dma_buf_vmap(struct dma_buf *);
 void dma_buf_vunmap(struct dma_buf *, void *vaddr);
+int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags);
+int dma_buf_get_uuid(struct dma_buf *dmabuf, uuid_t *uuid);
+
 #endif /* __DMA_BUF_H__ */
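
A sketch of CPU access to a sub-range of a buffer using the new partial helpers; the range and direction are illustrative.

static int example_touch_header(struct dma_buf *dmabuf)
{
	int ret;

	ret = dma_buf_begin_cpu_access_partial(dmabuf, DMA_FROM_DEVICE, 0, 64);
	if (ret)
		return ret;

	/* ... read the first 64 bytes through an existing kernel mapping ... */

	return dma_buf_end_cpu_access_partial(dmabuf, DMA_FROM_DEVICE, 0, 64);
}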
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 454e354..35dbdd8 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -16,12 +16,12 @@
 
 /**
  * struct dma_heap_ops - ops to operate on a given heap
- * @allocate:		allocate dmabuf and return fd
+ * @allocate:		allocate dmabuf and return struct dma_buf ptr
  *
- * allocate returns dmabuf fd  on success, -errno on error.
+ * allocate returns dmabuf on success, ERR_PTR(-errno) on error.
  */
 struct dma_heap_ops {
-	int (*allocate)(struct dma_heap *heap,
+	struct dma_buf *(*allocate)(struct dma_heap *heap,
 			unsigned long len,
 			unsigned long fd_flags,
 			unsigned long heap_flags);
@@ -56,4 +56,49 @@
  */
 struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
 
+/**
+ * dma_heap_put - drops a reference to a dmabuf heap, potentially freeing it
+ * @heap:		heap pointer
+ */
+void dma_heap_put(struct dma_heap *heap);
+
+/**
+ * dma_heap_find - Returns the registered dma_heap with the specified name
+ * @name: Name of the heap to find
+ *
+ * NOTE: dma_heaps returned from this function MUST be released
+ * using dma_heap_put() when the user is done.
+ */
+struct dma_heap *dma_heap_find(const char *name);
+
+/**
+ * dma_heap_buffer_alloc - Allocate dma-buf from a dma_heap
+ * @heap:	dma_heap to allocate from
+ * @len:	size to allocate
+ * @fd_flags:	flags to set on returned dma-buf fd
+ * @heap_flags:	flags to pass to the dma heap
+ *
+ * This is for internal dma-buf allocations only.
+ */
+struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
+				      unsigned int fd_flags,
+				      unsigned int heap_flags);
+
+/**
+ * dma_heap_buffer_free - Free dma_buf allocated by dma_heap_buffer_alloc
+ * @dma_buf:	dma_buf to free
+ *
+ * This is really only a simple wrapper around dma_buf_put()
+ */
+void dma_heap_buffer_free(struct dma_buf *);
+
+/**
+ * dma_heap_bufferfd_alloc - Allocate dma-buf fd from a dma_heap
+ * @heap:	dma_heap to allocate from
+ * @len:	size to allocate
+ * @fd_flags:	flags to set on returned dma-buf fd
+ * @heap_flags:	flags to pass to the dma heap
+ */
+int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len,
+			    unsigned int fd_flags,
+			    unsigned int heap_flags);
 #endif /* _DMA_HEAPS_H */
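
A sketch of the in-kernel allocation flow these additions enable; the heap name and fd_flags are illustrative.

static struct dma_buf *example_alloc_from_heap(size_t len)
{
	struct dma_heap *heap;
	struct dma_buf *buf;

	heap = dma_heap_find("system");		/* takes a reference */
	if (!heap)
		return ERR_PTR(-ENODEV);

	buf = dma_heap_buffer_alloc(heap, len, O_RDWR, 0);
	dma_heap_put(heap);	/* drop the lookup reference */
	return buf;		/* release with dma_heap_buffer_free() */
}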
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 2112f21..f51561ed 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -37,6 +37,11 @@
 
 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
 
+int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
+			   u64 size);
+
+int iommu_dma_enable_best_fit_algo(struct device *dev);
+
 #else /* CONFIG_IOMMU_DMA */
 
 struct iommu_domain;
@@ -78,5 +83,16 @@
 {
 }
 
+static inline int iommu_dma_reserve_iova(struct device *dev, dma_addr_t base,
+					 u64 size)
+{
+	return -ENODEV;
+}
+
+static inline int iommu_dma_enable_best_fit_algo(struct device *dev)
+{
+	return -ENODEV;
+}
+
 #endif	/* CONFIG_IOMMU_DMA */
 #endif	/* __DMA_IOMMU_H */
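
A sketch of a driver reserving a fixed IOVA window and opting in to the best-fit allocator; the base address and size are illustrative.

static int example_setup_iova(struct device *dev)
{
	int ret;

	ret = iommu_dma_reserve_iova(dev, 0x10000000, SZ_64M);
	if (ret)
		return ret;

	return iommu_dma_enable_best_fit_algo(dev);
}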
diff --git a/include/linux/export.h b/include/linux/export.h
index fceb5e8..b962df3c 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -118,7 +118,7 @@
  */
 #define __EXPORT_SYMBOL(sym, sec, ns)
 
-#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
+#elif defined(CONFIG_TRIM_UNUSED_KSYMS) && !defined(MODULE)
 
 #include <generated/autoksyms.h>
 
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 8667d0c..11345e6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -3202,6 +3202,7 @@
 extern int generic_ci_d_compare(const struct dentry *dentry, unsigned int len,
 				const char *str, const struct qstr *name);
 #endif
+extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry);
 
 #ifdef CONFIG_MIGRATION
 extern int buffer_migrate_page(struct address_space *,
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index a8f7a43..63f6ec8 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -567,6 +567,11 @@
 bool fscrypt_mergeable_bio_bh(struct bio *bio,
 			      const struct buffer_head *next_bh);
 
+bool fscrypt_dio_supported(struct kiocb *iocb, struct iov_iter *iter);
+
+int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos,
+			    int nr_pages);
+
 #else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
 
 static inline bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode)
@@ -595,8 +600,36 @@
 {
 	return true;
 }
+
+static inline bool fscrypt_dio_supported(struct kiocb *iocb,
+					 struct iov_iter *iter)
+{
+	const struct inode *inode = file_inode(iocb->ki_filp);
+
+	return !fscrypt_needs_contents_encryption(inode);
+}
+
+static inline int fscrypt_limit_dio_pages(const struct inode *inode, loff_t pos,
+					  int nr_pages)
+{
+	return nr_pages;
+}
 #endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
 
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
+static inline bool
+fscrypt_inode_should_skip_dm_default_key(const struct inode *inode)
+{
+	return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
+}
+#else
+static inline bool
+fscrypt_inode_should_skip_dm_default_key(const struct inode *inode)
+{
+	return false;
+}
+#endif
+
 /**
  * fscrypt_inode_uses_inline_crypto() - test whether an inode uses inline
  *					encryption
@@ -741,8 +774,11 @@
  * directory's encryption key is available, then the lookup is assumed to be by
  * plaintext name; otherwise, it is assumed to be by no-key name.
  *
- * This also installs a custom ->d_revalidate() method which will invalidate the
- * dentry if it was created without the key and the key is later added.
+ * This will set DCACHE_NOKEY_NAME on the dentry if the lookup is by no-key
+ * name.  In this case the filesystem must assign the dentry a dentry_operations
+ * which contains fscrypt_d_revalidate (or contains a d_revalidate method that
+ * calls fscrypt_d_revalidate), so that the dentry will be invalidated if the
+ * directory's encryption key is later added.
  *
  * Return: 0 on success; -ENOENT if the directory's key is unavailable but the
  * filename isn't a valid no-key name, so a negative dentry should be created;
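
A sketch of a filesystem direct-I/O path consulting the new helper; the function and fallback policy are hypothetical.

static ssize_t example_dio_rw(struct kiocb *iocb, struct iov_iter *iter)
{
	if (!fscrypt_dio_supported(iocb, iter))
		return -EOPNOTSUPP;	/* caller falls back to buffered I/O */

	/* ... issue the direct I/O ... */
	return 0;
}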
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index c603237..918647ffa 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -39,11 +39,21 @@
 #define ___GFP_HARDWALL		0x100000u
 #define ___GFP_THISNODE		0x200000u
 #define ___GFP_ACCOUNT		0x400000u
+#ifdef CONFIG_CMA
+#define ___GFP_CMA		0x800000u
+#else
+#define ___GFP_CMA		0
+#endif
 #ifdef CONFIG_LOCKDEP
+#ifdef CONFIG_CMA
+#define ___GFP_NOLOCKDEP	0x1000000u
+#else
 #define ___GFP_NOLOCKDEP	0x800000u
+#endif
 #else
 #define ___GFP_NOLOCKDEP	0
 #endif
+
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -57,6 +67,7 @@
 #define __GFP_HIGHMEM	((__force gfp_t)___GFP_HIGHMEM)
 #define __GFP_DMA32	((__force gfp_t)___GFP_DMA32)
 #define __GFP_MOVABLE	((__force gfp_t)___GFP_MOVABLE)  /* ZONE_MOVABLE allowed */
+#define __GFP_CMA	((__force gfp_t)___GFP_CMA)
 #define GFP_ZONEMASK	(__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
 
 /**
@@ -224,7 +235,11 @@
 #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
 
 /* Room for N __GFP_FOO bits */
+#ifdef CONFIG_CMA
+#define __GFP_BITS_SHIFT (24 + IS_ENABLED(CONFIG_LOCKDEP))
+#else
 #define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
+#endif
 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
 
 /**
@@ -447,16 +462,7 @@
 	| 1 << (___GFP_MOVABLE | ___GFP_DMA32 | ___GFP_DMA | ___GFP_HIGHMEM)  \
 )
 
-static inline enum zone_type gfp_zone(gfp_t flags)
-{
-	enum zone_type z;
-	int bit = (__force int) (flags & GFP_ZONEMASK);
-
-	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
-					 ((1 << GFP_ZONES_SHIFT) - 1);
-	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
-	return z;
-}
+enum zone_type gfp_zone(gfp_t flags);
 
 /*
  * There is only one page-allocator function, and two main namespaces to
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 14e6202..35f052b 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -274,7 +274,9 @@
 alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
 					unsigned long vaddr)
 {
-	return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+	return __alloc_zeroed_user_highpage(
+			__GFP_MOVABLE|__GFP_CMA, vma,
+			vaddr);
 }
 
 static inline void clear_highpage(struct page *page)
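
The hunk above threads __GFP_CMA into user highpage allocation; a minimal sketch of the same opt-in from another allocation site (function name hypothetical). When CONFIG_CMA is off, ___GFP_CMA is 0, so the flag is a no-op.

static struct page *example_alloc_movable_page(void)
{
	/* movable allocation that may be served from CMA pageblocks */
	return alloc_page(GFP_HIGHUSER_MOVABLE | __GFP_CMA);
}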
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index ee8299e..386ddf4 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -542,6 +542,12 @@
 };
 
 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
+/* Softirqs where the handling might be long: */
+#define LONG_SOFTIRQ_MASK ((1 << NET_TX_SOFTIRQ)       | \
+			   (1 << NET_RX_SOFTIRQ)       | \
+			   (1 << BLOCK_SOFTIRQ)        | \
+			   (1 << IRQ_POLL_SOFTIRQ)     | \
+			   (1 << TASKLET_SOFTIRQ))
 
 /* map softirq index to softirq name. update 'softirq_to_name' in
  * kernel/softirq.c when adding a new softirq.
@@ -577,6 +583,7 @@
 extern void raise_softirq(unsigned int nr);
 
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+DECLARE_PER_CPU(__u32, active_softirqs);
 
 static inline struct task_struct *this_cpu_ksoftirqd(void)
 {
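
A sketch combining the two additions above to test whether a CPU is inside a potentially long softirq; the helper and its callers are hypothetical (the scheduler-side users are not part of this section).

static inline bool example_cpu_in_long_softirq(int cpu)
{
	return !!(per_cpu(active_softirqs, cpu) & LONG_SOFTIRQ_MASK);
}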
diff --git a/include/linux/ion.h b/include/linux/ion.h
new file mode 100644
index 0000000..80c6fde
--- /dev/null
+++ b/include/linux/ion.h
@@ -0,0 +1,419 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ION_KERNEL_H
+#define _ION_KERNEL_H
+
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/kref.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/shrinker.h>
+#include <linux/types.h>
+#include <uapi/linux/ion.h>
+
+/**
+ * struct ion_buffer - metadata for a particular buffer
+ * @list:		element in list of deferred freeable buffers
+ * @heap:		back pointer to the heap the buffer came from
+ * @flags:		buffer specific flags
+ * @private_flags:	internal buffer specific flags
+ * @size:		size of the buffer
+ * @priv_virt:		private data to the buffer representable as
+ *			a void *
+ * @lock:		protects the buffers cnt fields
+ * @kmap_cnt:		number of times the buffer is mapped to the kernel
+ * @vaddr:		the kernel mapping if kmap_cnt is not zero
+ * @sg_table:		the sg table for the buffer
+ * @attachments:	list of devices attached to this buffer
+ */
+struct ion_buffer {
+	struct list_head list;
+	struct ion_heap *heap;
+	unsigned long flags;
+	unsigned long private_flags;
+	size_t size;
+	void *priv_virt;
+	struct mutex lock;
+	int kmap_cnt;
+	void *vaddr;
+	struct sg_table *sg_table;
+	struct list_head attachments;
+};
+
+/**
+ * struct ion_heap_ops - ops to operate on a given heap
+ * @allocate:		allocate memory
+ * @free:		free memory
+ * @get_pool_size:	get pool size in pages
+ *
+ * allocate returns 0 on success, -errno on error.
+ * map_dma and map_kernel return pointer on success, ERR_PTR on
+ * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in
+ * the buffer's private_flags when called from a shrinker. In that
+ * case, the pages being free'd must be truly free'd back to the
+ * system, not put in a page pool or otherwise cached.
+ */
+struct ion_heap_ops {
+	int (*allocate)(struct ion_heap *heap,
+			struct ion_buffer *buffer, unsigned long len,
+			unsigned long flags);
+	void (*free)(struct ion_buffer *buffer);
+	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
+	long (*get_pool_size)(struct ion_heap *heap);
+};
+
+/**
+ * heap flags - flags between the heaps and core ion code
+ */
+#define ION_HEAP_FLAG_DEFER_FREE BIT(0)
+
+/**
+ * private flags - flags internal to ion
+ */
+/*
+ * Buffer is being freed from a shrinker function. Skip any possible
+ * heap-specific caching mechanism (e.g. page pools). Guarantees that
+ * any buffer storage that came from the system allocator will be
+ * returned to the system allocator.
+ */
+#define ION_PRIV_FLAG_SHRINKER_FREE BIT(0)
+
+/**
+ * struct ion_heap - represents a heap in the system
+ * @node:		rb node to put the heap on the device's tree of heaps
+ * @type:		type of heap
+ * @ops:		ops struct as above
+ * @buf_ops:		dma_buf ops specific to the heap implementation.
+ * @flags:		flags
+ * @id:			id of heap, also indicates priority of this heap when
+ *			allocating.  These are specified by platform data and
+ *			MUST be unique
+ * @name:		used for debugging
+ * @owner:		kernel module that implements this heap
+ * @shrinker:		a shrinker for the heap
+ * @free_list:		free list head if deferred free is used
+ * @free_list_size:	size of the deferred free list in bytes
+ * @lock:		protects the free list
+ * @waitqueue:		queue to wait on from deferred free thread
+ * @task:		task struct of deferred free thread
+ * @num_of_buffers:	the number of currently allocated buffers
+ * @num_of_alloc_bytes:	the number of allocated bytes
+ * @alloc_bytes_wm:	the number of allocated bytes watermark
+ *
+ * Represents a pool of memory from which buffers can be made.  In some
+ * systems the only heap is regular system memory allocated via vmalloc.
+ * On others, some blocks might require large physically contiguous buffers
+ * that are allocated from a specially reserved heap.
+ */
+struct ion_heap {
+	struct plist_node node;
+	enum ion_heap_type type;
+	struct ion_heap_ops *ops;
+	struct dma_buf_ops buf_ops;
+	unsigned long flags;
+	unsigned int id;
+	const char *name;
+	struct module *owner;
+
+	/* deferred free support */
+	struct shrinker shrinker;
+	struct list_head free_list;
+	size_t free_list_size;
+	spinlock_t free_lock;
+	wait_queue_head_t waitqueue;
+	struct task_struct *task;
+
+	/* heap statistics */
+	u64 num_of_buffers;
+	u64 num_of_alloc_bytes;
+	u64 alloc_bytes_wm;
+
+	/* protect heap statistics */
+	spinlock_t stat_lock;
+
+	/* heap's debugfs root */
+	struct dentry *debugfs_dir;
+};
+
+#define ion_device_add_heap(heap) __ion_device_add_heap(heap, THIS_MODULE)
+
+/**
+ * struct ion_dma_buf_attachment - hold device-table attachment data for buffer
+ * @dev:	device attached to the buffer.
+ * @table:	cached mapping.
+ * @list:	list of ion_dma_buf_attachment.
+ */
+struct ion_dma_buf_attachment {
+	struct device *dev;
+	struct sg_table *table;
+	struct list_head list;
+	bool mapped:1;
+};
+
+#ifdef CONFIG_ION
+
+/**
+ * __ion_device_add_heap - adds a heap to the ion device
+ *
+ * @heap:               the heap to add
+ *
+ * Returns 0 on success, negative error otherwise.
+ */
+int __ion_device_add_heap(struct ion_heap *heap, struct module *owner);
+
+/**
+ * ion_device_remove_heap - removes a heap from ion device
+ *
+ * @heap:		pointer to the heap to be removed
+ */
+void ion_device_remove_heap(struct ion_heap *heap);
+
+/**
+ * ion_heap_init_shrinker
+ * @heap:		the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
+ */
+int ion_heap_init_shrinker(struct ion_heap *heap);
+
+/**
+ * ion_heap_init_deferred_free -- initialize deferred free functionality
+ * @heap:		the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will
+ * be called to setup deferred frees. Calls to free the buffer will
+ * return immediately and the actual free will occur some time later
+ */
+int ion_heap_init_deferred_free(struct ion_heap *heap);
+
+/**
+ * ion_heap_freelist_add - add a buffer to the deferred free list
+ * @heap:		the heap
+ * @buffer:		the buffer
+ *
+ * Adds an item to the deferred freelist.
+ */
+void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer);
+
+/**
+ * ion_heap_freelist_drain - drain the deferred free list
+ * @heap:		the heap
+ * @size:		amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed.  The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ */
+size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
+
+/**
+ * ion_heap_freelist_shrink - drain the deferred free
+ *				list, skipping any heap-specific
+ *				pooling or caching mechanisms
+ *
+ * @heap:		the heap
+ * @size:		amount of memory to drain in bytes
+ *
+ * Drains the indicated amount of memory from the deferred freelist immediately.
+ * Returns the total amount freed.  The total freed may be higher depending
+ * on the size of the items in the list, or lower if there is insufficient
+ * total memory on the freelist.
+ *
+ * Unlike with @ion_heap_freelist_drain, don't put any pages back into
+ * page pools or otherwise cache the pages. Everything must be
+ * genuinely free'd back to the system. If you're free'ing from a
+ * shrinker you probably want to use this. Note that this relies on
+ * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
+ * flag.
+ */
+size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+				size_t size);
+
+/**
+ * ion_heap_freelist_size - returns the size of the freelist in bytes
+ * @heap:		the heap
+ */
+size_t ion_heap_freelist_size(struct ion_heap *heap);
+
+/**
+ * ion_heap_map_kernel - map the ion_buffer in kernel virtual address space.
+ *
+ * @heap:               the heap
+ * @buffer:             buffer to be mapped
+ *
+ * Maps the buffer using vmap(). The function respects cache flags for the
+ * buffer and creates the page table entries accordingly. Returns virtual
+ * address at the beginning of the buffer or ERR_PTR.
+ */
+void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
+
+/**
+ * ion_heap_unmap_kernel - unmap ion_buffer
+ *
+ * @buffer:             buffer to be unmapped
+ *
+ * ION wrapper for vunmap() of the ion buffer.
+ */
+void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer);
+
+/**
+ * ion_heap_map_user - map given ion buffer in provided vma
+ *
+ * @heap:               the heap this buffer belongs to
+ * @buffer:             Ion buffer to be mapped
+ * @vma:                vma of the process where buffer should be mapped.
+ *
+ * Maps the buffer using remap_pfn_range() into the specified process's vma starting
+ * with vma->vm_start. The vma size is expected to be >= ion buffer size.
+ * If not, a partial buffer mapping may be created. Returns 0 on success.
+ */
+int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+		      struct vm_area_struct *vma);
+
+/* ion_buffer_zero - zeroes out an ion buffer respecting the ION_FLAGs.
+ *
+ * @buffer:		ion_buffer to zero
+ *
+ * Returns 0 on success, negative error otherwise.
+ */
+int ion_buffer_zero(struct ion_buffer *buffer);
+
+/**
+ * ion_buffer_prep_noncached - flush cache before non-cached mapping
+ *
+ * @buffer:		ion_buffer to flush
+ *
+ * The memory allocated by the heap could be in the CPU cache. To map
+ * this memory as non-cached, we need to flush the associated cache
+ * first. Without the flush, it is possible for stale dirty cache lines
+ * to be evicted after the ION client started writing into this buffer,
+ * leading to data corruption.
+ */
+void ion_buffer_prep_noncached(struct ion_buffer *buffer);
+
+/**
+ * ion_alloc - Allocates an ion buffer of given size from given heap
+ *
+ * @len:               size of the buffer to be allocated.
+ * @heap_id_mask:      a bitwise mask of heap ids to allocate from
+ * @flags:             ION_BUFFER_XXXX flags for the new buffer.
+ *
+ * The function exports a dma_buf object for the new ion buffer internally
+ * and returns that to the caller. So, the buffer is ready to be used by other
+ * drivers immediately. Returns ERR_PTR in case of failure.
+ */
+struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
+			  unsigned int flags);
+
+/**
+ * ion_free - Releases the ion buffer.
+ *
+ * @buffer:             ion buffer to be released
+ */
+int ion_free(struct ion_buffer *buffer);
+
+/**
+ * ion_query_heaps_kernel - Returns information about available heaps to
+ * in-kernel clients.
+ *
+ * @hdata:             pointer to array of struct ion_heap_data.
+ * @size:             size of @hdata array.
+ *
+ * Returns the number of available heaps and populates @hdata with information
+ * regarding the same. When invoked with @size as 0, the function will return
+ * the number of available heaps without modifying @hdata. When the number of
+ * available heaps is higher than @size, @size is returned instead of the
+ * actual number of available heaps.
+ */
+size_t ion_query_heaps_kernel(struct ion_heap_data *hdata, size_t size);
+#else
+
+static inline int __ion_device_add_heap(struct ion_heap *heap,
+				      struct module *owner)
+{
+	return -ENODEV;
+}
+
+static inline int ion_heap_init_shrinker(struct ion_heap *heap)
+{
+	return -ENODEV;
+}
+
+static inline int ion_heap_init_deferred_free(struct ion_heap *heap)
+{
+	return -ENODEV;
+}
+
+static inline void ion_heap_freelist_add(struct ion_heap *heap,
+					 struct ion_buffer *buffer) {}
+
+static inline size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size)
+{
+	return -ENODEV;
+}
+
+static inline size_t ion_heap_freelist_shrink(struct ion_heap *heap,
+					      size_t size)
+{
+	return -ENODEV;
+}
+
+static inline size_t ion_heap_freelist_size(struct ion_heap *heap)
+{
+	return -ENODEV;
+}
+
+static inline void *ion_heap_map_kernel(struct ion_heap *heap,
+					struct ion_buffer *buffer)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_heap_unmap_kernel(struct ion_heap *heap,
+					 struct ion_buffer *buffer) {}
+
+static inline int ion_heap_map_user(struct ion_heap *heap,
+				    struct ion_buffer *buffer,
+				    struct vm_area_struct *vma)
+{
+	return -ENODEV;
+}
+
+static inline int ion_buffer_zero(struct ion_buffer *buffer)
+{
+	return -EINVAL;
+}
+
+static inline void ion_buffer_prep_noncached(struct ion_buffer *buffer) {}
+
+static inline struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
+					unsigned int flags)
+{
+	return ERR_PTR(-ENOMEM);
+}
+
+static inline int ion_free(struct ion_buffer *buffer)
+{
+	return 0;
+}
+
+static inline size_t ion_query_heaps_kernel(struct ion_heap_data *hdata,
+					 size_t size)
+{
+	return 0;
+}
+#endif /* CONFIG_ION */
+#endif /* _ION_KERNEL_H */
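
A sketch of registering a heap module against the kernel-side API declared above; the ops implementations and the heap type are illustrative assumptions.

static struct ion_heap_ops example_heap_ops = {
	.allocate = example_heap_allocate,	/* hypothetical */
	.free	  = example_heap_free,		/* hypothetical */
};

static struct ion_heap example_heap = {
	.type = ION_HEAP_TYPE_CUSTOM,
	.ops  = &example_heap_ops,
	.name = "example",
};

static int __init example_heap_init(void)
{
	/* expands to __ion_device_add_heap(&example_heap, THIS_MODULE) */
	return ion_device_add_heap(&example_heap);
}
module_init(example_heap_init);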
diff --git a/include/linux/iova.h b/include/linux/iova.h
index a0637ab..5b4e75f 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -95,6 +95,7 @@
 						   flush-queues */
 	atomic_t fq_timer_on;			/* 1 when timer is active, 0
 						   when not */
+	bool best_fit;
 };
 
 static inline unsigned long iova_size(struct iova *iova)
@@ -163,6 +164,7 @@
 struct iova *split_and_remove_iova(struct iova_domain *iovad,
 	struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
 void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
+void free_global_cached_iovas(struct iova_domain *iovad);
 #else
 static inline int iova_cache_get(void)
 {
@@ -270,6 +272,11 @@
 					 struct iova_domain *iovad)
 {
 }
+
+static inline void free_global_cached_iovas(struct iova_domain *iovad)
+{
+}
+
 #endif
 
 #endif
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index dda61d1..3147cda 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -42,6 +42,7 @@
 	__s32		accept_ra_rt_info_max_plen;
 #endif
 #endif
+	__s32		accept_ra_rt_table;
 	__s32		proxy_ndp;
 	__s32		accept_source_route;
 	__s32		accept_ra_from_local;
diff --git a/include/linux/keyslot-manager.h b/include/linux/keyslot-manager.h
index 18f3f53..3910fb8 100644
--- a/include/linux/keyslot-manager.h
+++ b/include/linux/keyslot-manager.h
@@ -9,6 +9,17 @@
 #include <linux/bio.h>
 #include <linux/blk-crypto.h>
 
+/* Inline crypto feature bits.  Must set at least one. */
+enum {
+	/* Support for standard software-specified keys */
+	BLK_CRYPTO_FEATURE_STANDARD_KEYS = BIT(0),
+
+	/* Support for hardware-wrapped keys */
+	BLK_CRYPTO_FEATURE_WRAPPED_KEYS = BIT(1),
+};
+
+#ifdef CONFIG_BLK_INLINE_ENCRYPTION
+
 struct blk_keyslot_manager;
 
 /**
@@ -19,6 +30,9 @@
  *			The key is provided so that e.g. dm layers can evict
  *			keys from the devices that they map over.
  *			Returns 0 on success, -errno otherwise.
+ * @derive_raw_secret:	(Optional) Derive a software secret from a
+ *			hardware-wrapped key.  Returns 0 on success, -EOPNOTSUPP
+ *			if unsupported on the hardware, or another -errno code.
  *
  * This structure should be provided by storage device drivers when they set up
  * a keyslot manager - this structure holds the function ptrs that the keyslot
@@ -31,6 +45,10 @@
 	int (*keyslot_evict)(struct blk_keyslot_manager *ksm,
 			     const struct blk_crypto_key *key,
 			     unsigned int slot);
+	int (*derive_raw_secret)(struct blk_keyslot_manager *ksm,
+				 const u8 *wrapped_key,
+				 unsigned int wrapped_key_size,
+				 u8 *secret, unsigned int secret_size);
 };
 
 struct blk_keyslot_manager {
@@ -48,6 +66,12 @@
 	unsigned int max_dun_bytes_supported;
 
 	/*
+	 * The supported features as a bitmask of BLK_CRYPTO_FEATURE_* flags.
+	 * Most drivers should set BLK_CRYPTO_FEATURE_STANDARD_KEYS here.
+	 */
+	unsigned int features;
+
+	/*
 	 * Array of size BLK_ENCRYPTION_MODE_MAX of bitmasks that represents
 	 * whether a crypto mode and data unit size are supported. The i'th
 	 * bit of crypto_mode_supported[crypto_mode] is set iff a data unit
@@ -103,4 +127,16 @@
 
 void blk_ksm_destroy(struct blk_keyslot_manager *ksm);
 
+void blk_ksm_intersect_modes(struct blk_keyslot_manager *parent,
+			     const struct blk_keyslot_manager *child);
+
+int blk_ksm_derive_raw_secret(struct blk_keyslot_manager *ksm,
+			      const u8 *wrapped_key,
+			      unsigned int wrapped_key_size,
+			      u8 *secret, unsigned int secret_size);
+
+void blk_ksm_init_passthrough(struct blk_keyslot_manager *ksm);
+
+#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+
 #endif /* __LINUX_KEYSLOT_MANAGER_H */
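
A sketch of a storage driver advertising wrapped-key support on its keyslot manager; the callback implementations are hypothetical.

static const struct blk_ksm_ll_ops example_ksm_ops = {
	.keyslot_program   = example_keyslot_program,	/* hypothetical */
	.keyslot_evict	   = example_keyslot_evict,	/* hypothetical */
	.derive_raw_secret = example_derive_raw_secret,	/* hypothetical */
};

static void example_init_ksm(struct blk_keyslot_manager *ksm)
{
	ksm->ksm_ll_ops = example_ksm_ops;
	ksm->features	= BLK_CRYPTO_FEATURE_STANDARD_KEYS |
			  BLK_CRYPTO_FEATURE_WRAPPED_KEYS;
}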
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 28f23b3..91d6990 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -74,7 +74,6 @@
 #define LSM_AUDIT_DATA_FILE	12
 #define LSM_AUDIT_DATA_IBPKEY	13
 #define LSM_AUDIT_DATA_IBENDPORT 14
-#define LSM_AUDIT_DATA_LOCKDOWN 15
 #define LSM_AUDIT_DATA_NOTIFICATION 16
 	union 	{
 		struct path path;
@@ -95,7 +94,6 @@
 		struct file *file;
 		struct lsm_ibpkey_audit *ibpkey;
 		struct lsm_ibendport_audit *ibendport;
-		int reason;
 	} u;
 	/* this union contains LSM specific data */
 	union {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index db6ae4d..b75193c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1883,27 +1883,28 @@
 	return (unsigned long)val;
 }
 
-void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);
+void mm_trace_rss_stat(struct mm_struct *mm, int member, long count,
+		       long value);
 
 static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
 {
 	long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
 
-	mm_trace_rss_stat(mm, member, count);
+	mm_trace_rss_stat(mm, member, count, value);
 }
 
 static inline void inc_mm_counter(struct mm_struct *mm, int member)
 {
 	long count = atomic_long_inc_return(&mm->rss_stat.count[member]);
 
-	mm_trace_rss_stat(mm, member, count);
+	mm_trace_rss_stat(mm, member, count, 1);
 }
 
 static inline void dec_mm_counter(struct mm_struct *mm, int member)
 {
 	long count = atomic_long_dec_return(&mm->rss_stat.count[member]);
 
-	mm_trace_rss_stat(mm, member, count);
+	mm_trace_rss_stat(mm, member, count, -1);
 }
 
 /* Optimized variant when page is already known not to be PageAnon */
@@ -2515,7 +2516,7 @@
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
 	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
-	struct mempolicy *, struct vm_userfaultfd_ctx);
+	struct mempolicy *, struct vm_userfaultfd_ctx, const char __user *);
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
 	unsigned long addr, int new_below);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 5a9238f..cb6dfaa 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -336,11 +336,18 @@
 	/*
 	 * For areas with an address space and backing store,
 	 * linkage into the address_space->i_mmap interval tree.
+	 *
+	 * For private anonymous mappings, a pointer to a null-terminated string
+	 * in the user process containing the name given to the vma, or NULL
+	 * if unnamed.
 	 */
-	struct {
-		struct rb_node rb;
-		unsigned long rb_subtree_last;
-	} shared;
+	union {
+		struct {
+			struct rb_node rb;
+			unsigned long rb_subtree_last;
+		} shared;
+		const char __user *anon_name;
+	};
 
 	/*
 	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
@@ -786,4 +793,13 @@
 	unsigned long val;
 } swp_entry_t;
 
+/* Return the name for an anonymous mapping or NULL for a file-backed mapping */
+static inline const char __user *vma_get_anon_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_file)
+		return NULL;
+
+	return vma->anon_name;
+}
+
 #endif /* _LINUX_MM_TYPES_H */
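
A minimal sketch of a consumer of the new helper; note the returned pointer is a userspace address, so a real user must copy the string in (e.g. with strncpy_from_user()) before dereferencing it.

static bool example_vma_is_named(struct vm_area_struct *vma)
{
	/* non-NULL only for named private anonymous mappings */
	return vma_get_anon_name(vma) != NULL;
}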
diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h
index 3549f80..1d554b8 100644
--- a/include/linux/mmc/pm.h
+++ b/include/linux/mmc/pm.h
@@ -23,5 +23,6 @@
 
 #define MMC_PM_KEEP_POWER	(1 << 0)	/* preserve card power during suspend */
 #define MMC_PM_WAKE_SDIO_IRQ	(1 << 1)	/* wake up host system on SDIO IRQ assertion */
+#define MMC_PM_IGNORE_PM_NOTIFY	(1 << 2)	/* ignore mmc pm notify */
 
 #endif /* LINUX_MMC_PM_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index fb3bf69..1df06a7 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -42,8 +42,6 @@
 	MIGRATE_UNMOVABLE,
 	MIGRATE_MOVABLE,
 	MIGRATE_RECLAIMABLE,
-	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
-	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
 #ifdef CONFIG_CMA
 	/*
 	 * MIGRATE_CMA migration type is designed to mimic the way
@@ -60,6 +58,8 @@
 	 */
 	MIGRATE_CMA,
 #endif
+	MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
+	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
 #ifdef CONFIG_MEMORY_ISOLATION
 	MIGRATE_ISOLATE,	/* can't allocate from here */
 #endif
@@ -72,9 +72,11 @@
 #ifdef CONFIG_CMA
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
 #  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
+#  define get_cma_migrate_type() MIGRATE_CMA
 #else
 #  define is_migrate_cma(migratetype) false
 #  define is_migrate_cma_page(_page) false
+#  define get_cma_migrate_type() MIGRATE_MOVABLE
 #endif
 
 static inline bool is_migrate_movable(int mt)
@@ -155,9 +157,7 @@
 	NR_PAGETABLE,		/* used for pagetables */
 	/* Second 128 byte cacheline */
 	NR_BOUNCE,
-#if IS_ENABLED(CONFIG_ZSMALLOC)
 	NR_ZSPAGES,		/* allocated in zsmalloc */
-#endif
 	NR_FREE_CMA_PAGES,
 	NR_VM_ZONE_STAT_ITEMS };
 
diff --git a/include/linux/module.h b/include/linux/module.h
index 6264617..c491c1b 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -404,10 +404,12 @@
 	const s32 *unused_gpl_crcs;
 #endif
 
-#ifdef CONFIG_MODULE_SIG
-	/* Signature was verified. */
+	/*
+	 * Signature was verified. Unconditionally compiled in Android to
+	 * preserve ABI compatibility between kernels without module
+	 * signing enabled and signed modules.
+	 */
 	bool sig_ok;
-#endif
 
 	bool async_probe_requested;
 
diff --git a/include/linux/netfilter/xt_quota2.h b/include/linux/netfilter/xt_quota2.h
new file mode 100644
index 0000000..a391871
--- /dev/null
+++ b/include/linux/netfilter/xt_quota2.h
@@ -0,0 +1,26 @@
+#ifndef _XT_QUOTA_H
+#define _XT_QUOTA_H
+#include <linux/types.h>
+
+enum xt_quota_flags {
+	XT_QUOTA_INVERT    = 1 << 0,
+	XT_QUOTA_GROW      = 1 << 1,
+	XT_QUOTA_PACKET    = 1 << 2,
+	XT_QUOTA_NO_CHANGE = 1 << 3,
+	XT_QUOTA_MASK      = 0x0F,
+};
+
+struct xt_quota_counter;
+
+struct xt_quota_mtinfo2 {
+	char name[15];
+	u_int8_t flags;
+
+	/* Comparison-invariant */
+	aligned_u64 quota;
+
+	/* Used internally by the kernel */
+	struct xt_quota_counter *master __attribute__((aligned(8)));
+};
+
+#endif /* _XT_QUOTA_H */
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index acf820e..9bbd5c0 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -58,6 +58,27 @@
 extern unsigned long of_get_flat_dt_root(void);
 extern uint32_t of_get_flat_dt_phandle(unsigned long node);
 
+/*
+ * early_init_dt_scan_chosen - scan the device tree for ramdisk and bootargs
+ *
+ * The boot arguments will be placed into the memory pointed to by @data.
+ * That memory should be COMMAND_LINE_SIZE big and initialized to be a valid
+ * (possibly empty) string.  Logic for what will be in @data after this
+ * function finishes:
+ *
+ * - CONFIG_CMDLINE_FORCE=true
+ *     CONFIG_CMDLINE
+ * - CONFIG_CMDLINE_EXTEND=true, @data is non-empty string
+ *     @data + dt bootargs (even if dt bootargs are empty)
+ * - CONFIG_CMDLINE_EXTEND=true, @data is empty string
+ *     CONFIG_CMDLINE + dt bootargs (even if dt bootargs are empty)
+ * - CMDLINE_FROM_BOOTLOADER=true, dt bootargs=non-empty
+ *     dt bootargs
+ * - CMDLINE_FROM_BOOTLOADER=true, dt bootargs=empty, @data is non-empty string
+ *     @data is left unchanged
+ * - CMDLINE_FROM_BOOTLOADER=true, dt bootargs=empty, @data is empty string
+ *     CONFIG_CMDLINE (or "" if that's not defined)
+ */
 extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
 				     int depth, void *data);
 extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
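
A worked example of the precedence rules documented above; the command-line strings are illustrative.

/* With CONFIG_CMDLINE="console=ttyS0", dt bootargs "root=/dev/vda",
 * and @data initially empty:
 *
 *   CONFIG_CMDLINE_FORCE=y       -> "console=ttyS0"
 *   CONFIG_CMDLINE_EXTEND=y      -> "console=ttyS0 root=/dev/vda"
 *   CMDLINE_FROM_BOOTLOADER=y    -> "root=/dev/vda"
 */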
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index d5570de..14da3ab2 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -15,6 +15,7 @@
 #include <linux/bitops.h>
 #include <linux/hardirq.h> /* for in_interrupt() */
 #include <linux/hugetlb_inline.h>
+#include <linux/sched/debug.h>
 
 struct pagevec;
 
@@ -295,10 +296,7 @@
 	return __page_cache_alloc(mapping_gfp_mask(x));
 }
 
-static inline gfp_t readahead_gfp_mask(struct address_space *x)
-{
-	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
-}
+gfp_t readahead_gfp_mask(struct address_space *x);
 
 typedef int filler_t(void *, struct page *);
 
@@ -604,7 +602,7 @@
 /*
  * lock_page may only be called if we have the page's inode pinned.
  */
-static inline void lock_page(struct page *page)
+static inline __sched void lock_page(struct page *page)
 {
 	might_sleep();
 	if (!trylock_page(page))
@@ -616,7 +614,7 @@
  * signals.  It returns 0 if it locked the page and -EINTR if it was
  * killed while waiting.
  */
-static inline int lock_page_killable(struct page *page)
+static inline __sched int lock_page_killable(struct page *page)
 {
 	might_sleep();
 	if (!trylock_page(page))
@@ -632,7 +630,7 @@
  * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
  * was already locked and the callback defined in 'wait' was queued.
  */
-static inline int lock_page_async(struct page *page,
+static inline __sched int lock_page_async(struct page *page,
 				  struct wait_page_queue *wait)
 {
 	if (!trylock_page(page))
@@ -647,7 +645,7 @@
  * Return value and mmap_lock implications depend on flags; see
  * __lock_page_or_retry().
  */
-static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
+static inline __sched int lock_page_or_retry(struct page *page, struct mm_struct *mm,
 				     unsigned int flags)
 {
 	might_sleep();
@@ -668,13 +666,13 @@
  * ie with increased "page->count" so that the page won't
  * go away during the wait..
  */
-static inline void wait_on_page_locked(struct page *page)
+static inline __sched void wait_on_page_locked(struct page *page)
 {
 	if (PageLocked(page))
 		wait_on_page_bit(compound_head(page), PG_locked);
 }
 
-static inline int wait_on_page_locked_killable(struct page *page)
+static inline __sched int wait_on_page_locked_killable(struct page *page)
 {
 	if (!PageLocked(page))
 		return 0;
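readahead_gfp_mask() is demoted here from a static inline to an out-of-line declaration, presumably so its definition (e.g. in mm/filemap.c) can consult the android_rvh_set_readahead_gfp_mask vendor hook added later in this patch. A sketch of what that definition plausibly looks like, stated as an assumption rather than the actual body:

gfp_t readahead_gfp_mask(struct address_space *x)
{
	gfp_t mask = mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;

	/* Let a vendor module tweak the readahead allocation flags. */
	trace_android_rvh_set_readahead_gfp_mask(&mask);
	return mask;
}
EXPORT_SYMBOL_GPL(readahead_gfp_mask);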
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 1ab1e24..1901fa3 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2423,6 +2423,8 @@
 #define PCI_VENDOR_ID_LENOVO		0x17aa
 
 #define PCI_VENDOR_ID_QCOM		0x17cb
+#define PCIE_DEVICE_ID_QCOM_PCIE20	0x0106
+#define PCIE_DEVICE_ID_QCOM_PCIE30	0x0107
 
 #define PCI_VENDOR_ID_CDNS		0x17cd
 
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 81a55e9..aa61744 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -49,6 +49,12 @@
 	POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE,	/* dynamically adjusted speed */
 	POWER_SUPPLY_CHARGE_TYPE_CUSTOM,	/* use CHARGE_CONTROL_* props */
 	POWER_SUPPLY_CHARGE_TYPE_LONGLIFE,	/* slow speed, longer life */
+
+	/*
+	 * Force this value to 50 to minimize the chance of userspace
+	 * binary incompatibility on newer upstream kernels.
+	 */
+	POWER_SUPPLY_CHARGE_TYPE_TAPER = 50,	/* charging in CV phase */
 };
 
 enum {
@@ -386,12 +392,21 @@
 #ifdef CONFIG_OF
 extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
 							const char *property);
+extern int power_supply_get_by_phandle_array(struct device_node *np,
+					     const char *property,
+					     struct power_supply **psy,
+					     ssize_t size);
 extern struct power_supply *devm_power_supply_get_by_phandle(
 				    struct device *dev, const char *property);
 #else /* !CONFIG_OF */
 static inline struct power_supply *
 power_supply_get_by_phandle(struct device_node *np, const char *property)
 { return NULL; }
+static inline int power_supply_get_by_phandle_array(struct device_node *np,
+						     const char *property,
+						     struct power_supply **psy,
+						     ssize_t size)
+{ return 0; }
 static inline struct power_supply *
 devm_power_supply_get_by_phandle(struct device *dev, const char *property)
 { return NULL; }
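A caller-side sketch of the new array accessor; the property name, array size, and the assumption that it returns the number of supplies resolved (or a negative errno) are illustrative:

static int example_get_supplies(struct device_node *np,
				struct power_supply **psy, ssize_t size)
{
	int ret = power_supply_get_by_phandle_array(np, "power-supplies",
						    psy, size);
	if (ret < 0)
		pr_warn("supplies not ready: %d\n", ret);	/* e.g. -EPROBE_DEFER */
	return ret;
}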
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index a13ff38..f0a7329b 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -48,6 +48,17 @@
 	PWMF_EXPORTED = 1 << 1,
 };
 
+/**
+ * enum pwm_output_type - output type of the PWM signal
+ * @PWM_OUTPUT_FIXED: PWM output is fixed until a change request
+ * @PWM_OUTPUT_MODULATED: PWM output is modulated in hardware
+ * autonomously with a predefined pattern
+ */
+enum pwm_output_type {
+	PWM_OUTPUT_FIXED = 1 << 0,
+	PWM_OUTPUT_MODULATED = 1 << 1,
+};
+
 /*
  * struct pwm_state - state of a PWM channel
  * @period: PWM period (in nanoseconds)
@@ -59,6 +70,7 @@
 	u64 period;
 	u64 duty_cycle;
 	enum pwm_polarity polarity;
+	enum pwm_output_type output_type;
 	bool enabled;
 };
 
@@ -146,6 +158,16 @@
 	return state.polarity;
 }
 
+static inline enum pwm_output_type pwm_get_output_type(
+		const struct pwm_device *pwm)
+{
+	struct pwm_state state;
+
+	pwm_get_state(pwm, &state);
+
+	return state.output_type;
+}
+
 static inline void pwm_get_args(const struct pwm_device *pwm,
 				struct pwm_args *args)
 {
@@ -249,6 +271,7 @@
  * @get_state: get the current PWM state. This function is only
  *	       called once per PWM device when the PWM chip is
  *	       registered.
+ * @get_output_type_supported: get the supported output type of this PWM
  * @owner: helps prevent removal of modules exporting active PWMs
  * @config: configure duty cycles and period length for this PWM
  * @set_polarity: configure the polarity of this PWM
@@ -264,6 +287,8 @@
 		     const struct pwm_state *state);
 	void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm,
 			  struct pwm_state *state);
+	int (*get_output_type_supported)(struct pwm_chip *chip,
+			struct pwm_device *pwm);
 	struct module *owner;
 
 	/* Only used by legacy drivers */
@@ -319,6 +344,24 @@
 int pwm_adjust_config(struct pwm_device *pwm);
 
 /**
+ * pwm_get_output_type_supported() - obtain the output type(s) supported by a PWM device
+ * @pwm: PWM device
+ *
+ * Returns: the supported output type(s) of the PWM device, or a negative
+ * errno on failure
+ */
+static inline int pwm_get_output_type_supported(struct pwm_device *pwm)
+{
+	if (!pwm)
+		return -EINVAL;
+
+	if (pwm->chip->ops->get_output_type_supported)
+		return pwm->chip->ops->get_output_type_supported(pwm->chip,
+				pwm);
+
+	return PWM_OUTPUT_FIXED;
+}
+
+/**
  * pwm_config() - change a PWM device configuration
  * @pwm: PWM device
  * @duty_ns: "on" time (in nanoseconds)
@@ -436,6 +479,11 @@
 	return -ENOTSUPP;
 }
 
+static inline int pwm_get_output_type_supported(struct pwm_device *pwm)
+{
+	return -EINVAL;
+}
+
 static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
 			     int period_ns)
 {
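A consumer-side sketch of the new output-type API, e.g. a haptics driver preferring hardware-modulated output when the chip supports it (the function name is illustrative):

static int example_setup_pwm(struct pwm_device *pwm)
{
	struct pwm_state state;
	int supported = pwm_get_output_type_supported(pwm);

	if (supported < 0)
		return supported;

	pwm_get_state(pwm, &state);
	state.output_type = (supported & PWM_OUTPUT_MODULATED) ?
			PWM_OUTPUT_MODULATED : PWM_OUTPUT_FIXED;
	return pwm_apply_state(pwm, &state);
}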
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 9fe156d..9cc3e79 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -2,6 +2,7 @@
 /*
  * Remote processor messaging
  *
+ * Copyright (c) 2020 The Linux Foundation.
  * Copyright (C) 2011 Texas Instruments, Inc.
  * Copyright (C) 2011 Google, Inc.
  * All rights reserved.
@@ -60,6 +61,7 @@
 };
 
 typedef int (*rpmsg_rx_cb_t)(struct rpmsg_device *, void *, int, void *, u32);
+typedef int (*rpmsg_rx_sig_t)(struct rpmsg_device *, void *, u32, u32);
 
 /**
  * struct rpmsg_endpoint - binds a local rpmsg address to its user
@@ -67,6 +69,7 @@
  * @refcount: when this drops to zero, the ept is deallocated
  * @cb: rx callback handler
  * @cb_lock: must be taken before accessing/changing @cb
+ * @sig_cb: rx serial signal handler
  * @addr: local rpmsg address
  * @priv: private data for the driver's use
  *
@@ -89,6 +92,7 @@
 	struct kref refcount;
 	rpmsg_rx_cb_t cb;
 	struct mutex cb_lock;
+	rpmsg_rx_sig_t sig_cb;
 	u32 addr;
 	void *priv;
 
@@ -102,6 +106,7 @@
  * @probe: invoked when a matching rpmsg channel (i.e. device) is found
  * @remove: invoked when the rpmsg channel is removed
  * @callback: invoked when an inbound message is received on the channel
+ * @signals: invoked when a serial signal change is received on the channel
  */
 struct rpmsg_driver {
 	struct device_driver drv;
@@ -109,6 +114,8 @@
 	int (*probe)(struct rpmsg_device *dev);
 	void (*remove)(struct rpmsg_device *dev);
 	int (*callback)(struct rpmsg_device *, void *, int, void *, u32);
+	int (*signals)(struct rpmsg_device *rpdev,
+		       void *priv, u32 old, u32 new);
 };
 
 #if IS_ENABLED(CONFIG_RPMSG)
@@ -135,6 +142,9 @@
 __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
 			poll_table *wait);
 
+int rpmsg_get_signals(struct rpmsg_endpoint *ept);
+int rpmsg_set_signals(struct rpmsg_endpoint *ept, u32 set, u32 clear);
+
 #else
 
 static inline int register_rpmsg_device(struct rpmsg_device *dev)
@@ -242,6 +252,23 @@
 	return 0;
 }
 
+static inline int rpmsg_get_signals(struct rpmsg_endpoint *ept)
+{
+	/* This shouldn't be possible */
+	WARN_ON(1);
+
+	return -ENXIO;
+}
+
+static inline int rpmsg_set_signals(struct rpmsg_endpoint *ept,
+				    u32 set, u32 clear)
+{
+	/* This shouldn't be possible */
+	WARN_ON(1);
+
+	return -ENXIO;
+}
+
 #endif /* IS_ENABLED(CONFIG_RPMSG) */
 
 /* use a macro to avoid include chaining to get THIS_MODULE */
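A driver-side sketch of the new signals callback; everything named example_* is hypothetical:

static int example_signals_cb(struct rpmsg_device *rpdev, void *priv,
			      u32 old, u32 new)
{
	dev_dbg(&rpdev->dev, "signals 0x%x -> 0x%x\n", old, new);
	return 0;
}

static struct rpmsg_driver example_rpmsg_driver = {
	.drv.name = "example_rpmsg",
	.signals = example_signals_cb,
	/* .probe, .remove and .callback elided */
};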
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 25e3fde8..9248dc0 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -19,6 +19,7 @@
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 #include <linux/osq_lock.h>
 #endif
+#include <linux/android_vendor.h>
 
 /*
  * For an uncontended rwsem, count and owner are the only fields a task
@@ -51,6 +52,7 @@
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map	dep_map;
 #endif
+	ANDROID_VENDOR_DATA(1);
 };
 
 /* In all implementations count != 0 means locked */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 76cd21f..5ff7d26 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -34,6 +34,7 @@
 #include <linux/rseq.h>
 #include <linux/seqlock.h>
 #include <linux/kcsan.h>
+#include <linux/android_vendor.h>
 
 /* task_struct member predeclarations (sorted alphabetically): */
 struct audit_context;
@@ -888,6 +889,10 @@
 	u64				stimescaled;
 #endif
 	u64				gtime;
+#ifdef CONFIG_CPU_FREQ_TIMES
+	u64				*time_in_state;
+	unsigned int			max_state;
+#endif
 	struct prev_cputime		prev_cputime;
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 	struct vtime			vtime;
@@ -999,6 +1004,7 @@
 	raw_spinlock_t			pi_lock;
 
 	struct wake_q_node		wake_q;
+	struct wake_q_head		*wake_q_head;
 
 #ifdef CONFIG_RT_MUTEXES
 	/* PI waiters blocked on a rt_mutex held by this task: */
@@ -1347,6 +1353,7 @@
 					__mce_reserved : 62;
 	struct callback_head		mce_kill_me;
 #endif
+	ANDROID_VENDOR_DATA_ARRAY(1, 64);
 
 	/*
 	 * New fields for task_struct should be added above here, so that
@@ -1650,6 +1657,16 @@
 
 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
 extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
+
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+extern bool cpupri_check_rt(void);
+#else
+static inline bool cpupri_check_rt(void)
+{
+	return false;
+}
+#endif
+
 #ifdef CONFIG_SMP
 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 9ef7bf6..4521582 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -3,6 +3,7 @@
 #define _LINUX_SCHED_TOPOLOGY_H
 
 #include <linux/topology.h>
+#include <linux/android_vendor.h>
 
 #include <linux/sched/idle.h>
 
@@ -74,6 +75,8 @@
 	atomic_t	ref;
 	atomic_t	nr_busy_cpus;
 	int		has_idle_cores;
+
+	ANDROID_VENDOR_DATA(1);
 };
 
 struct sched_domain {
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 26a2013..1e05e56 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -38,6 +38,7 @@
 struct wake_q_head {
 	struct wake_q_node *first;
 	struct wake_q_node **lastp;
+	int count;
 };
 
 #define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
@@ -49,6 +50,7 @@
 {
 	head->first = WAKE_Q_TAIL;
 	head->lastp = &head->first;
+	head->count = 0;
 }
 
 static inline bool wake_q_empty(struct wake_q_head *head)
diff --git a/include/linux/sched/xacct.h b/include/linux/sched/xacct.h
index c078f0a..9544c9d 100644
--- a/include/linux/sched/xacct.h
+++ b/include/linux/sched/xacct.h
@@ -28,6 +28,11 @@
 {
 	tsk->ioac.syscw++;
 }
+
+static inline void inc_syscfs(struct task_struct *tsk)
+{
+	tsk->ioac.syscfs++;
+}
 #else
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
@@ -44,6 +49,10 @@
 static inline void inc_syscw(struct task_struct *tsk)
 {
 }
+
+static inline void inc_syscfs(struct task_struct *tsk)
+{
+}
 #endif
 
 #endif /* _LINUX_SCHED_XACCT_H */
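inc_syscfs() mirrors inc_syscw(), so the natural call site is the fsync syscall path. A sketch of such a call site, which is an assumption about the companion fs/ change rather than part of this hunk:

static int example_do_fsync(unsigned int fd, int datasync)
{
	struct fd f = fdget(fd);
	int ret = -EBADF;

	if (f.file) {
		ret = vfs_fsync(f.file, datasync);
		fdput(f);
		inc_syscfs(current);	/* account one fsync syscall */
	}
	return ret;
}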
diff --git a/include/linux/security.h b/include/linux/security.h
index bc27254..6cd205a 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -130,8 +130,6 @@
 	LOCKDOWN_CONFIDENTIALITY_MAX,
 };
 
-extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1];
-
 /* These functions are in security/commoncap.c */
 extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
 		       int cap, unsigned int opts);
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index 9f14f9c..54df861 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -165,9 +165,21 @@
 void serdev_device_remove(struct serdev_device *);
 
 struct serdev_controller *serdev_controller_alloc(struct device *, size_t);
-int serdev_controller_add(struct serdev_controller *);
+int serdev_controller_add_platform(struct serdev_controller *, bool);
 void serdev_controller_remove(struct serdev_controller *);
 
+/**
+ * serdev_controller_add() - Add a serdev controller
+ * @ctrl:	controller to be registered.
+ *
+ * Register a controller previously allocated via serdev_controller_alloc() with
+ * the serdev core.
+ */
+static inline int serdev_controller_add(struct serdev_controller *ctrl)
+{
+	return serdev_controller_add_platform(ctrl, false);
+}
+
 static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl)
 {
 	struct serdev_device *serdev = ctrl->serdev;
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 8af13ba..04b337d 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -517,6 +517,7 @@
 extern bool pm_save_wakeup_count(unsigned int count);
 extern void pm_wakep_autosleep_enabled(bool set);
 extern void pm_print_active_wakeup_sources(void);
+extern void pm_get_active_wakeup_sources(char *pending_sources, size_t max);
 
 extern void lock_system_sleep(void);
 extern void unlock_system_sleep(void);
diff --git a/include/linux/task_io_accounting.h b/include/linux/task_io_accounting.h
index 6f6acce..bb26108 100644
--- a/include/linux/task_io_accounting.h
+++ b/include/linux/task_io_accounting.h
@@ -19,6 +19,8 @@
 	u64 syscr;
 	/* # of write syscalls */
 	u64 syscw;
+	/* # of fsync syscalls */
+	u64 syscfs;
 #endif /* CONFIG_TASK_XACCT */
 
 #ifdef CONFIG_TASK_IO_ACCOUNTING
diff --git a/include/linux/task_io_accounting_ops.h b/include/linux/task_io_accounting_ops.h
index bb5498b..733ab62 100644
--- a/include/linux/task_io_accounting_ops.h
+++ b/include/linux/task_io_accounting_ops.h
@@ -97,6 +97,7 @@
 	dst->wchar += src->wchar;
 	dst->syscr += src->syscr;
 	dst->syscw += src->syscw;
+	dst->syscfs += src->syscfs;
 }
 #else
 static inline void task_chr_io_accounting_add(struct task_io_accounting *dst,
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index d07ea27..fa9c9ea 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -399,6 +399,7 @@
 void thermal_notify_framework(struct thermal_zone_device *, int);
 int thermal_zone_device_enable(struct thermal_zone_device *tz);
 int thermal_zone_device_disable(struct thermal_zone_device *tz);
+int thermal_zone_device_is_enabled(struct thermal_zone_device *tz);
 #else
 static inline struct thermal_zone_device *thermal_zone_device_register(
 	const char *type, int trips, int mask, void *devdata,
@@ -452,6 +453,10 @@
 
 static inline int thermal_zone_device_disable(struct thermal_zone_device *tz)
 { return -ENODEV; }
+
+static inline int
+thermal_zone_device_is_enabled(struct thermal_zone_device *tz)
+{ return -ENODEV; }
 #endif /* CONFIG_THERMAL */
 
 #endif /* __THERMAL_H__ */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index a2d229a..8b96c75 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -590,6 +590,7 @@
 	struct config_group group;
 	struct list_head cfs_list;
 	struct usb_function_driver *fd;
+	struct usb_function *f;
 	int (*set_inst_name)(struct usb_function_instance *inst,
 			      const char *name);
 	void (*free_func_inst)(struct usb_function_instance *inst);
diff --git a/include/linux/usb/f_accessory.h b/include/linux/usb/f_accessory.h
new file mode 100644
index 0000000..ebe3c4d
--- /dev/null
+++ b/include/linux/usb/f_accessory.h
@@ -0,0 +1,23 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_ACCESSORY_H
+#define __LINUX_USB_F_ACCESSORY_H
+
+#include <uapi/linux/usb/f_accessory.h>
+
+#endif /* __LINUX_USB_F_ACCESSORY_H */
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index 3a805e2..63a66dd 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -484,6 +484,7 @@
 
 #define PD_T_CC_DEBOUNCE	200	/* 100 - 200 ms */
 #define PD_T_PD_DEBOUNCE	20	/* 10 - 20 ms */
+#define PD_T_TRY_CC_DEBOUNCE	15	/* 10 - 20 ms */
 
 #define PD_N_CAPS_COUNT		(PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP)
 #define PD_N_HARD_RESET_COUNT	2
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
index 68bdc4e..04a66c2 100644
--- a/include/linux/usb/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
@@ -103,17 +103,25 @@
  * --------------------
  * <31>     :: data capable as a USB host
  * <30>     :: data capable as a USB device
- * <29:27>  :: product type
+ * <29:27>  :: product type (UFP / Cable)
  * <26>     :: modal operation supported (1b == yes)
- * <25:16>  :: Reserved, Shall be set to zero
+ * <25:16>  :: product type (DFP)
  * <15:0>   :: USB-IF assigned VID for this cable vendor
  */
 #define IDH_PTYPE_UNDEF		0
 #define IDH_PTYPE_HUB		1
 #define IDH_PTYPE_PERIPH	2
+#define IDH_PTYPE_PSD		3
+#define IDH_PTYPE_AMA		5
+
 #define IDH_PTYPE_PCABLE	3
 #define IDH_PTYPE_ACABLE	4
-#define IDH_PTYPE_AMA		5
+
+#define IDH_PTYPE_DFP_UNDEF	0
+#define IDH_PTYPE_DFP_HUB	1
+#define IDH_PTYPE_DFP_HOST	2
+#define IDH_PTYPE_DFP_PB	3
+#define IDH_PTYPE_DFP_AMC	4
 
 #define VDO_IDH(usbh, usbd, ptype, is_modal, vid)		\
 	((usbh) << 31 | (usbd) << 30 | ((ptype) & 0x7) << 27	\
@@ -122,6 +130,7 @@
 #define PD_IDH_PTYPE(vdo)	(((vdo) >> 27) & 0x7)
 #define PD_IDH_VID(vdo)		((vdo) & 0xffff)
 #define PD_IDH_MODAL_SUPP(vdo)	((vdo) & (1 << 26))
+#define PD_IDH_DFP_PTYPE(vdo)	(((vdo) >> 23) & 0x7)
 
 /*
  * Cert Stat VDO
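A decode sketch for the new DFP product-type field; the mapping mirrors the IDH_PTYPE_DFP_* constants above, and the helper itself is illustrative:

static const char *example_dfp_type(u32 id_header)
{
	switch (PD_IDH_DFP_PTYPE(id_header)) {
	case IDH_PTYPE_DFP_HUB:		return "hub";
	case IDH_PTYPE_DFP_HOST:	return "host";
	case IDH_PTYPE_DFP_PB:		return "power_brick";
	case IDH_PTYPE_DFP_AMC:		return "amc";
	default:			return "undefined";
	}
}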
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index 09762d2..62d54dac 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -83,6 +83,29 @@
  *		Optional; Called to enable/disable PD 3.0 fast role swap.
  *		Enabling frs is accessory dependent as not all PD3.0
  *		accessories support fast role swap.
+ * @frs_sourcing_vbus:
+ *		Optional; Called to notify that vbus is now being sourced.
+ *		Low level drivers can perform chip specific operations, if any.
+ * @enable_auto_vbus_discharge:
+ *		Optional; TCPCI spec based TCPC implementations can optionally
+ *		support hardware to autonomously dischrge vbus upon disconnecting
+ *		as sink or source. TCPM signals TCPC to enable the mechanism upon
+ *		entering connected state and signals disabling upon disconnect.
+ * @set_auto_vbus_discharge_threshold:
+ *		Mandatory when enable_auto_vbus_discharge is implemented. TCPM
+ *		calls this function to allow lower level drivers to program the
+ *		vbus threshold voltage below which the vbus discharge circuit
+ *		will be turned on. requested_vbus_voltage is set to 0 when vbus
+ *		is expected to disappear, i.e. during PR_SWAP, HARD_RESET, etc.
+ * @check_contaminant:
+ *		Optional; The callback is called when CC pins report open status
+ *		at the end of the toggling period. Chip level drivers are
+ *		expected to check for contaminant and re-enable toggling if
+ *		needed. When a non-zero value is returned, check_contaminant is
+ *		expected to restart toggling after checking the connector for
+ *		contaminant; this forces the TCPM state machine to transition to
+ *		the TOGGLING state without calling the start_toggling callback.
  */
 struct tcpc_dev {
 	struct fwnode_handle *fwnode;
@@ -109,6 +132,11 @@
 			   const struct pd_message *msg);
 	int (*set_bist_data)(struct tcpc_dev *dev, bool on);
 	int (*enable_frs)(struct tcpc_dev *dev, bool enable);
+	void (*frs_sourcing_vbus)(struct tcpc_dev *dev);
+	int (*enable_auto_vbus_discharge)(struct tcpc_dev *dev, bool enable);
+	int (*set_auto_vbus_discharge_threshold)(struct tcpc_dev *dev, enum typec_pwr_opmode mode,
+						 bool pps_active, u32 requested_vbus_voltage);
+	int (*check_contaminant)(struct tcpc_dev *dev);
 };
 
 struct tcpm_port;
@@ -116,6 +144,10 @@
 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc);
 void tcpm_unregister_port(struct tcpm_port *port);
 
+int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo,
+				  unsigned int nr_pdo,
+				  unsigned int operating_snk_mw);
+
 void tcpm_vbus_change(struct tcpm_port *port);
 void tcpm_cc_change(struct tcpm_port *port);
 void tcpm_sink_frs(struct tcpm_port *port);
@@ -126,5 +158,6 @@
 			       enum tcpm_transmit_status status);
 void tcpm_pd_hard_reset(struct tcpm_port *port);
 void tcpm_tcpc_reset(struct tcpm_port *port);
+bool tcpm_is_debouncing(struct tcpm_port *tcpm);
 
 #endif /* __LINUX_USB_TCPM_H */
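A TCPC-driver-side sketch of the new optional ops; the chip-specific bodies are stubs and everything named example_* is hypothetical:

static void example_frs_sourcing_vbus(struct tcpc_dev *dev)
{
	/* chip-specific work once vbus is being sourced for FRS */
}

static int example_enable_auto_discharge(struct tcpc_dev *dev, bool enable)
{
	return 0;	/* program the chip's auto-discharge control */
}

static struct tcpc_dev example_tcpc = {
	.frs_sourcing_vbus = example_frs_sourcing_vbus,
	.enable_auto_vbus_discharge = example_enable_auto_discharge,
	/* set_auto_vbus_discharge_threshold becomes mandatory once
	 * enable_auto_vbus_discharge is implemented; elided here */
};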
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 6be5580..5447532 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -126,9 +126,11 @@
 	enum typec_port_data	roles;
 };
 
+int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmodes);
 struct typec_altmode
 *typec_partner_register_altmode(struct typec_partner *partner,
 				const struct typec_altmode_desc *desc);
+int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes);
 struct typec_altmode
 *typec_plug_register_altmode(struct typec_plug *plug,
 			     const struct typec_altmode_desc *desc);
diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h
index 47c2d50..63dd44b 100644
--- a/include/linux/usb/typec_tbt.h
+++ b/include/linux/usb/typec_tbt.h
@@ -39,12 +39,16 @@
 #define   TBT_CABLE_USB3_GEN1		1
 #define   TBT_CABLE_USB3_PASSIVE	2
 #define   TBT_CABLE_10_AND_20GBPS	3
-#define TBT_CABLE_ROUNDED		BIT(19)
+#define TBT_CABLE_ROUNDED_SUPPORT(_vdo_) \
+					(((_vdo_) & GENMASK(20, 19)) >> 19)
+#define   TBT_GEN3_NON_ROUNDED                 0
+#define   TBT_GEN3_GEN4_ROUNDED_NON_ROUNDED    1
 #define TBT_CABLE_OPTICAL		BIT(21)
 #define TBT_CABLE_RETIMER		BIT(22)
 #define TBT_CABLE_LINK_TRAINING		BIT(23)
 
 #define TBT_SET_CABLE_SPEED(_s_)	(((_s_) & GENMASK(2, 0)) << 16)
+#define TBT_SET_CABLE_ROUNDED(_g_)	(((_g_) & GENMASK(1, 0)) << 19)
 
 /* TBT3 Device Enter Mode VDO bits */
 #define TBT_ENTER_MODE_CABLE_SPEED(s)	TBT_SET_CABLE_SPEED(s)
diff --git a/include/linux/wakeup_reason.h b/include/linux/wakeup_reason.h
new file mode 100644
index 0000000..54f5caa
--- /dev/null
+++ b/include/linux/wakeup_reason.h
@@ -0,0 +1,37 @@
+/*
+ * include/linux/wakeup_reason.h
+ *
+ * Logs the reason that caused the kernel to resume
+ * from suspend.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_WAKEUP_REASON_H
+#define _LINUX_WAKEUP_REASON_H
+
+#define MAX_SUSPEND_ABORT_LEN 256
+
+#ifdef CONFIG_SUSPEND
+void log_irq_wakeup_reason(int irq);
+void log_threaded_irq_wakeup_reason(int irq, int parent_irq);
+void log_suspend_abort_reason(const char *fmt, ...);
+void log_abnormal_wakeup_reason(const char *fmt, ...);
+void clear_wakeup_reasons(void);
+#else
+static inline void log_irq_wakeup_reason(int irq) { }
+static inline void log_threaded_irq_wakeup_reason(int irq, int parent_irq) { }
+static inline void log_suspend_abort_reason(const char *fmt, ...) { }
+static inline void log_abnormal_wakeup_reason(const char *fmt, ...) { }
+static inline void clear_wakeup_reasons(void) { }
+#endif
+
+#endif /* _LINUX_WAKEUP_REASON_H */
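A usage sketch: a driver that refuses to suspend records why, so the abort reason is visible to userspace. The busy flag is an illustrative placeholder:

#include <linux/wakeup_reason.h>

static bool example_busy;	/* illustrative state */

static int example_suspend(struct device *dev)
{
	if (example_busy) {
		log_suspend_abort_reason("example: %s busy", dev_name(dev));
		return -EBUSY;
	}
	return 0;
}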
diff --git a/include/linux/xattr.h b/include/linux/xattr.h
index 10b4dc2..28c0da7 100644
--- a/include/linux/xattr.h
+++ b/include/linux/xattr.h
@@ -31,10 +31,10 @@
 	const char *prefix;
 	int flags;      /* fs private flags */
 	bool (*list)(struct dentry *dentry);
-	int (*get)(const struct xattr_handler *, struct dentry *dentry,
+	int (*get)(const struct xattr_handler *handler, struct dentry *dentry,
 		   struct inode *inode, const char *name, void *buffer,
-		   size_t size);
-	int (*set)(const struct xattr_handler *, struct dentry *dentry,
+		   size_t size, int flags);
+	int (*set)(const struct xattr_handler *handler, struct dentry *dentry,
 		   struct inode *inode, const char *name, const void *buffer,
 		   size_t size, int flags);
 };
@@ -47,7 +47,8 @@
 	size_t value_len;
 };
 
-ssize_t __vfs_getxattr(struct dentry *, struct inode *, const char *, void *, size_t);
+ssize_t __vfs_getxattr(struct dentry *dentry, struct inode *inode,
+		       const char *name, void *buffer, size_t size, int flags);
 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
 int __vfs_setxattr(struct dentry *, struct inode *, const char *, const void *, size_t, int);
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index bbb3f26..3f4f171 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -19,7 +19,7 @@
 #include <linux/bitops.h>
 #include <media/media-request.h>
 
-#define VB2_MAX_FRAME	(32)
+#define VB2_MAX_FRAME	(64)
 #define VB2_MAX_PLANES	(8)
 
 /**
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 18f783d..27cd8c5d 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -270,6 +270,18 @@
 void addrconf_prefix_rcv(struct net_device *dev,
 			 u8 *opt, int len, bool sllao);
 
+/* Determines into what table to put autoconf PIO/RIO/default routes
+ * learned on this device.
+ *
+ * - If 0, use the same table for every device. This puts routes into
+ *   one of RT_TABLE_{PREFIX,INFO,DFLT} depending on the type of route
+ *   (but note that these three are currently all equal to
+ *   RT6_TABLE_MAIN).
+ * - If > 0, use the specified table.
+ * - If < 0, put routes into table dev->ifindex + (-rt_table).
+ */
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table);
+
 /*
  *	anycast prototypes (anycast.c)
  */
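The three documented cases restate directly in C; the helper name, and the idea that rt_table is a per-device policy value, are illustrative:

static u32 example_rt_table(const struct net_device *dev, s32 rt_table,
			    u32 default_table)
{
	if (rt_table == 0)
		return default_table;	/* RT_TABLE_{PREFIX,INFO,DFLT} */
	if (rt_table > 0)
		return rt_table;	/* one fixed table for all devices */
	return dev->ifindex + (u32)(-rt_table);	/* per-device table */
}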
diff --git a/include/net/virt_wifi.h b/include/net/virt_wifi.h
new file mode 100644
index 0000000..343e739
--- /dev/null
+++ b/include/net/virt_wifi.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* include/net/virt_wifi.h
+ *
+ * Define the extension interface for the network data simulation
+ *
+ * Copyright (C) 2019 Google, Inc.
+ *
+ * Author: lesl@google.com
+ */
+#ifndef __VIRT_WIFI_H
+#define __VIRT_WIFI_H
+
+struct virt_wifi_network_simulation {
+	void (*notify_device_open)(struct net_device *dev);
+	void (*notify_device_stop)(struct net_device *dev);
+	void (*notify_scan_trigger)(struct wiphy *wiphy,
+				    struct cfg80211_scan_request *request);
+	int (*generate_virt_scan_result)(struct wiphy *wiphy);
+};
+
+int virt_wifi_register_network_simulation(
+	    struct virt_wifi_network_simulation *ops);
+int virt_wifi_unregister_network_simulation(void);
+#endif
+
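A registration sketch for the simulation interface; all example_* symbols are hypothetical module-local stubs:

static void example_open(struct net_device *dev) { }
static void example_stop(struct net_device *dev) { }
static void example_scan(struct wiphy *wiphy,
			 struct cfg80211_scan_request *request) { }
static int example_scan_result(struct wiphy *wiphy) { return 0; }

static struct virt_wifi_network_simulation example_sim_ops = {
	.notify_device_open = example_open,
	.notify_device_stop = example_stop,
	.notify_scan_trigger = example_scan,
	.generate_virt_scan_result = example_scan_result,
};

static int __init example_sim_init(void)
{
	return virt_wifi_register_network_simulation(&example_sim_ops);
}

static void __exit example_sim_exit(void)
{
	virt_wifi_unregister_network_simulation();
}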
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index b2a06f1..79c5333 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1560,9 +1560,7 @@
 int xfrm_output_resume(struct sk_buff *skb, int err);
 int xfrm_output(struct sock *sk, struct sk_buff *skb);
 
-#if IS_ENABLED(CONFIG_NET_PKTGEN)
 int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
-#endif
 
 void xfrm_local_error(struct sk_buff *skb, int mtu);
 int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb);
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 3b038c5..f7fabb9 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -239,6 +239,14 @@
 	.get = xhandler_get, .put = xhandler_put, \
 	.private_value = SOC_DOUBLE_R_VALUE(reg_left, reg_right, xshift, \
 					    xmax, xinvert) }
+#define SOC_SINGLE_MULTI_EXT(xname, xreg, xshift, xmax, xinvert, xcount,\
+	xhandler_get, xhandler_put) \
+{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+	.info = snd_soc_info_multi_ext, \
+	.get = xhandler_get, .put = xhandler_put, \
+	.private_value = (unsigned long)&(struct soc_multi_mixer_control) \
+		{.reg = xreg, .shift = xshift, .rshift = xshift, .max = xmax, \
+		.count = xcount, .platform_max = xmax, .invert = xinvert} }
 #define SOC_SINGLE_EXT_TLV(xname, xreg, xshift, xmax, xinvert,\
 	 xhandler_get, xhandler_put, tlv_array) \
 {	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
@@ -615,6 +623,8 @@
 	struct snd_ctl_elem_value *ucontrol);
 int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
 	struct snd_ctl_elem_value *ucontrol);
+int snd_soc_info_multi_ext(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *uinfo);
 
 /**
  * struct snd_soc_jack_pin - Describes a pin to update based on jack detection
@@ -1238,6 +1248,11 @@
 	unsigned int regbase, regcount, nbits, invert;
 };
 
+struct soc_multi_mixer_control {
+	int min, max, platform_max, count;
+	unsigned int reg, rreg, shift, rshift, invert;
+};
+
 /* enumerated kcontrol */
 struct soc_enum {
 	int reg;
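A usage sketch for the new macro: one mixer control carrying four values, with the usual ext get/put handler signatures. The control name, register and handlers are illustrative:

static int example_chmap_get(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol);
static int example_chmap_put(struct snd_kcontrol *kcontrol,
			     struct snd_ctl_elem_value *ucontrol);

static const struct snd_kcontrol_new example_controls[] = {
	SOC_SINGLE_MULTI_EXT("Example Channel Map", SND_SOC_NOPM, 0, 7, 0, 4,
			     example_chmap_get, example_chmap_put),
};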
diff --git a/include/trace/events/android_fs.h b/include/trace/events/android_fs.h
new file mode 100644
index 0000000..7edb6bc
--- /dev/null
+++ b/include/trace/events/android_fs.h
@@ -0,0 +1,66 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM android_fs
+
+#if !defined(_TRACE_ANDROID_FS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ANDROID_FS_H
+
+#include <linux/fs.h>
+#include <linux/tracepoint.h>
+#include <trace/events/android_fs_template.h>
+
+DEFINE_EVENT(android_fs_data_start_template, android_fs_dataread_start,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+		 pid_t pid, char *pathname, char *command),
+	TP_ARGS(inode, offset, bytes, pid, pathname, command));
+
+DEFINE_EVENT(android_fs_data_end_template, android_fs_dataread_end,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+	TP_ARGS(inode, offset, bytes));
+
+DEFINE_EVENT(android_fs_data_start_template, android_fs_datawrite_start,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+		 pid_t pid, char *pathname, char *command),
+	TP_ARGS(inode, offset, bytes, pid, pathname, command));
+
+DEFINE_EVENT(android_fs_data_end_template, android_fs_datawrite_end,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+	TP_ARGS(inode, offset, bytes));
+
+#endif /* _TRACE_ANDROID_FS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
+#ifndef ANDROID_FSTRACE_GET_PATHNAME
+#define ANDROID_FSTRACE_GET_PATHNAME
+
+/* Sizes an on-stack array, so be careful when sizing this up! */
+#define MAX_TRACE_PATHBUF_LEN	256
+
+static inline char *
+android_fstrace_get_pathname(char *buf, int buflen, struct inode *inode)
+{
+	char *path;
+	struct dentry *d;
+
+	/*
+	 * d_obtain_alias() will either iput() if it locates an existing
+	 * dentry or transfer the reference to the new dentry created.
+	 * So get an extra reference here.
+	 */
+	ihold(inode);
+	d = d_obtain_alias(inode);
+	if (likely(!IS_ERR(d))) {
+		path = dentry_path_raw(d, buf, buflen);
+		if (unlikely(IS_ERR(path))) {
+			strcpy(buf, "ERROR");
+			path = buf;
+		}
+		dput(d);
+	} else {
+		strcpy(buf, "ERROR");
+		path = buf;
+	}
+	return path;
+}
+#endif
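A call-site sketch showing how a filesystem would pair the pathname helper with the start event (mirroring how ext4/f2fs use these tracepoints; the wrapper function is illustrative):

static void example_trace_read_start(struct inode *inode, loff_t pos,
				     int bytes)
{
	if (trace_android_fs_dataread_start_enabled()) {
		char buf[MAX_TRACE_PATHBUF_LEN];
		char *path;

		path = android_fstrace_get_pathname(buf, sizeof(buf), inode);
		trace_android_fs_dataread_start(inode, pos, bytes,
						current->pid, path,
						current->comm);
	}
}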
diff --git a/include/trace/events/android_fs_template.h b/include/trace/events/android_fs_template.h
new file mode 100644
index 0000000..efc4878
--- /dev/null
+++ b/include/trace/events/android_fs_template.h
@@ -0,0 +1,64 @@
+#if !defined(_TRACE_ANDROID_FS_TEMPLATE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ANDROID_FS_TEMPLATE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(android_fs_data_start_template,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+		 pid_t pid, char *pathname, char *command),
+	TP_ARGS(inode, offset, bytes, pid, pathname, command),
+	TP_STRUCT__entry(
+		__string(pathbuf, pathname)
+		__field(loff_t,	offset)
+		__field(int,	bytes)
+		__field(loff_t,	i_size)
+		__string(cmdline, command)
+		__field(pid_t,	pid)
+		__field(ino_t,	ino)
+	),
+	TP_fast_assign(
+		{
+			/*
+			 * Replace the spaces in filenames and cmdlines
+			 * because spaces break the tooling that parses
+			 * the traces.
+			 */
+			__assign_str(pathbuf, pathname);
+			(void)strreplace(__get_str(pathbuf), ' ', '_');
+			__entry->offset		= offset;
+			__entry->bytes		= bytes;
+			__entry->i_size		= i_size_read(inode);
+			__assign_str(cmdline, command);
+			(void)strreplace(__get_str(cmdline), ' ', '_');
+			__entry->pid		= pid;
+			__entry->ino		= inode->i_ino;
+		}
+	),
+	TP_printk("entry_name %s, offset %llu, bytes %d, cmdline %s,"
+		  " pid %d, i_size %llu, ino %lu",
+		  __get_str(pathbuf), __entry->offset, __entry->bytes,
+		  __get_str(cmdline), __entry->pid, __entry->i_size,
+		  (unsigned long) __entry->ino)
+);
+
+DECLARE_EVENT_CLASS(android_fs_data_end_template,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+	TP_ARGS(inode, offset, bytes),
+	TP_STRUCT__entry(
+		__field(ino_t,	ino)
+		__field(loff_t,	offset)
+		__field(int,	bytes)
+	),
+	TP_fast_assign(
+		{
+			__entry->ino		= inode->i_ino;
+			__entry->offset		= offset;
+			__entry->bytes		= bytes;
+		}
+	),
+	TP_printk("ino %lu, offset %llu, bytes %d",
+		  (unsigned long) __entry->ino,
+		  __entry->offset, __entry->bytes)
+);
+
+#endif /* _TRACE_ANDROID_FS_TEMPLATE_H */
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index eeceafa..bb70f46 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -160,6 +160,51 @@
 	TP_ARGS(vec_nr)
 );
 
+DECLARE_EVENT_CLASS(tasklet,
+
+	TP_PROTO(void *func),
+
+	TP_ARGS(func),
+
+	TP_STRUCT__entry(
+		__field( void *,	func)
+	),
+
+	TP_fast_assign(
+		__entry->func = func;
+	),
+
+	TP_printk("function=%ps", __entry->func)
+);
+
+DEFINE_EVENT(tasklet, tasklet_entry,
+
+	TP_PROTO(void *func),
+
+	TP_ARGS(func)
+);
+
+DEFINE_EVENT(tasklet, tasklet_exit,
+
+	TP_PROTO(void *func),
+
+	TP_ARGS(func)
+);
+
+DEFINE_EVENT(tasklet, tasklet_hi_entry,
+
+	TP_PROTO(void *func),
+
+	TP_ARGS(func)
+);
+
+DEFINE_EVENT(tasklet, tasklet_hi_exit,
+
+	TP_PROTO(void *func),
+
+	TP_ARGS(func)
+);
+
 #endif /*  _TRACE_IRQ_H */
 
 /* This part must be outside protection */
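The emission points for the new tasklet events would sit in the softirq loop around the handler invocation; a sketch under that assumption:

static void example_run_tasklet(struct tasklet_struct *t)
{
	trace_tasklet_entry(t->func);
	t->func(t->data);	/* the real loop also handles t->callback */
	trace_tasklet_exit(t->func);
}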
diff --git a/include/trace/events/namei.h b/include/trace/events/namei.h
new file mode 100644
index 0000000..e8c3e21
--- /dev/null
+++ b/include/trace/events/namei.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM namei
+
+#if !defined(_TRACE_INODEPATH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_INODEPATH_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <linux/mm.h>
+#include <linux/memcontrol.h>
+#include <linux/device.h>
+#include <linux/kdev_t.h>
+
+TRACE_EVENT(inodepath,
+		TP_PROTO(struct inode *inode, char *path),
+
+		TP_ARGS(inode, path),
+
+		TP_STRUCT__entry(
+			/* dev_t and ino_t have arch-dependent widths,
+			 * so just store them as unsigned long
+			 */
+			__field(unsigned long, ino)
+			__field(unsigned long, dev)
+			__string(path, path)
+		),
+
+		TP_fast_assign(
+			__entry->ino = inode->i_ino;
+			__entry->dev = inode->i_sb->s_dev;
+			__assign_str(path, path);
+		),
+
+		TP_printk("dev %d:%d ino=%lu path=%s",
+			MAJOR(__entry->dev), MINOR(__entry->dev),
+			__entry->ino, __get_str(path))
+);
+#endif /* _TRACE_INODEPATH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index c96a433..3f94218 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -403,6 +403,30 @@
 	     TP_ARGS(tsk, delay));
 
 /*
+ * Tracepoint for recording the cause of uninterruptible sleep.
+ */
+TRACE_EVENT(sched_blocked_reason,
+
+	TP_PROTO(struct task_struct *tsk),
+
+	TP_ARGS(tsk),
+
+	TP_STRUCT__entry(
+		__field( pid_t,	pid	)
+		__field( void*, caller	)
+		__field( bool, io_wait	)
+	),
+
+	TP_fast_assign(
+		__entry->pid	= tsk->pid;
+		__entry->caller = (void *)get_wchan(tsk);
+		__entry->io_wait = tsk->in_iowait;
+	),
+
+	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
+);
+
+/*
  * Tracepoint for accounting runtime (time the task is executing
  * on a CPU).
  */
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
index 84841b3..50654f3 100644
--- a/include/trace/events/ufs.h
+++ b/include/trace/events/ufs.h
@@ -11,6 +11,15 @@
 
 #include <linux/tracepoint.h>
 
+#define str_opcode(opcode)						\
+	__print_symbolic(opcode,					\
+		{ WRITE_16,		"WRITE_16" },			\
+		{ WRITE_10,		"WRITE_10" },			\
+		{ READ_16,		"READ_16" },			\
+		{ READ_10,		"READ_10" },			\
+		{ SYNCHRONIZE_CACHE,	"SYNC" },			\
+		{ UNMAP,		"UNMAP" })
+
 #define UFS_LINK_STATES			\
 	EM(UIC_LINK_OFF_STATE)		\
 	EM(UIC_LINK_ACTIVE_STATE)	\
@@ -215,9 +224,10 @@
 TRACE_EVENT(ufshcd_command,
 	TP_PROTO(const char *dev_name, const char *str, unsigned int tag,
 			u32 doorbell, int transfer_len, u32 intr, u64 lba,
-			u8 opcode),
+			u8 opcode, u8 group_id),
 
-	TP_ARGS(dev_name, str, tag, doorbell, transfer_len, intr, lba, opcode),
+	TP_ARGS(dev_name, str, tag, doorbell, transfer_len,
+				intr, lba, opcode, group_id),
 
 	TP_STRUCT__entry(
 		__string(dev_name, dev_name)
@@ -228,6 +238,7 @@
 		__field(u32, intr)
 		__field(u64, lba)
 		__field(u8, opcode)
+		__field(u8, group_id)
 	),
 
 	TP_fast_assign(
@@ -239,13 +250,15 @@
 		__entry->intr = intr;
 		__entry->lba = lba;
 		__entry->opcode = opcode;
+		__entry->group_id = group_id;
 	),
 
 	TP_printk(
-		"%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x",
+		"%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x",
 		__get_str(str), __get_str(dev_name), __entry->tag,
 		__entry->doorbell, __entry->transfer_len,
-		__entry->intr, __entry->lba, (u32)__entry->opcode
+		__entry->intr, __entry->lba, (u32)__entry->opcode,
+		str_opcode(__entry->opcode), (u32)__entry->group_id
 	)
 );
 
diff --git a/include/trace/hooks/binder.h b/include/trace/hooks/binder.h
new file mode 100644
index 0000000..ffd9adb
--- /dev/null
+++ b/include/trace/hooks/binder.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM binder
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_BINDER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_BINDER_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+struct binder_transaction;
+struct task_struct;
+DECLARE_HOOK(android_vh_binder_transaction_init,
+	TP_PROTO(struct binder_transaction *t),
+	TP_ARGS(t));
+DECLARE_HOOK(android_vh_binder_set_priority,
+	TP_PROTO(struct binder_transaction *t, struct task_struct *task),
+	TP_ARGS(t, task));
+DECLARE_HOOK(android_vh_binder_restore_priority,
+	TP_PROTO(struct binder_transaction *t, struct task_struct *task),
+	TP_ARGS(t, task));
+DECLARE_HOOK(android_vh_binder_wakeup_poll_threads_ilocked,
+	TP_PROTO(struct task_struct *task),
+	TP_ARGS(task));
+DECLARE_HOOK(android_vh_binder_wakeup_thread_ilocked,
+	TP_PROTO(struct task_struct *task),
+	TP_ARGS(task));
+#else
+#define trace_android_vh_binder_transaction_init(t)
+#define trace_android_vh_binder_set_priority(t, task)
+#define trace_android_vh_binder_restore_priority(t, task)
+#define trace_android_vh_binder_wakeup_poll_threads_ilocked(task)
+#define trace_android_vh_binder_wakeup_thread_ilocked(task)
+#endif
+#endif /* _TRACE_HOOK_BINDER_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
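For context on how these DECLARE_HOOK sites get consumed: each hook generates a register_trace_<name>() helper, so a vendor module attaches a probe roughly as below (the probe body and module boilerplate are illustrative):

static void example_set_priority(void *unused, struct binder_transaction *t,
				 struct task_struct *task)
{
	/* vendor-specific priority policy would go here */
}

static int __init example_hooks_init(void)
{
	return register_trace_android_vh_binder_set_priority(
			example_set_priority, NULL);
}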
diff --git a/include/trace/hooks/cpufreq.h b/include/trace/hooks/cpufreq.h
new file mode 100644
index 0000000..79e6ddf
--- /dev/null
+++ b/include/trace/hooks/cpufreq.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_CPUFREQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_CPUFREQ_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+
+DECLARE_HOOK(android_vh_show_max_freq,
+	TP_PROTO(struct cpufreq_policy *policy, unsigned int *max_freq),
+	TP_ARGS(policy, max_freq));
+
+DECLARE_HOOK(android_vh_freq_table_limits,
+	TP_PROTO(struct cpufreq_policy *policy, unsigned int min_freq,
+		 unsigned int max_freq),
+	TP_ARGS(policy, min_freq, max_freq));
+#else
+
+#define trace_android_vh_show_max_freq(policy, max_freq)
+#define trace_android_vh_freq_table_limits(policy, min_freq, max_freq)
+
+#endif
+
+#endif /* _TRACE_HOOK_CPUFREQ_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/cpuidle.h b/include/trace/hooks/cpuidle.h
new file mode 100644
index 0000000..826e98e
--- /dev/null
+++ b/include/trace/hooks/cpuidle.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpuidle
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_CPUIDLE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_CPUIDLE_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+DECLARE_HOOK(android_vh_cpu_idle,
+	TP_PROTO(int event, int state, int cpu),
+	TP_ARGS(event, state, cpu))
+#else
+#define trace_android_vh_cpu_idle(event, state, cpu)
+#endif
+
+#endif /* _TRACE_HOOK_CPUIDLE_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
diff --git a/include/trace/hooks/debug.h b/include/trace/hooks/debug.h
new file mode 100644
index 0000000..ac006d7
--- /dev/null
+++ b/include/trace/hooks/debug.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM debug
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_DEBUG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_DEBUG_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+struct pt_regs;
+
+DECLARE_HOOK(android_vh_ipi_stop,
+	TP_PROTO(struct pt_regs *regs),
+	TP_ARGS(regs))
+#else
+#define trace_android_vh_ipi_stop(regs)
+#define trace_android_vh_ipi_stop_rcuidle(regs)
+#endif
+
+#endif /* _TRACE_HOOK_DEBUG_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/dtask.h b/include/trace/hooks/dtask.h
new file mode 100644
index 0000000..f4fafbf
--- /dev/null
+++ b/include/trace/hooks/dtask.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dtask
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_DTASK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_DTASK_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+struct mutex;
+DECLARE_HOOK(android_vh_mutex_wait_start,
+	TP_PROTO(struct mutex *lock),
+	TP_ARGS(lock));
+DECLARE_HOOK(android_vh_mutex_wait_finish,
+	TP_PROTO(struct mutex *lock),
+	TP_ARGS(lock));
+
+struct rw_semaphore;
+DECLARE_HOOK(android_vh_rwsem_read_wait_start,
+	TP_PROTO(struct rw_semaphore *sem),
+	TP_ARGS(sem));
+DECLARE_HOOK(android_vh_rwsem_read_wait_finish,
+	TP_PROTO(struct rw_semaphore *sem),
+	TP_ARGS(sem));
+DECLARE_HOOK(android_vh_rwsem_write_wait_start,
+	TP_PROTO(struct rw_semaphore *sem),
+	TP_ARGS(sem));
+DECLARE_HOOK(android_vh_rwsem_write_wait_finish,
+	TP_PROTO(struct rw_semaphore *sem),
+	TP_ARGS(sem));
+
+struct task_struct;
+DECLARE_HOOK(android_vh_sched_show_task,
+	TP_PROTO(struct task_struct *task),
+	TP_ARGS(task));
+#else
+#define trace_android_vh_mutex_wait_start(lock)
+#define trace_android_vh_mutex_wait_finish(lock)
+#define trace_android_vh_rwsem_read_wait_start(sem)
+#define trace_android_vh_rwsem_read_wait_finish(sem)
+#define trace_android_vh_rwsem_write_wait_start(sem)
+#define trace_android_vh_rwsem_write_wait_finish(sem)
+#define trace_android_vh_sched_show_task(task)
+#endif
+
+#endif /* _TRACE_HOOK_DTASK_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/epoch.h b/include/trace/hooks/epoch.h
new file mode 100644
index 0000000..7da3139
--- /dev/null
+++ b/include/trace/hooks/epoch.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM epoch
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_EPOCH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_EPOCH_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+
+DECLARE_HOOK(android_vh_show_suspend_epoch_val,
+	TP_PROTO(u64 suspend_ns, u64 suspend_cycles),
+	TP_ARGS(suspend_ns, suspend_cycles));
+
+DECLARE_HOOK(android_vh_show_resume_epoch_val,
+	TP_PROTO(u64 resume_cycles),
+	TP_ARGS(resume_cycles));
+#else
+
+#define trace_android_vh_show_suspend_epoch_val(suspend_ns, suspend_cycles)
+#define trace_android_vh_show_resume_epoch_val(resume_cycles)
+
+#endif
+
+#endif /* _TRACE_HOOK_EPOCH_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/fpsimd.h b/include/trace/hooks/fpsimd.h
new file mode 100644
index 0000000..73ad2c00
--- /dev/null
+++ b/include/trace/hooks/fpsimd.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fpsimd
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_FPSIMD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_FPSIMD_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+struct task_struct;
+
+DECLARE_HOOK(android_vh_is_fpsimd_save,
+	TP_PROTO(struct task_struct *prev, struct task_struct *next),
+	TP_ARGS(prev, next))
+#else
+
+#define trace_android_vh_is_fpsimd_save(prev, next)
+#endif
+
+#endif /* _TRACE_HOOK_FPSIMD_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/futex.h b/include/trace/hooks/futex.h
new file mode 100644
index 0000000..cbf6abb
--- /dev/null
+++ b/include/trace/hooks/futex.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM futex
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_FUTEX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_FUTEX_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+#include <linux/plist.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+DECLARE_HOOK(android_vh_alter_futex_plist_add,
+	TP_PROTO(struct plist_node *node,
+		 struct plist_head *head,
+		 bool *already_on_hb),
+	TP_ARGS(node, head, already_on_hb));
+#else
+#define trace_android_vh_alter_futex_plist_add(node, head, already_on_hb)
+#endif
+#endif /* _TRACE_HOOK_FUTEX_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/gic.h b/include/trace/hooks/gic.h
new file mode 100644
index 0000000..bdc9235
--- /dev/null
+++ b/include/trace/hooks/gic.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gic
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_GIC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_GIC_H
+
+#include <linux/irqdomain.h>
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+
+DECLARE_HOOK(android_vh_gic_resume,
+	TP_PROTO(struct irq_domain *domain, void __iomem *dist_base),
+	TP_ARGS(domain, dist_base));
+
+#else
+
+#define trace_android_vh_gic_resume(domain, dist_base)
+
+#endif
+
+#endif /* _TRACE_HOOK_GIC_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/gic_v3.h b/include/trace/hooks/gic_v3.h
new file mode 100644
index 0000000..8cf67e51
--- /dev/null
+++ b/include/trace/hooks/gic_v3.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gic_v3
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_GIC_V3_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_GIC_V3_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+struct irq_data;
+struct cpumask;
+DECLARE_HOOK(android_vh_gic_v3_affinity_init,
+	TP_PROTO(int irq, u32 offset, u64 *affinity),
+	TP_ARGS(irq, offset, affinity));
+DECLARE_HOOK(android_vh_gic_v3_set_affinity,
+	TP_PROTO(struct irq_data *d, const struct cpumask *mask_val,
+		 u64 *affinity),
+	TP_ARGS(d, mask_val, affinity));
+#else
+#define trace_android_vh_gic_v3_affinity_init(irq, offset, affinity)
+#define trace_android_vh_gic_v3_set_affinity(d, mask_val, affinity)
+#endif
+#endif /* _TRACE_HOOK_GIC_V3_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/mm.h b/include/trace/hooks/mm.h
new file mode 100644
index 0000000..b7fb70f
--- /dev/null
+++ b/include/trace/hooks/mm.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mm
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_MM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_MM_H
+
+#include <linux/types.h>
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+
+DECLARE_RESTRICTED_HOOK(android_rvh_set_skip_swapcache_flags,
+			TP_PROTO(gfp_t *flags),
+			TP_ARGS(flags), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_set_gfp_zone_flags,
+			TP_PROTO(gfp_t *flags),
+			TP_ARGS(flags), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_set_readahead_gfp_mask,
+			TP_PROTO(gfp_t *flags),
+			TP_ARGS(flags), 1);
+
+#else
+
+#define trace_android_rvh_set_skip_swapcache_flags(gfp_flags)
+#define trace_android_rvh_set_gfp_zone_flags(gfp_flags)
+#define trace_android_rvh_set_readahead_gfp_mask(gfp_flags)
+
+#endif
+
+#endif /* _TRACE_HOOK_MM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/mpam.h b/include/trace/hooks/mpam.h
new file mode 100644
index 0000000..1a89dae
--- /dev/null
+++ b/include/trace/hooks/mpam.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mpam
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_MPAM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_MPAM_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+struct task_struct;
+DECLARE_HOOK(android_vh_mpam_set,
+	TP_PROTO(struct task_struct *prev, struct task_struct *next),
+	TP_ARGS(prev, next));
+#else
+#define trace_android_vh_mpam_set(prev, next)
+#endif
+#endif /* _TRACE_HOOK_MPAM_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/preemptirq.h b/include/trace/hooks/preemptirq.h
new file mode 100644
index 0000000..be96261
--- /dev/null
+++ b/include/trace/hooks/preemptirq.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM preemptirq
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_PREEMPTIRQ_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+
+DECLARE_RESTRICTED_HOOK(android_rvh_preempt_disable,
+	TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	TP_ARGS(ip, parent_ip), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_preempt_enable,
+	TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	TP_ARGS(ip, parent_ip), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_irqs_disable,
+	TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	TP_ARGS(ip, parent_ip), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_irqs_enable,
+	TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	TP_ARGS(ip, parent_ip), 1);
+
+#else
+
+#define trace_android_rvh_preempt_disable(ip, parent_ip)
+#define trace_android_rvh_preempt_enable(ip, parent_ip)
+#define trace_android_rvh_irqs_disable(ip, parent_ip)
+#define trace_android_rvh_irqs_enable(ip, parent_ip)
+
+#endif
+
+#endif /* _TRACE_HOOK_PREEMPTIRQ_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/printk.h b/include/trace/hooks/printk.h
new file mode 100644
index 0000000..83f8ad5
--- /dev/null
+++ b/include/trace/hooks/printk.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM printk
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_PRINTK_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+
+DECLARE_HOOK(android_vh_printk_hotplug,
+	TP_PROTO(int *flag),
+	TP_ARGS(flag));
+
+#else
+
+#define trace_android_vh_printk_hotplug(flag)
+
+#endif
+
+#endif /* _TRACE_HOOK_PRINTK_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/rwsem.h b/include/trace/hooks/rwsem.h
new file mode 100644
index 0000000..7c7a1f78
--- /dev/null
+++ b/include/trace/hooks/rwsem.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rwsem
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_RWSEM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_RWSEM_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+struct rw_semaphore;
+struct rwsem_waiter;
+DECLARE_HOOK(android_vh_rwsem_init,
+	TP_PROTO(struct rw_semaphore *sem),
+	TP_ARGS(sem));
+DECLARE_HOOK(android_vh_rwsem_wake,
+	TP_PROTO(struct rw_semaphore *sem),
+	TP_ARGS(sem));
+DECLARE_HOOK(android_vh_rwsem_write_finished,
+	TP_PROTO(struct rw_semaphore *sem),
+	TP_ARGS(sem));
+DECLARE_HOOK(android_vh_alter_rwsem_list_add,
+	TP_PROTO(struct rwsem_waiter *waiter,
+		 struct rw_semaphore *sem,
+		 bool *already_on_list),
+	TP_ARGS(waiter, sem, already_on_list));
+#else
+#define trace_android_vh_rwsem_init(sem)
+#define trace_android_vh_rwsem_wake(sem)
+#define trace_android_vh_rwsem_write_finished(sem)
+#define trace_android_vh_alter_rwsem_list_add(waiter, sem, already_on_list)
+#endif
+#endif /* _TRACE_HOOK_RWSEM_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/sched.h b/include/trace/hooks/sched.h
new file mode 100644
index 0000000..a2570fc
--- /dev/null
+++ b/include/trace/hooks/sched.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sched
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_SCHED_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook in and extend functionality.
+ */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+struct task_struct;
+DECLARE_RESTRICTED_HOOK(android_rvh_select_task_rq_fair,
+	TP_PROTO(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags, int *new_cpu),
+	TP_ARGS(p, prev_cpu, sd_flag, wake_flags, new_cpu), 1);
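+
+/*
+ * Illustrative probe shape (a sketch, not part of the original header):
+ * a vendor module writes a valid cpu to *new_cpu to override the fair
+ * pick, e.g.
+ *
+ *	static void vh_select_rq(void *data, struct task_struct *p,
+ *			int prev_cpu, int sd_flag, int wake_flags,
+ *			int *new_cpu)
+ *	{
+ *		*new_cpu = prev_cpu;
+ *	}
+ */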
+
+DECLARE_RESTRICTED_HOOK(android_rvh_select_task_rq_rt,
+	TP_PROTO(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags, int *new_cpu),
+	TP_ARGS(p, prev_cpu, sd_flag, wake_flags, new_cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_select_fallback_rq,
+	TP_PROTO(int cpu, struct task_struct *p, int *new_cpu),
+	TP_ARGS(cpu, p, new_cpu), 1);
+
+struct rq;
+DECLARE_HOOK(android_vh_scheduler_tick,
+	TP_PROTO(struct rq *rq),
+	TP_ARGS(rq));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_enqueue_task,
+	TP_PROTO(struct rq *rq, struct task_struct *p),
+	TP_ARGS(rq, p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_dequeue_task,
+	TP_PROTO(struct rq *rq, struct task_struct *p),
+	TP_ARGS(rq, p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_can_migrate_task,
+	TP_PROTO(struct task_struct *p, int dst_cpu, int *can_migrate),
+	TP_ARGS(p, dst_cpu, can_migrate), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_find_lowest_rq,
+	TP_PROTO(struct task_struct *p, struct cpumask *local_cpu_mask,
+			int ret, int *lowest_cpu),
+	TP_ARGS(p, local_cpu_mask, ret, lowest_cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_prepare_prio_fork,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_finish_prio_fork,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_rtmutex_prepare_setprio,
+	TP_PROTO(struct task_struct *p, struct task_struct *pi_task),
+	TP_ARGS(p, pi_task), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_set_user_nice,
+	TP_PROTO(struct task_struct *p, long *nice),
+	TP_ARGS(p, nice), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_setscheduler,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p), 1);
+
+struct sched_group;
+DECLARE_RESTRICTED_HOOK(android_rvh_find_busiest_group,
+	TP_PROTO(struct sched_group *busiest, struct rq *dst_rq, int *out_balance),
+	TP_ARGS(busiest, dst_rq, out_balance), 1);
+
+DECLARE_HOOK(android_vh_dump_throttled_rt_tasks,
+	TP_PROTO(int cpu, u64 clock, ktime_t rt_period, u64 rt_runtime,
+			s64 rt_period_timer_expires),
+	TP_ARGS(cpu, clock, rt_period, rt_runtime, rt_period_timer_expires));
+
+DECLARE_HOOK(android_vh_jiffies_update,
+	TP_PROTO(void *unused),
+	TP_ARGS(unused));
+
+struct rq_flags;
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_newidle_balance,
+	TP_PROTO(struct rq *this_rq, struct rq_flags *rf,
+		 int *pulled_task, int *done),
+	TP_ARGS(this_rq, rf, pulled_task, done), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_nohz_balancer_kick,
+	TP_PROTO(struct rq *rq, unsigned int *flags, int *done),
+	TP_ARGS(rq, flags, done), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_find_busiest_queue,
+	TP_PROTO(int dst_cpu, struct sched_group *group,
+		 struct cpumask *env_cpus, struct rq **busiest,
+		 int *done),
+	TP_ARGS(dst_cpu, group, env_cpus, busiest, done), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_migrate_queued_task,
+	TP_PROTO(struct rq *rq, struct rq_flags *rf,
+		 struct task_struct *p, int new_cpu,
+		 int *detached),
+	TP_ARGS(rq, rf, p, new_cpu, detached), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_find_energy_efficient_cpu,
+	TP_PROTO(struct task_struct *p, int prev_cpu, int sync, int *new_cpu),
+	TP_ARGS(p, prev_cpu, sync, new_cpu), 1);
+
+struct sched_attr;
+DECLARE_HOOK(android_vh_set_sugov_sched_attr,
+	TP_PROTO(struct sched_attr *attr),
+	TP_ARGS(attr));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_set_iowait,
+	TP_PROTO(struct task_struct *p, int *should_iowait_boost),
+	TP_ARGS(p, should_iowait_boost), 1);
+
+struct sugov_policy;
+DECLARE_RESTRICTED_HOOK(android_rvh_set_sugov_update,
+	TP_PROTO(struct sugov_policy *sg_policy, unsigned int next_freq, bool *should_update),
+	TP_ARGS(sg_policy, next_freq, should_update), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_setaffinity,
+	TP_PROTO(struct task_struct *p, const struct cpumask *in_mask, int *retval),
+	TP_ARGS(p, in_mask, retval), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_cpus_allowed,
+	TP_PROTO(struct task_struct *p, cpumask_var_t cpus_requested,
+		 const struct cpumask *new_mask, int *ret),
+	TP_ARGS(p, cpus_requested, new_mask, ret), 1);
+#else
+#define trace_android_rvh_select_task_rq_fair(p, prev_cpu, sd_flag, wake_flags, new_cpu)
+#define trace_android_rvh_select_task_rq_rt(p, prev_cpu, sd_flag, wake_flags, new_cpu)
+#define trace_android_rvh_select_fallback_rq(cpu, p, new_cpu)
+#define trace_android_vh_scheduler_tick(rq)
+#define trace_android_rvh_enqueue_task(rq, p)
+#define trace_android_rvh_dequeue_task(rq, p)
+#define trace_android_rvh_can_migrate_task(p, dst_cpu, can_migrate)
+#define trace_android_rvh_find_lowest_rq(p, local_cpu_mask, ret, lowest_cpu)
+#define trace_android_rvh_prepare_prio_fork(p)
+#define trace_android_rvh_finish_prio_fork(p)
+#define trace_android_rvh_rtmutex_prepare_setprio(p, pi_task)
+#define trace_android_rvh_set_user_nice(p, nice)
+#define trace_android_rvh_setscheduler(p)
+#define trace_android_rvh_find_busiest_group(busiest, dst_rq, out_balance)
+#define trace_android_vh_dump_throttled_rt_tasks(cpu, clock, rt_period, rt_runtime, rt_period_timer_expires)
+#define trace_android_vh_jiffies_update(unused)
+#define trace_android_rvh_sched_newidle_balance(this_rq, rf, pulled_task, done)
+#define trace_android_rvh_sched_nohz_balancer_kick(rq, flags, done)
+#define trace_android_rvh_find_busiest_queue(dst_cpu, group, env_cpus, busiest, done)
+#define trace_android_rvh_migrate_queued_task(rq, rf, p, new_cpu, detached)
+#define trace_android_rvh_find_energy_efficient_cpu(p, prev_cpu, sync, new_cpu)
+#define trace_android_vh_set_sugov_sched_attr(attr)
+#define trace_android_rvh_set_iowait(p, should_iowait_boost)
+#define trace_android_rvh_set_sugov_update(sg_policy, next_freq, should_update)
+#define trace_android_rvh_sched_setaffinity(p, in_mask, retval)
+#define trace_android_rvh_update_cpus_allowed(p, cpus_requested, new_mask, ret)
+#endif
+#endif /* _TRACE_HOOK_SCHED_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/sysrqcrash.h b/include/trace/hooks/sysrqcrash.h
new file mode 100644
index 0000000..e5d9102
--- /dev/null
+++ b/include/trace/hooks/sysrqcrash.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sysrqcrash
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_SYSRQCRASH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_SYSRQCRASH_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook in and extend functionality.
+ */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+DECLARE_HOOK(android_vh_sysrq_crash,
+	TP_PROTO(void *data),
+	TP_ARGS(data));
+#else
+#define trace_android_vh_sysrq_crash(data)
+#endif
+
+#endif /* _TRACE_HOOK_SYSRQCRASH_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/topology.h b/include/trace/hooks/topology.h
new file mode 100644
index 0000000..d67d849
--- /dev/null
+++ b/include/trace/hooks/topology.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM topology
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_TOPOLOGY_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_TOPOLOGY_H
+
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+#include <linux/cpumask.h>
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+
+DECLARE_HOOK(android_vh_arch_set_freq_scale,
+	TP_PROTO(const struct cpumask *cpus, unsigned long freq,
+		 unsigned long max, unsigned long *scale),
+	TP_ARGS(cpus, freq, max, scale));
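+
+/*
+ * Illustrative note (assumes the generic arch_topology implementation,
+ * which this header does not guarantee): the default
+ * arch_set_freq_scale() computes roughly
+ *
+ *	*scale = freq * SCHED_CAPACITY_SCALE / max;
+ *
+ * and a probe attached here may overwrite *scale with a vendor-specific
+ * value before it is stored per-cpu.
+ */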
+
+#else
+
+#define trace_android_vh_arch_set_freq_scale(cpus, freq, max, scale)
+
+#endif
+
+#endif /* _TRACE_HOOK_TOPOLOGY_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/vendor_hooks.h b/include/trace/hooks/vendor_hooks.h
new file mode 100644
index 0000000..e6cabd3
--- /dev/null
+++ b/include/trace/hooks/vendor_hooks.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Note: we intentionally omit include-file ifdef protection.
+ *  This is due to the way trace events work. If a file includes two
+ *  trace event headers under one "CREATE_TRACE_POINTS", the first include
+ *  will override the DECLARE_RESTRICTED_HOOK and break the second include.
+ */
+
+#include <linux/tracepoint.h>
+
+#define DECLARE_HOOK DECLARE_TRACE
+
+#ifdef TRACE_HEADER_MULTI_READ
+
+#define DEFINE_HOOK_FN(_name, _reg, _unreg, proto, args)		\
+	static const char __tpstrtab_##_name[]				\
+	__section("__tracepoints_strings") = #_name;			\
+	extern struct static_call_key STATIC_CALL_KEY(tp_func_##_name);	\
+	int __traceiter_##_name(void *__data, proto);			\
+	struct tracepoint __tracepoint_##_name	__used			\
+	__section("__tracepoints") = {					\
+		.name = __tpstrtab_##_name,				\
+		.key = STATIC_KEY_INIT_FALSE,				\
+		.static_call_key = &STATIC_CALL_KEY(tp_func_##_name),	\
+		.static_call_tramp = STATIC_CALL_TRAMP_ADDR(tp_func_##_name), \
+		.iterator = &__traceiter_##_name,			\
+		.regfunc = _reg,					\
+		.unregfunc = _unreg,					\
+		.funcs = NULL };					\
+	__TRACEPOINT_ENTRY(_name);					\
+	int __traceiter_##_name(void *__data, proto)			\
+	{								\
+		struct tracepoint_func *it_func_ptr;			\
+		void *it_func;						\
+									\
+		it_func_ptr = (&__tracepoint_##_name)->funcs;		\
+		it_func = (it_func_ptr)->func;				\
+		__data = (it_func_ptr)->data;				\
+		((void(*)(void *, proto))(it_func))(__data, args);	\
+		WARN_ON(((++it_func_ptr)->func));			\
+		return 0;						\
+	}								\
+	DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
+
+#undef DECLARE_RESTRICTED_HOOK
+#define DECLARE_RESTRICTED_HOOK(name, proto, args, cond) \
+	DEFINE_HOOK_FN(name, NULL, NULL, PARAMS(proto), PARAMS(args))
+
+/* prevent additional recursion */
+#undef TRACE_HEADER_MULTI_READ
+#else /* TRACE_HEADER_MULTI_READ */
+
+#define DO_HOOK(name, proto, args, cond)				\
+	do {								\
+		struct tracepoint_func *it_func_ptr;			\
+		void *__data;						\
+									\
+		if (!(cond))						\
+			return;						\
+									\
+		it_func_ptr = (&__tracepoint_##name)->funcs;		\
+		if (it_func_ptr) {					\
+			__data = (it_func_ptr)->data;			\
+			__DO_TRACE_CALL(name)(args);			\
+		}							\
+	} while (0)
+
+#define __DECLARE_HOOK(name, proto, args, cond, data_proto, data_args)	\
+	extern int __traceiter_##name(data_proto);			\
+	DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name);	\
+	extern struct tracepoint __tracepoint_##name;			\
+	static inline void trace_##name(proto)				\
+	{								\
+		if (static_key_false(&__tracepoint_##name.key))		\
+			DO_HOOK(name,					\
+				TP_PROTO(data_proto),			\
+				TP_ARGS(data_args),			\
+				TP_CONDITION(cond));			\
+	}								\
+	static inline bool						\
+	trace_##name##_enabled(void)					\
+	{								\
+		return static_key_false(&__tracepoint_##name.key);	\
+	}								\
+	static inline int						\
+	register_trace_##name(void (*probe)(data_proto), void *data) 	\
+	{								\
+		/* only allow a single attachment */			\
+		if (trace_##name##_enabled())				\
+			return -EBUSY;					\
+		return tracepoint_probe_register(&__tracepoint_##name,	\
+						(void *)probe, data);	\
+	}								\
+	/* vendor hooks cannot be unregistered */			\
+
+#undef DECLARE_RESTRICTED_HOOK
+#define DECLARE_RESTRICTED_HOOK(name, proto, args, cond)		\
+	__DECLARE_HOOK(name, PARAMS(proto), PARAMS(args),		\
+			cond,						\
+			PARAMS(void *__data, proto),			\
+			PARAMS(__data, args))
+
+#endif /* TRACE_HEADER_MULTI_READ */
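+
+/*
+ * Illustrative registration of a restricted hook (a sketch assuming a
+ * vendor module; the probe's first argument is the tracepoint's private
+ * data pointer):
+ *
+ *	static void probe_enqueue(void *data, struct rq *rq,
+ *				  struct task_struct *p)
+ *	{
+ *		trace_printk("enqueue %d\n", p->pid);
+ *	}
+ *
+ *	register_trace_android_rvh_enqueue_task(probe_enqueue, NULL);
+ *
+ * Restricted hooks accept only a single probe (-EBUSY afterwards) and,
+ * as noted above, cannot be unregistered.
+ */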
diff --git a/include/trace/hooks/wqlockup.h b/include/trace/hooks/wqlockup.h
new file mode 100644
index 0000000..de70eac
--- /dev/null
+++ b/include/trace/hooks/wqlockup.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM wqlockup
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_WQLOCKUP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_WQLOCKUP_H
+#include <linux/tracepoint.h>
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * The following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook in and extend functionality.
+ */
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+DECLARE_HOOK(android_vh_wq_lockup_pool,
+	TP_PROTO(int cpu, unsigned long pool_ts),
+	TP_ARGS(cpu, pool_ts));
+#else
+#define trace_android_vh_wq_lockup_pool(cpu, pool_ts)
+#endif
+
+#endif /* _TRACE_HOOK_WQLOCKUP_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index f1ce2c4..6d7b8fe 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -38,11 +38,59 @@
 	BINDER_TYPE_PTR		= B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
 };
 
-enum {
+/**
+ * enum flat_binder_object_shifts - shift values for flat_binder_object_flags
+ * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting the scheduler policy.
+ */
+enum flat_binder_object_shifts {
+	FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
+};
+
+/**
+ * enum flat_binder_object_flags - flags for use in flat_binder_object.flags
+ */
+enum flat_binder_object_flags {
+	/**
+	 * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
+	 *
+	 * These bits can be used to set the minimum scheduler priority
+	 * at which transactions into this node should run. Valid values
+	 * in these bits depend on the scheduler policy encoded in
+	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
+	 *
+	 * For SCHED_NORMAL/SCHED_BATCH, the valid range is [-20..19].
+	 * For SCHED_FIFO/SCHED_RR, the valid range is [1..99].
+	 */
 	FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+	/**
+	 * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
+	 */
 	FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
 
 	/**
+	 * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
+	 *
+	 * These two bits can be used to set the min scheduling policy at which
+	 * transactions on this node should run. These match the UAPI
+	 * scheduler policy values, e.g.:
+	 * 00b: SCHED_NORMAL
+	 * 01b: SCHED_FIFO
+	 * 10b: SCHED_RR
+	 * 11b: SCHED_BATCH
+	 */
+	FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
+		3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,
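+
+	/*
+	 * Illustrative encoding (not part of the original UAPI text): a
+	 * node asking for at least SCHED_FIFO priority 10 would set
+	 *
+	 *	flags = (1U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT) | 10;
+	 *
+	 * since SCHED_FIFO is policy value 01b in the table above.
+	 */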
+
+	/**
+	 * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
+	 *
+	 * Only when set, calls into this node will inherit a real-time
+	 * scheduling policy from the caller (for synchronous transactions).
+	 */
+	FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
+
+	/**
 	 * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
 	 *
 	 * Only when set, causes senders to include their security
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index c6ca330..395dd0d 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -288,8 +288,6 @@
    processes and setting the scheduling algorithm used by another
    process. */
 /* Allow setting cpu affinity on other processes */
-/* Allow setting realtime ioprio class */
-/* Allow setting ioprio class on other processes */
 
 #define CAP_SYS_NICE         23
 
diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
index e5de603..6cbf121 100644
--- a/include/uapi/linux/fscrypt.h
+++ b/include/uapi/linux/fscrypt.h
@@ -125,7 +125,10 @@
 	struct fscrypt_key_specifier key_spec;
 	__u32 raw_size;
 	__u32 key_id;
-	__u32 __reserved[8];
+	__u32 __reserved[7];
+	/* N.B.: "temporary" flag, not reserved upstream */
+#define __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED		0x00000001
+	__u32 __flags;
 	__u8 raw[];
 };
 
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 7233502..0f5f709ce 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -349,6 +349,7 @@
 #define FUSE_EXPLICIT_INVAL_DATA (1 << 25)
 #define FUSE_MAP_ALIGNMENT	(1 << 26)
 #define FUSE_SUBMOUNTS		(1 << 27)
+#define FUSE_PASSTHROUGH	(1 << 31)
 
 /**
  * CUSE INIT request/reply flags
@@ -479,6 +480,7 @@
 	FUSE_COPY_FILE_RANGE	= 47,
 	FUSE_SETUPMAPPING	= 48,
 	FUSE_REMOVEMAPPING	= 49,
+	FUSE_CANONICAL_PATH	= 2016,
 
 	/* CUSE specific operations */
 	CUSE_INIT		= 4096,
@@ -605,7 +607,7 @@
 struct fuse_open_out {
 	uint64_t	fh;
 	uint32_t	open_flags;
-	uint32_t	padding;
+	uint32_t	passthrough_fh;
 };
 
 struct fuse_release_in {
@@ -808,6 +810,13 @@
 	uint32_t	padding;
 };
 
+struct fuse_passthrough_out {
+	uint32_t	fd;
+	/* For future implementation */
+	uint32_t	len;
+	void		*vec;
+};
+
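+/*
+ * Sketch of the intended flow, inferred from this patch (not upstream
+ * fuse documentation): while servicing FUSE_OPEN, the daemon passes the
+ * backing file's fd in here via FUSE_DEV_IOC_PASSTHROUGH_OPEN (defined
+ * below) and stores the id it gets back in fuse_open_out.passthrough_fh.
+ */
+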
 struct fuse_out_header {
 	uint32_t	len;
 	int32_t		error;
@@ -883,7 +892,8 @@
 };
 
 /* Device ioctls: */
-#define FUSE_DEV_IOC_CLONE	_IOR(229, 0, uint32_t)
+#define FUSE_DEV_IOC_CLONE		_IOR(229, 0, uint32_t)
+#define FUSE_DEV_IOC_PASSTHROUGH_OPEN	_IOW(229, 1, struct fuse_passthrough_out)
 
 struct fuse_lseek_in {
 	uint64_t	fh;
diff --git a/include/uapi/linux/incrementalfs.h b/include/uapi/linux/incrementalfs.h
new file mode 100644
index 0000000..482cebd
--- /dev/null
+++ b/include/uapi/linux/incrementalfs.h
@@ -0,0 +1,539 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Userspace interface for Incremental FS.
+ *
+ * Incremental FS is special-purpose Linux virtual file system that allows
+ * execution of a program while its binary and resource files are still being
+ * lazily downloaded over the network, USB etc.
+ *
+ * Copyright 2019 Google LLC
+ */
+#ifndef _UAPI_LINUX_INCREMENTALFS_H
+#define _UAPI_LINUX_INCREMENTALFS_H
+
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/xattr.h>
+
+/* ===== constants ===== */
+#define INCFS_NAME "incremental-fs"
+#define INCFS_MAGIC_NUMBER (unsigned long)(0x5346434e49ul)
+#define INCFS_DATA_FILE_BLOCK_SIZE 4096
+#define INCFS_HEADER_VER 1
+
+/* TODO: This value is assumed in incfs_copy_signature_info_from_user to be the
+ * actual signature length. Set back to 64 when fixed.
+ */
+#define INCFS_MAX_HASH_SIZE 32
+#define INCFS_MAX_FILE_ATTR_SIZE 512
+
+#define INCFS_PENDING_READS_FILENAME ".pending_reads"
+#define INCFS_LOG_FILENAME ".log"
+#define INCFS_BLOCKS_WRITTEN_FILENAME ".blocks_written"
+#define INCFS_XATTR_ID_NAME (XATTR_USER_PREFIX "incfs.id")
+#define INCFS_XATTR_SIZE_NAME (XATTR_USER_PREFIX "incfs.size")
+#define INCFS_XATTR_METADATA_NAME (XATTR_USER_PREFIX "incfs.metadata")
+
+#define INCFS_MAX_SIGNATURE_SIZE 8096
+#define INCFS_SIGNATURE_VERSION 2
+#define INCFS_SIGNATURE_SECTIONS 2
+
+#define INCFS_IOCTL_BASE_CODE 'g'
+
+/* ===== ioctl requests on the command dir ===== */
+
+/*
+ * Create a new file
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_CREATE_FILE \
+	_IOWR(INCFS_IOCTL_BASE_CODE, 30, struct incfs_new_file_args)
+
+/* Read file signature */
+#define INCFS_IOC_READ_FILE_SIGNATURE                                          \
+	_IOR(INCFS_IOCTL_BASE_CODE, 31, struct incfs_get_file_sig_args)
+
+/*
+ * Fill in one or more data blocks. This may only be called on a handle
+ * passed as a parameter to INCFS_IOC_PERMIT_FILL.
+ *
+ * Returns the number of blocks filled in, or an error if none were.
+ */
+#define INCFS_IOC_FILL_BLOCKS                                                  \
+	_IOR(INCFS_IOCTL_BASE_CODE, 32, struct incfs_fill_blocks)
+
+/*
+ * Permit INCFS_IOC_FILL_BLOCKS on the given file descriptor
+ * May only be called on .pending_reads file
+ *
+ * Returns 0 on success or error
+ */
+#define INCFS_IOC_PERMIT_FILL                                                  \
+	_IOW(INCFS_IOCTL_BASE_CODE, 33, struct incfs_permit_fill)
+
+/*
+ * Fills buffer with ranges of populated blocks
+ *
+ * Returns 0 if all ranges written
+ *	   error otherwise
+ *
+ *	   Either way, range_buffer_size_out is set to the number of
+ *	   bytes written (the caller should initialize it to 0). The
+ *	   ranges filled are valid, but if an error was returned there
+ *	   might be more ranges to come.
+ *
+ *	   Ranges are ranges of filled blocks:
+ *
+ *	   1 2 7 9
+ *
+ *	   means blocks 1, 2, 7, 8 and 9 are filled, while blocks 0, 3, 4,
+ *	   5, 6 and those from 10 onwards are not.
+ *
+ *	   If hashing is enabled for the file, the hash blocks are simply
+ *	   treated as though they immediately followed the data blocks.
+ */
+#define INCFS_IOC_GET_FILLED_BLOCKS                                            \
+	_IOR(INCFS_IOCTL_BASE_CODE, 34, struct incfs_get_filled_blocks_args)
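+
+/*
+ * Illustrative userspace call (a sketch; "ranges" and "fd" are local to
+ * this example):
+ *
+ *	struct incfs_filled_range ranges[128];
+ *	struct incfs_get_filled_blocks_args args = {
+ *		.range_buffer = (__u64)(uintptr_t)ranges,
+ *		.range_buffer_size = sizeof(ranges),
+ *	};
+ *
+ *	error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &args);
+ *	(on success, args.range_buffer_size_out bytes of ranges are valid)
+ */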
+
+/*
+ * Creates a new mapped file
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_CREATE_MAPPED_FILE \
+	_IOWR(INCFS_IOCTL_BASE_CODE, 35, struct incfs_create_mapped_file_args)
+
+/*
+ * Get number of blocks, total and filled
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_GET_BLOCK_COUNT \
+	_IOR(INCFS_IOCTL_BASE_CODE, 36, struct incfs_get_block_count_args)
+
+/*
+ * Get per UID read timeouts
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_GET_READ_TIMEOUTS \
+	_IOR(INCFS_IOCTL_BASE_CODE, 37, struct incfs_get_read_timeouts_args)
+
+/*
+ * Set per UID read timeouts
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_SET_READ_TIMEOUTS \
+	_IOW(INCFS_IOCTL_BASE_CODE, 38, struct incfs_set_read_timeouts_args)
+
+/* ===== sysfs feature flags ===== */
+/*
+ * Each flag is represented by a file in /sys/fs/incremental-fs/features.
+ * If the file exists, the feature is supported, and the file's contents
+ * will be the line "supported".
+ */
+
+/*
+ * Basic flag stating that the core incfs file system is available
+ */
+#define INCFS_FEATURE_FLAG_COREFS "corefs"
+
+/*
+ * report_uid mount option is supported
+ */
+#define INCFS_FEATURE_FLAG_REPORT_UID "report_uid"
+
+enum incfs_compression_alg {
+	COMPRESSION_NONE = 0,
+	COMPRESSION_LZ4 = 1,
+	COMPRESSION_ZSTD = 2,
+};
+
+enum incfs_block_flags {
+	INCFS_BLOCK_FLAGS_NONE = 0,
+	INCFS_BLOCK_FLAGS_HASH = 1,
+};
+
+typedef struct {
+	__u8 bytes[16];
+} incfs_uuid_t __attribute__((aligned (8)));
+
+/*
+ * Description of a pending read. A pending read is a read call by
+ * a userspace program for which the filesystem currently doesn't have data.
+ *
+ * Reads from .pending_reads and .log return an array of these structures.
+ */
+struct incfs_pending_read_info {
+	/* Id of a file that is being read from. */
+	incfs_uuid_t file_id;
+
+	/* Timestamp of the read, in microseconds since system boot. */
+	__aligned_u64 timestamp_us;
+
+	/* Index of a file block that is being read. */
+	__u32 block_index;
+
+	/* A serial number of this pending read. */
+	__u32 serial_number;
+};
+
+/*
+ * Description of a pending read. A pending read is a read call by
+ * a userspace program for which the filesystem currently doesn't have data.
+ *
+ * This version of incfs_pending_read_info is used whenever the file system is
+ * mounted with the report_uid flag.
+ */
+struct incfs_pending_read_info2 {
+	/* Id of a file that is being read from. */
+	incfs_uuid_t file_id;
+
+	/* Timestamp of the read, in microseconds since system boot. */
+	__aligned_u64 timestamp_us;
+
+	/* Index of a file block that is being read. */
+	__u32 block_index;
+
+	/* A serial number of this pending read. */
+	__u32 serial_number;
+
+	/* The UID of the reading process */
+	__u32 uid;
+
+	__u32 reserved;
+};
+
+/*
+ * Description of a data or hash block to add to a data file.
+ */
+struct incfs_fill_block {
+	/* Index of a data block. */
+	__u32 block_index;
+
+	/* Length of data */
+	__u32 data_len;
+
+	/*
+	 * A pointer to the actual data for the block.
+	 *
+	 * Equivalent to: __u8 *data;
+	 */
+	__aligned_u64 data;
+
+	/*
+	 * Compression algorithm used to compress the data block.
+	 * Values from enum incfs_compression_alg.
+	 */
+	__u8 compression;
+
+	/* Values from enum incfs_block_flags */
+	__u8 flags;
+
+	__u16 reserved1;
+
+	__u32 reserved2;
+
+	__aligned_u64 reserved3;
+};
+
+/*
+ * Description of a number of blocks to add to a data file
+ *
+ * Argument for INCFS_IOC_FILL_BLOCKS
+ */
+struct incfs_fill_blocks {
+	/* Number of blocks */
+	__u64 count;
+
+	/* A pointer to an array of incfs_fill_block structs */
+	__aligned_u64 fill_blocks;
+};
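+
+/*
+ * Illustrative fill of one uncompressed block (a sketch; the fd must
+ * first have been authorized via INCFS_IOC_PERMIT_FILL, and "buf" holds
+ * INCFS_DATA_FILE_BLOCK_SIZE bytes):
+ *
+ *	struct incfs_fill_block fb = {
+ *		.block_index = 0,
+ *		.data_len = INCFS_DATA_FILE_BLOCK_SIZE,
+ *		.data = (__u64)(uintptr_t)buf,
+ *		.compression = COMPRESSION_NONE,
+ *	};
+ *	struct incfs_fill_blocks fbs = {
+ *		.count = 1,
+ *		.fill_blocks = (__u64)(uintptr_t)&fb,
+ *	};
+ *	filled = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fbs);
+ */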
+
+/*
+ * Permit INCFS_IOC_FILL_BLOCKS on the given file descriptor
+ * May only be called on .pending_reads file
+ *
+ * Argument for INCFS_IOC_PERMIT_FILL
+ */
+struct incfs_permit_fill {
+	/* File to permit fills on */
+	__u32 file_descriptor;
+};
+
+enum incfs_hash_tree_algorithm {
+	INCFS_HASH_TREE_NONE = 0,
+	INCFS_HASH_TREE_SHA256 = 1
+};
+
+/*
+ * Create a new file or directory.
+ */
+struct incfs_new_file_args {
+	/* Id of a file to create. */
+	incfs_uuid_t file_id;
+
+	/*
+	 * Total size of the new file. Ignored if S_ISDIR(mode).
+	 */
+	__aligned_u64 size;
+
+	/*
+	 * File mode. Permissions and dir flag.
+	 */
+	__u16 mode;
+
+	__u16 reserved1;
+
+	__u32 reserved2;
+
+	/*
+	 * A pointer to a null-terminated relative path to the file's parent
+	 * dir.
+	 * Max length: PATH_MAX
+	 *
+	 * Equivalent to: char *directory_path;
+	 */
+	__aligned_u64 directory_path;
+
+	/*
+	 * A pointer to the file's null-terminated name.
+	 * Max length: PATH_MAX
+	 *
+	 * Equivalent to: char *file_name;
+	 */
+	__aligned_u64 file_name;
+
+	/*
+	 * A pointer to a file attribute to be set on creation.
+	 *
+	 * Equivalent to: u8 *file_attr;
+	 */
+	__aligned_u64 file_attr;
+
+	/*
+	 * Length of the data buffer specified by file_attr.
+	 * Max value: INCFS_MAX_FILE_ATTR_SIZE
+	 */
+	__u32 file_attr_len;
+
+	__u32 reserved4;
+
+	/*
+	 * Points to an APK V4 Signature data blob
+	 * Signature must have two sections
+	 * Format is:
+	 *	u32 version
+	 *	u32 size_of_hash_info_section
+	 *	u8 hash_info_section[]
+	 *	u32 size_of_signing_info_section
+	 *	u8 signing_info_section[]
+	 *
+	 * Note that incfs does not care about what is in signing_info_section
+	 *
+	 * hash_info_section has following format:
+	 *	u32 hash_algorithm; // Must be SHA256 == 1
+	 *	u8 log2_blocksize;  // Must be 12 for 4096 byte blocks
+	 *	u32 salt_size;
+	 *	u8 salt[];
+	 *	u32 hash_size;
+	 *	u8 root_hash[];
+	 */
+	__aligned_u64 signature_info;
+
+	/* Size of signature_info */
+	__aligned_u64 signature_size;
+
+	__aligned_u64 reserved6;
+};
+
+/*
+ * Request a digital signature blob for a given file.
+ * Argument for INCFS_IOC_READ_FILE_SIGNATURE ioctl
+ */
+struct incfs_get_file_sig_args {
+	/*
+	 * A pointer to the data buffer to save a signature blob to.
+	 *
+	 * Equivalent to: u8 *file_signature;
+	 */
+	__aligned_u64 file_signature;
+
+	/* Size of the buffer at file_signature. */
+	__u32 file_signature_buf_size;
+
+	/*
+	 * Number of bytes saved to the file_signature buffer.
+	 * Set once the ioctl completes.
+	 */
+	__u32 file_signature_len_out;
+};
+
+struct incfs_filled_range {
+	__u32 begin;
+	__u32 end;
+};
+
+/*
+ * Request ranges of filled blocks
+ * Argument for INCFS_IOC_GET_FILLED_BLOCKS
+ */
+struct incfs_get_filled_blocks_args {
+	/*
+	 * A buffer to populate with ranges of filled blocks
+	 *
+	 * Equivalent to struct incfs_filled_ranges *range_buffer
+	 */
+	__aligned_u64 range_buffer;
+
+	/* Size of range_buffer */
+	__u32 range_buffer_size;
+
+	/* Start index to read from */
+	__u32 start_index;
+
+	/*
+	 * End index to read to. 0 means read to end. This is a range,
+	 * so incfs will read from start_index to end_index - 1
+	 */
+	__u32 end_index;
+
+	/* Actual number of blocks in file */
+	__u32 total_blocks_out;
+
+	/* The number of data blocks in the file */
+	__u32 data_blocks_out;
+
+	/* Number of bytes written to range buffer */
+	__u32 range_buffer_size_out;
+
+	/* Block index scanned up to, if the call was interrupted */
+	__u32 index_out;
+};
+
+/*
+ * Create a new mapped file
+ * Argument for INCFS_IOC_CREATE_MAPPED_FILE
+ */
+struct incfs_create_mapped_file_args {
+	/*
+	 * Total size of the new file.
+	 */
+	__aligned_u64 size;
+
+	/*
+	 * File mode. Permissions and dir flag.
+	 */
+	__u16 mode;
+
+	__u16 reserved1;
+
+	__u32 reserved2;
+
+	/*
+	 * A pointer to a null-terminated relative path to the incfs mount
+	 * point
+	 * Max length: PATH_MAX
+	 *
+	 * Equivalent to: char *directory_path;
+	 */
+	__aligned_u64 directory_path;
+
+	/*
+	 * A pointer to a null-terminated file name.
+	 * Max length: PATH_MAX
+	 *
+	 * Equivalent to: char *file_name;
+	 */
+	__aligned_u64 file_name;
+
+	/* Id of source file to map. */
+	incfs_uuid_t source_file_id;
+
+	/*
+	 * Offset in source file to start mapping. Must be a multiple of
+	 * INCFS_DATA_FILE_BLOCK_SIZE
+	 */
+	__aligned_u64 source_offset;
+};
+
+/*
+ * Get information about the blocks in this file
+ * Argument for INCFS_IOC_GET_BLOCK_COUNT
+ */
+struct incfs_get_block_count_args {
+	/* Total number of data blocks in the file */
+	__u32 total_data_blocks_out;
+
+	/* Number of filled data blocks in the file */
+	__u32 filled_data_blocks_out;
+
+	/* Total number of hash blocks in the file */
+	__u32 total_hash_blocks_out;
+
+	/* Number of filled hash blocks in the file */
+	__u32 filled_hash_blocks_out;
+};
+
+/* Description of timeouts for one UID */
+struct incfs_per_uid_read_timeouts {
+	/* UID to apply these timeouts to */
+	__u32 uid;
+
+	/*
+	 * Min time to read any block. Note that this doesn't apply to reads
+	 * which are satisfied from the page cache.
+	 */
+	__u32 min_time_ms;
+
+	/*
+	 * Min time to satisfy a pending read. Must be >= min_time_ms. Any
+	 * pending read which is filled before this time will be delayed so
+	 * that the total read time >= this value.
+	 */
+	__u32 min_pending_time_ms;
+
+	/*
+	 * Max time to satisfy a pending read before the read times out.
+	 * If set to U32_MAX, defaults to the read_timeout_ms= mount option.
+	 * Must be >= min_pending_time_ms
+	 */
+	__u32 max_pending_time_ms;
+};
+
+/*
+ * Get the read timeouts array
+ * Argument for INCFS_IOC_GET_READ_TIMEOUTS
+ */
+struct incfs_get_read_timeouts_args {
+	/*
+	 * A pointer to a buffer to fill with the current timeouts
+	 *
+	 * Equivalent to struct incfs_per_uid_read_timeouts *
+	 */
+	__aligned_u64 timeouts_array;
+
+	/* Size of above buffer in bytes */
+	__u32 timeouts_array_size;
+
+	/* Size used in bytes, or the size needed if -ENOMEM is returned */
+	__u32 timeouts_array_size_out;
+};
+
+/*
+ * Set the read timeouts array
+ * Arguments for INCFS_IOC_SET_READ_TIMEOUTS
+ */
+struct incfs_set_read_timeouts_args {
+	/*
+	 * A pointer to an array containing the new timeouts
+	 * This will replace any existing timeouts
+	 *
+	 * Equivalent to struct incfs_per_uid_read_timeouts *
+	 */
+	__aligned_u64 timeouts_array;
+
+	/* Size of above array in bytes. Must be < 256 */
+	__u32 timeouts_array_size;
+};
+
+#endif /* _UAPI_LINUX_INCREMENTALFS_H */
diff --git a/drivers/staging/android/uapi/ion.h b/include/uapi/linux/ion.h
similarity index 60%
rename from drivers/staging/android/uapi/ion.h
rename to include/uapi/linux/ion.h
index 46c93fc..371e446 100644
--- a/drivers/staging/android/uapi/ion.h
+++ b/include/uapi/linux/ion.h
@@ -12,30 +12,46 @@
 #include <linux/types.h>
 
 /**
- * enum ion_heap_types - list of all possible types of heaps
- * @ION_HEAP_TYPE_SYSTEM:	 memory allocated via vmalloc
- * @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
- * @ION_HEAP_TYPE_CARVEOUT:	 memory allocated from a prereserved
- *				 carveout heap, allocations are physically
- *				 contiguous
- * @ION_HEAP_TYPE_DMA:		 memory allocated via DMA API
- * @ION_NUM_HEAPS:		 helper for iterating over heaps, a bit mask
- *				 is used to identify the heaps, so only 32
- *				 total heap types are supported
+ * enum ion_heap_type - list of all possible types of heaps that Android can use
+ *
+ * @ION_HEAP_TYPE_SYSTEM:	 Reserved heap id for an ion heap that allocates
+ *				 memory using alloc_page(). Also supports
+ *				 deferred free and allocation pools.
+ * @ION_HEAP_TYPE_DMA:		 Reserved heap id for an ion heap that manages
+ *				 a single CMA (contiguous memory allocator)
+ *				 region. Uses standard DMA APIs for
+ *				 managing memory within the CMA region.
  */
 enum ion_heap_type {
-	ION_HEAP_TYPE_SYSTEM,
-	ION_HEAP_TYPE_SYSTEM_CONTIG,
-	ION_HEAP_TYPE_CARVEOUT,
-	ION_HEAP_TYPE_CHUNK,
-	ION_HEAP_TYPE_DMA,
-	ION_HEAP_TYPE_CUSTOM, /*
-			       * must be last so device specific heaps always
-			       * are at the end of this enum
-			       */
+	ION_HEAP_TYPE_SYSTEM = 0,
+	ION_HEAP_TYPE_DMA = 2,
+	/* reserved range for future standard heap types */
+	ION_HEAP_TYPE_CUSTOM = 16,
+	ION_HEAP_TYPE_MAX = 31,
 };
 
-#define ION_NUM_HEAP_IDS		(sizeof(unsigned int) * 8)
+/**
+ * enum ion_heap_id - list of standard heap ids that Android can use
+ *
+ * @ION_HEAP_SYSTEM:		Id for the ION_HEAP_TYPE_SYSTEM heap
+ * @ION_HEAP_DMA_START:		Start of reserved id range for heaps of type
+ *				ION_HEAP_TYPE_DMA
+ * @ION_HEAP_DMA_END:		End of reserved id range for heaps of type
+ *				ION_HEAP_TYPE_DMA
+ * @ION_HEAP_CUSTOM_START:	Start of reserved id range for heaps of custom
+ *				type
+ * @ION_HEAP_CUSTOM_END:	End of reserved id range for heaps of custom
+ *				type
+ */
+enum ion_heap_id {
+	ION_HEAP_SYSTEM = (1 << ION_HEAP_TYPE_SYSTEM),
+	ION_HEAP_DMA_START = (ION_HEAP_SYSTEM << 1),
+	ION_HEAP_DMA_END = (ION_HEAP_DMA_START << 7),
+	ION_HEAP_CUSTOM_START = (ION_HEAP_DMA_END << 1),
+	ION_HEAP_CUSTOM_END = (ION_HEAP_CUSTOM_START << 22),
+};
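+
+/*
+ * Bit positions implied by the definitions above: SYSTEM is bit 0, the
+ * DMA id range spans bits 1..8, and the custom id range spans bits 9..31
+ * of the 32-bit heap id mask.
+ */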
+
+#define ION_NUM_MAX_HEAPS	(32)
 
 /**
  * allocation flags - the lower 16 bits are used by core ion, the upper 16
@@ -46,7 +62,7 @@
  * mappings of this buffer should be cached, ion will do cache maintenance
  * when the buffer is mapped for dma
  */
-#define ION_FLAG_CACHED 1
+#define ION_FLAG_CACHED		1
 
 /**
  * DOC: Ion Userspace API
@@ -124,4 +140,11 @@
 #define ION_IOC_HEAP_QUERY     _IOWR(ION_IOC_MAGIC, 8, \
 					struct ion_heap_query)
 
+/**
+ * DOC: ION_IOC_ABI_VERSION - return ABI version
+ *
+ * Returns ABI version for this driver
+ */
+#define ION_IOC_ABI_VERSION    _IOR(ION_IOC_MAGIC, 9, \
+					__u32)
 #endif /* _UAPI_LINUX_ION_H */
diff --git a/include/uapi/linux/netfilter/xt_IDLETIMER.h b/include/uapi/linux/netfilter/xt_IDLETIMER.h
index 49ddcdc..046ee37 100644
--- a/include/uapi/linux/netfilter/xt_IDLETIMER.h
+++ b/include/uapi/linux/netfilter/xt_IDLETIMER.h
@@ -4,6 +4,7 @@
  * Header file for Xtables timer target module.
  *
  * Copyright (C) 2004, 2010 Nokia Corporation
+ *
  * Written by Timo Teras <ext-timo.teras@nokia.com>
  *
  * Converted to x_tables and forward-ported to 2.6.34
@@ -34,11 +35,19 @@
 #define MAX_IDLETIMER_LABEL_SIZE 28
 #define XT_IDLETIMER_ALARM 0x01
 
+#define NLMSG_MAX_SIZE 64
+
+#define NL_EVENT_TYPE_INACTIVE 0
+#define NL_EVENT_TYPE_ACTIVE 1
+
 struct idletimer_tg_info {
 	__u32 timeout;
 
 	char label[MAX_IDLETIMER_LABEL_SIZE];
 
+	/* Use netlink messages for notification in addition to sysfs */
+	__u8 send_nl_msg;
+
 	/* for kernel module internal use only */
 	struct idletimer_tg *timer __attribute__((aligned(8)));
 };
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 7f08277..c190e22 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -247,4 +247,7 @@
 #define PR_SET_IO_FLUSHER		57
 #define PR_GET_IO_FLUSHER		58
 
+#define PR_SET_VMA		0x53564d41
+# define PR_SET_VMA_ANON_NAME		0
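+
+/*
+ * Illustrative use (a sketch; addr/len describe an existing anonymous
+ * mapping):
+ *
+ *	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, len,
+ *	      (unsigned long)"myheap");
+ */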
+
 #endif /* _LINUX_PRCTL_H */
diff --git a/include/uapi/linux/usb/f_accessory.h b/include/uapi/linux/usb/f_accessory.h
new file mode 100644
index 0000000..0baeb7d
--- /dev/null
+++ b/include/uapi/linux/usb/f_accessory.h
@@ -0,0 +1,146 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_USB_F_ACCESSORY_H
+#define _UAPI_LINUX_USB_F_ACCESSORY_H
+
+/* Use Google Vendor ID when in accessory mode */
+#define USB_ACCESSORY_VENDOR_ID 0x18D1
+
+
+/* Product ID to use when in accessory mode */
+#define USB_ACCESSORY_PRODUCT_ID 0x2D00
+
+/* Product ID to use when in accessory mode and adb is enabled */
+#define USB_ACCESSORY_ADB_PRODUCT_ID 0x2D01
+
+/* Indexes for strings sent by the host via ACCESSORY_SEND_STRING */
+#define ACCESSORY_STRING_MANUFACTURER   0
+#define ACCESSORY_STRING_MODEL          1
+#define ACCESSORY_STRING_DESCRIPTION    2
+#define ACCESSORY_STRING_VERSION        3
+#define ACCESSORY_STRING_URI            4
+#define ACCESSORY_STRING_SERIAL         5
+
+/* Control request for retrieving device's protocol version
+ *
+ *	requestType:    USB_DIR_IN | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_GET_PROTOCOL
+ *	value:          0
+ *	index:          0
+ *	data            version number (16 bits little endian)
+ *                     1 for original accessory support
+ *                     2 adds HID and device to host audio support
+ */
+#define ACCESSORY_GET_PROTOCOL  51
+
+/* Control request for host to send a string to the device
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SEND_STRING
+ *	value:          0
+ *	index:          string ID
+ *	data            zero terminated UTF8 string
+ *
+ *  The device can later retrieve these strings via the
+ *  ACCESSORY_GET_STRING_* ioctls
+ */
+#define ACCESSORY_SEND_STRING   52
+
+/* Control request for starting device in accessory mode.
+ * The host sends this after setting all its strings to the device.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_START
+ *	value:          0
+ *	index:          0
+ *	data            none
+ */
+#define ACCESSORY_START         53
+
+/* Control request for registering a HID device.
+ * Upon registering, a unique ID is sent by the accessory in the
+ * value parameter. This ID will be used for future commands for
+ * the device
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_REGISTER_HID_DEVICE
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          total length of the HID report descriptor
+ *	data            none
+ */
+#define ACCESSORY_REGISTER_HID         54
+
+/* Control request for unregistering a HID device.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_REGISTER_HID
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          0
+ *	data            none
+ */
+#define ACCESSORY_UNREGISTER_HID         55
+
+/* Control request for sending the HID report descriptor.
+ * If the HID descriptor is longer than the endpoint zero max packet size,
+ * the descriptor will be sent in multiple ACCESSORY_SET_HID_REPORT_DESC
+ * commands. The data for the descriptor must be sent sequentially
+ * if multiple packets are needed.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SET_HID_REPORT_DESC
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          offset of data in descriptor
+ *                      (needed when HID descriptor is too big for one packet)
+ *	data            the HID report descriptor
+ */
+#define ACCESSORY_SET_HID_REPORT_DESC         56
+
+/* Control request for sending HID events.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SEND_HID_EVENT
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          0
+ *	data            the HID report for the event
+ */
+#define ACCESSORY_SEND_HID_EVENT         57
+
+/* Control request for setting the audio mode.
+ *
+ *	requestType:	USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SET_AUDIO_MODE
+ *	value:          0 - no audio
+ *                     1 - device to host, 44100 16-bit stereo PCM
+ *	index:          0
+ *	data            none
+ */
+#define ACCESSORY_SET_AUDIO_MODE         58
+
+/* ioctls for retrieving strings set by the host */
+#define ACCESSORY_GET_STRING_MANUFACTURER   _IOW('M', 1, char[256])
+#define ACCESSORY_GET_STRING_MODEL          _IOW('M', 2, char[256])
+#define ACCESSORY_GET_STRING_DESCRIPTION    _IOW('M', 3, char[256])
+#define ACCESSORY_GET_STRING_VERSION        _IOW('M', 4, char[256])
+#define ACCESSORY_GET_STRING_URI            _IOW('M', 5, char[256])
+#define ACCESSORY_GET_STRING_SERIAL         _IOW('M', 6, char[256])
+/* returns 1 if there is a start request pending */
+#define ACCESSORY_IS_START_REQUESTED        _IO('M', 7)
+/* returns audio mode (set via the ACCESSORY_SET_AUDIO_MODE control request) */
+#define ACCESSORY_GET_AUDIO_MODE            _IO('M', 8)
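+
+/*
+ * Illustrative device-side use (a sketch; assumes an open accessory
+ * function fd, e.g. Android's /dev/usb_accessory):
+ *
+ *	char model[256];
+ *	ioctl(fd, ACCESSORY_GET_STRING_MODEL, model);
+ */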
+
+#endif /* _UAPI_LINUX_USB_F_ACCESSORY_H */
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 534eaa4..2c75f91 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -70,7 +70,7 @@
  * Common stuff for both V4L1 and V4L2
  * Moved from videodev.h
  */
-#define VIDEO_MAX_FRAME               32
+#define VIDEO_MAX_FRAME               64
 #define VIDEO_MAX_PLANES               8
 
 /*
@@ -978,7 +978,9 @@
  *			descriptor associated with this plane
  * @data_offset:	offset in the plane to the start of data; usually 0,
  *			unless there is a header in front of the data
- *
+ * @reserved:		a few userspace clients and drivers use the reserved
+ *			fields, and it is up to them how these fields are
+ *			used. V4L2 simply copies the reserved fields between
+ *			them.
+ *
  * Multi-planar buffers consist of one or more planes, e.g. an YCbCr buffer
  * with two planes can have one plane for Y, and another for interleaved CbCr
  * components. Each plane can reside in a separate memory buffer, or even in
@@ -993,6 +995,7 @@
 		__s32		fd;
 	} m;
 	__u32			data_offset;
+	/* reserved fields used by a few userspace clients and drivers */
 	__u32			reserved[11];
 };
 
diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
index 9463db2..d22191a 100644
--- a/include/uapi/linux/xattr.h
+++ b/include/uapi/linux/xattr.h
@@ -18,8 +18,11 @@
 #if __UAPI_DEF_XATTR
 #define __USE_KERNEL_XATTR_DEFS
 
-#define XATTR_CREATE	0x1	/* set value, fail if attr already exists */
-#define XATTR_REPLACE	0x2	/* set value, fail if attr does not exist */
+#define XATTR_CREATE	 0x1	/* set value, fail if attr already exists */
+#define XATTR_REPLACE	 0x2	/* set value, fail if attr does not exist */
+#ifdef __KERNEL__ /* following is kernel internal, colocated for maintenance */
+#define XATTR_NOSECURITY 0x4	/* get value, do not involve security check */
+#endif
 #endif
 
 /* Namespaces */
diff --git a/init/Kconfig b/init/Kconfig
index 02d13ae..49a99db 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1211,6 +1211,17 @@
 	  desktop applications.  Task group autogeneration is currently based
 	  upon task session.
 
+config RT_SOFTINT_OPTIMIZATION
+	bool "Improve RT scheduling during long softint execution"
+	depends on ARM64
+	depends on SMP
+	default n
+	help
+	  Enable an optimization which tries to avoid placing RT tasks on CPUs
+	  occupied by non-preemptible tasks, such as a long softint, or CPUs
+	  which may soon block preemption, such as a CPU running a ksoftirqd
+	  thread that handles slow softints.
+
 config SYSFS_DEPRECATED
 	bool "Enable deprecated sysfs features to support old userspace tools"
 	depends on SYSFS
@@ -2346,3 +2357,5 @@
 # <asm/syscall_wrapper.h>.
 config ARCH_HAS_SYSCALL_WRAPPER
 	def_bool n
+
+source "init/Kconfig.gki"
diff --git a/init/Kconfig.gki b/init/Kconfig.gki
new file mode 100644
index 0000000..fad37010
--- /dev/null
+++ b/init/Kconfig.gki
@@ -0,0 +1,217 @@
+config GKI_HIDDEN_DRM_CONFIGS
+	bool "Hidden DRM configs needed for GKI"
+	select DRM_KMS_HELPER if (HAS_IOMEM && DRM)
+	select DRM_GEM_SHMEM_HELPER if (DRM)
+	select DRM_GEM_CMA_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_MIPI_DSI
+	select DRM_TTM if (HAS_IOMEM && DRM)
+	select VIDEOMODE_HELPERS
+	select WANT_DEV_COREDUMP
+	select INTERVAL_TREE
+	help
+	  Dummy config option used to enable hidden DRM configs.
+	  These are normally selected implicitly when including a
+	  DRM module, but for GKI, the modules are built out-of-tree.
+
+config GKI_HIDDEN_REGMAP_CONFIGS
+	bool "Hidden Regmap configs needed for GKI"
+	select REGMAP_IRQ
+	select REGMAP_MMIO
+	help
+	  Dummy config option used to enable hidden regmap configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_CRYPTO_CONFIGS
+	bool "Hidden CRYPTO configs needed for GKI"
+	select CRYPTO_ENGINE
+	help
+	  Dummy config option used to enable hidden CRYPTO configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_SND_CONFIGS
+	bool "Hidden SND configs needed for GKI"
+	select SND_VMASTER
+	select SND_PCM_ELD
+	select SND_JACK
+	select SND_JACK_INPUT_DEV
+	select SND_INTEL_NHLT if (ACPI)
+	help
+	  Dummy config option used to enable hidden SND configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_SND_SOC_CONFIGS
+	bool "Hidden SND_SOC configs needed for GKI"
+	select SND_SOC_GENERIC_DMAENGINE_PCM if (SND_SOC && SND)
+	select SND_PCM_IEC958
+	select SND_SOC_COMPRESS if (SND_SOC && SND)
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Dummy config option used to enable hidden SND_SOC configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_MMC_CONFIGS
+	bool "Hidden MMC configs needed for GKI"
+	select MMC_SDHCI_IO_ACCESSORS if (MMC_SDHCI)
+	help
+	  Dummy config option used to enable hidden MMC configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_GPIO_CONFIGS
+	bool "Hidden GPIO configs needed for GKI"
+	select PINCTRL_SINGLE if (PINCTRL && OF && HAS_IOMEM)
+	select GPIO_PL061 if (HAS_IOMEM && ARM_AMBA && GPIOLIB)
+	help
+	  Dummy config option used to enable hidden GPIO configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_QCOM_CONFIGS
+	bool "Hidden QCOM configs needed for GKI"
+	select QCOM_SMEM_STATE
+	select QCOM_GDSC if (ARCH_QCOM)
+	select IOMMU_IO_PGTABLE_LPAE if (ARCH_QCOM)
+	help
+	  Dummy config option used to enable hidden QCOM configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_MEDIA_CONFIGS
+	bool "Hidden Media configs needed for GKI"
+	select VIDEOBUF2_CORE
+	select V4L2_MEM2MEM_DEV
+	select MEDIA_CONTROLLER
+	select MEDIA_CONTROLLER_REQUEST_API
+	select MEDIA_SUPPORT
+	select FRAME_VECTOR
+	select CEC_CORE
+	select CEC_NOTIFIER
+	select CEC_PIN
+	help
+	  Dummy config option used to enable hidden media configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_VIRTUAL_CONFIGS
+	bool "Hidden Virtual configs needed for GKI"
+	select HVC_DRIVER
+	help
+	  Dummy config option used to enable hidden virtual device configs.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+# LEGACY_WEXT_ALLCONFIG: discussed upstream and soundly rejected as a
+# problem for GKI alone to solve.  It should be noted that these extensions
+# are in effect deprecated and generally unsupported, and we should pressure
+# the SoC vendors to drop any modules that require these extensions.
+config GKI_LEGACY_WEXT_ALLCONFIG
+	bool "Hidden wireless extension configs needed for GKI"
+	select WIRELESS_EXT
+	select WEXT_CORE
+	select WEXT_PROC
+	select WEXT_SPY
+	select WEXT_PRIV
+	help
+	  Dummy config option used to enable all the hidden legacy wireless
+	  extensions to the core wireless network functionality used by
+	  add-in modules.
+
+	  If you are not building a kernel to be used for a variety of
+	  out-of-kernel built wireless modules, say N here.
+
+config GKI_HIDDEN_USB_CONFIGS
+	bool "Hiddel USB configurations needed for GKI"
+	select USB_PHY
+	help
+	  Dummy config option used to enable all USB related hidden configs.
+	  These configurations are usually only selected by another config
+	  option or a combination of them.
+
+	  If you are not building a kernel to be used for a variety of
+	  out-of-kernel built USB drivers, say N here.
+
+config GKI_HIDDEN_SOC_BUS_CONFIGS
+	bool "Hidden SoC bus configuration needed for GKI"
+	select SOC_BUS
+	help
+	  Dummy config option used to enable the SOC_BUS hidden config.
+	  The configuration is required for SoCs to register themselves to the bus.
+
+	  If you are not building a kernel to be used for a variety of SoCs and
+	  out-of-tree drivers, say N here.
+
+config GKI_HIDDEN_RPMSG_CONFIGS
+	bool "Hidden RPMSG configuration needed for GKI"
+	select RPMSG
+	help
+	  Dummy config option used to enable the hidden RPMSG config.
+	  This configuration is usually only selected by another config
+	  option or a combination of them.
+
+	  If you are not building a kernel to be used for a variety of
+	  out-of-kernel built RPMSG drivers, say N here.
+
+config GKI_HIDDEN_GPU_CONFIGS
+	bool "Hidden GPU configuration needed for GKI"
+	select TRACE_GPU_MEM
+	help
+	  Dummy config option used to enable the hidden GPU config.
+	  These are normally selected implicitly when a module
+	  that relies on it is configured.
+
+config GKI_HIDDEN_IRQ_CONFIGS
+	bool "Hidden IRQ configuration needed for GKI"
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN_HIERARCHY
+	select IRQ_FASTEOI_HIERARCHY_HANDLERS
+	help
+	  Dummy config option used to enable GENERIC_IRQ_CHIP hidden
+	  config, required by various SoC platforms. This is usually
+	  selected by ARCH_*.
+
+config GKI_HIDDEN_HYPERVISOR_CONFIGS
+	bool "Hidden hypervisor configuration needed for GKI"
+	select SYS_HYPERVISOR
+	help
+	  Dummy config option used to enable the SYS_HYPERVISOR hidden
+	  config, required by various SoC platforms. This is usually
+	  selected by XEN or S390.
+
+# Atrocities needed for
+# a) building GKI modules in separate tree, or
+# b) building drivers that are not modularizable
+#
+# All of these should be reworked into an upstream solution
+# if possible.
+#
+config GKI_HACKS_TO_FIX
+	bool "GKI Dummy config options"
+	select GKI_HIDDEN_CRYPTO_CONFIGS
+	select GKI_HIDDEN_DRM_CONFIGS
+	select GKI_HIDDEN_REGMAP_CONFIGS
+	select GKI_HIDDEN_SND_CONFIGS
+	select GKI_HIDDEN_SND_SOC_CONFIGS
+	select GKI_HIDDEN_MMC_CONFIGS
+	select GKI_HIDDEN_GPIO_CONFIGS
+	select GKI_HIDDEN_QCOM_CONFIGS
+	select GKI_LEGACY_WEXT_ALLCONFIG
+	select GKI_HIDDEN_MEDIA_CONFIGS
+	select GKI_HIDDEN_VIRTUAL_CONFIGS
+	select GKI_HIDDEN_USB_CONFIGS
+	select GKI_HIDDEN_SOC_BUS_CONFIGS
+	select GKI_HIDDEN_RPMSG_CONFIGS
+	select GKI_HIDDEN_GPU_CONFIGS
+	select GKI_HIDDEN_IRQ_CONFIGS
+	select GKI_HIDDEN_HYPERVISOR_CONFIGS
+	help
+	  Dummy config option used to enable core functionality used by
+	  modules that may not be selectable in this config.
+
+	  Unless you are building a GKI kernel to be used with modules
+	  built from a different config, say N here.
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index 191c329..5746351 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -510,7 +510,8 @@
 	tcred = get_task_cred(task);
 	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
 	    !uid_eq(cred->euid, tcred->uid) &&
-	    !uid_eq(cred->euid, tcred->suid))
+	    !uid_eq(cred->euid, tcred->suid) &&
+	    !ns_capable(tcred->user_ns, CAP_SYS_NICE))
 		ret = -EACCES;
 	put_cred(tcred);
 	if (ret)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index e41c218..8319e9e 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -4166,6 +4166,7 @@
 		return next;
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(css_next_child);
 
 /**
  * css_next_descendant_pre - find the next descendant for pre-order walk
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 57b5b5d..b43c234 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -66,6 +66,8 @@
 #include <linux/cgroup.h>
 #include <linux/wait.h>
 
+#include <trace/hooks/sched.h>
+
 DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key);
 DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);
 
@@ -105,6 +107,7 @@
 
 	/* user-configured CPUs and Memory Nodes allow to tasks */
 	cpumask_var_t cpus_allowed;
+	cpumask_var_t cpus_requested;
 	nodemask_t mems_allowed;
 
 	/* effective CPUs and Memory Nodes allow to tasks */
@@ -334,17 +337,6 @@
  */
 
 DEFINE_STATIC_PERCPU_RWSEM(cpuset_rwsem);
-
-void cpuset_read_lock(void)
-{
-	percpu_down_read(&cpuset_rwsem);
-}
-
-void cpuset_read_unlock(void)
-{
-	percpu_up_read(&cpuset_rwsem);
-}
-
 static DEFINE_SPINLOCK(callback_lock);
 
 static struct workqueue_struct *cpuset_migrate_mm_wq;
@@ -447,7 +439,7 @@
 
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 {
-	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
+	return	cpumask_subset(p->cpus_requested, q->cpus_requested) &&
 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 		is_mem_exclusive(p) <= is_mem_exclusive(q);
@@ -484,8 +476,13 @@
 	if (!zalloc_cpumask_var(pmask3, GFP_KERNEL))
 		goto free_two;
 
+	if (cs && !zalloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
+		goto free_three;
+
 	return 0;
 
+free_three:
+	free_cpumask_var(*pmask3);
 free_two:
 	free_cpumask_var(*pmask2);
 free_one:
@@ -502,6 +499,7 @@
 {
 	if (cs) {
 		free_cpumask_var(cs->cpus_allowed);
+		free_cpumask_var(cs->cpus_requested);
 		free_cpumask_var(cs->effective_cpus);
 		free_cpumask_var(cs->subparts_cpus);
 	}
@@ -530,6 +528,7 @@
 	}
 
 	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+	cpumask_copy(trial->cpus_requested, cs->cpus_requested);
 	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
 	return trial;
 }
@@ -598,7 +597,7 @@
 	cpuset_for_each_child(c, css, par) {
 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 		    c != cur &&
-		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
+		    cpumask_intersects(trial->cpus_requested, c->cpus_requested))
 			goto out;
 		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 		    c != cur &&
@@ -1024,6 +1023,18 @@
 	put_online_cpus();
 }
 
+static int update_cpus_allowed(struct cpuset *cs, struct task_struct *p,
+				const struct cpumask *new_mask)
+{
+	int ret = -EINVAL;
+
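+	/*
+	 * A vendor module's restricted hook may apply the affinity change
+	 * itself; when it clears ret to 0, the default
+	 * set_cpus_allowed_ptr() path below is skipped.
+	 */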
+	trace_android_rvh_update_cpus_allowed(p, cs->cpus_requested, new_mask, &ret);
+	if (!ret)
+		return ret;
+
+	return set_cpus_allowed_ptr(p, new_mask);
+}
+
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
@@ -1039,7 +1050,7 @@
 
 	css_task_iter_start(&cs->css, 0, &it);
 	while ((task = css_task_iter_next(&it)))
-		set_cpus_allowed_ptr(task, cs->effective_cpus);
+		update_cpus_allowed(cs, task, cs->effective_cpus);
 	css_task_iter_end(&it);
 }
 
@@ -1060,10 +1071,10 @@
 	if (parent->nr_subparts_cpus) {
 		cpumask_or(new_cpus, parent->effective_cpus,
 			   parent->subparts_cpus);
-		cpumask_and(new_cpus, new_cpus, cs->cpus_allowed);
+		cpumask_and(new_cpus, new_cpus, cs->cpus_requested);
 		cpumask_and(new_cpus, new_cpus, cpu_active_mask);
 	} else {
-		cpumask_and(new_cpus, cs->cpus_allowed, parent->effective_cpus);
+		cpumask_and(new_cpus, cs->cpus_requested, parent_cs(cs)->effective_cpus);
 	}
 }
 
@@ -1486,25 +1497,26 @@
 		return -EACCES;
 
 	/*
-	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
+	 * An empty cpus_requested is ok only if the cpuset has no tasks.
 	 * Since cpulist_parse() fails on an empty mask, we special case
 	 * that parsing.  The validate_change() call ensures that cpusets
 	 * with tasks have cpus.
 	 */
 	if (!*buf) {
-		cpumask_clear(trialcs->cpus_allowed);
+		cpumask_clear(trialcs->cpus_requested);
 	} else {
-		retval = cpulist_parse(buf, trialcs->cpus_allowed);
+		retval = cpulist_parse(buf, trialcs->cpus_requested);
 		if (retval < 0)
 			return retval;
-
-		if (!cpumask_subset(trialcs->cpus_allowed,
-				    top_cpuset.cpus_allowed))
-			return -EINVAL;
 	}
 
+	if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
+		return -EINVAL;
+
+	cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
+
 	/* Nothing to do if the cpus didn't change */
-	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
+	if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
 		return 0;
 
 	retval = validate_change(cs, trialcs);
@@ -1532,6 +1544,7 @@
 
 	spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+	cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
 
 	/*
 	 * Make sure that subparts_cpus is a subset of cpus_allowed.
@@ -2189,7 +2202,7 @@
 		 * can_attach beforehand should guarantee that this doesn't
 		 * fail.  TODO: have a better way to handle failure here
 		 */
-		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));
+		WARN_ON_ONCE(update_cpus_allowed(cs, task, cpus_attach));
 
 		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
 		cpuset_update_task_spread_flag(cs, task);
@@ -2413,7 +2426,7 @@
 
 	switch (type) {
 	case FILE_CPULIST:
-		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
+		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested));
 		break;
 	case FILE_MEMLIST:
 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
@@ -2782,6 +2795,7 @@
 	cs->mems_allowed = parent->mems_allowed;
 	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+	cpumask_copy(cs->cpus_requested, parent->cpus_requested);
 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
 	spin_unlock_irq(&callback_lock);
 out_unlock:
@@ -2898,8 +2912,10 @@
 	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL));
 	BUG_ON(!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL));
 	BUG_ON(!zalloc_cpumask_var(&top_cpuset.subparts_cpus, GFP_KERNEL));
+	BUG_ON(!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL));
 
 	cpumask_setall(top_cpuset.cpus_allowed);
+	cpumask_setall(top_cpuset.cpus_requested);
 	nodes_setall(top_cpuset.mems_allowed);
 	cpumask_setall(top_cpuset.effective_cpus);
 	nodes_setall(top_cpuset.effective_mems);
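
The cpuset hunks above split what the user wrote (cpus_requested) from what is
currently usable (cpus_allowed), so a mask written while some CPUs are offline
can be restored when they come back. A minimal userspace sketch of the visible
behavior, assuming a cgroup-v1 cpuset mounted at /sys/fs/cgroup/cpuset with a
child group "demo" (both paths are assumptions, not part of this patch):

	/* cpuset_demo.c - requested vs. effective cpuset masks (sketch) */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static void show(const char *path)
	{
		char buf[256];
		int fd = open(path, O_RDONLY);
		ssize_t n;

		if (fd < 0) {
			perror(path);
			return;
		}
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("%s: %s", path, buf);
		}
		close(fd);
	}

	int main(void)
	{
		int fd = open("/sys/fs/cgroup/cpuset/demo/cpuset.cpus", O_WRONLY);

		/* Request CPUs 0-3; some of them may be offline right now. */
		if (fd >= 0) {
			if (write(fd, "0-3", 3) < 0)
				perror("write cpuset.cpus");
			close(fd);
		}

		/* With this patch, the written mask survives hotplug ... */
		show("/sys/fs/cgroup/cpuset/demo/cpuset.cpus");
		/* ... while the effective mask tracks what is online now. */
		show("/sys/fs/cgroup/cpuset/demo/cpuset.effective_cpus");
		return 0;
	}
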
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6ff2578..1a7ad66 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -32,6 +32,7 @@
 #include <linux/relay.h>
 #include <linux/slab.h>
 #include <linux/percpu-rwsem.h>
+#include <uapi/linux/sched/types.h>
 
 #include <trace/events/power.h>
 #define CREATE_TRACE_POINTS
@@ -1186,6 +1187,25 @@
 	complete_ap_thread(st, true);
 }
 
+static int switch_to_rt_policy(void)
+{
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+	unsigned int policy = current->policy;
+
+	if (policy == SCHED_NORMAL)
+		/* Switch to SCHED_FIFO from SCHED_NORMAL. */
+		return sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+	else
+		return 1;
+}
+
+static int switch_to_fair_policy(void)
+{
+	struct sched_param param = { .sched_priority = 0 };
+
+	return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
+}
+
 /* Requires cpu_add_remove_lock to be held */
 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
 {
@@ -1249,6 +1269,7 @@
 static int cpu_up(unsigned int cpu, enum cpuhp_state target)
 {
 	int err = 0;
+	int switch_err;
 
 	if (!cpu_possible(cpu)) {
 		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
@@ -1259,9 +1280,21 @@
 		return -EINVAL;
 	}
 
+	/*
+	 * A CPU hotplug operation consists of many steps, each of which
+	 * calls back into a core kernel subsystem. A CPU hot-add may be
+	 * preempted by other CFS tasks, delaying the whole operation.
+	 * Switch the current task from SCHED_NORMAL to SCHED_FIFO so
+	 * that hot-add completes quickly even under heavy load and the
+	 * new CPU can start handling work sooner.
+	 */
+
+	switch_err = switch_to_rt_policy();
+
 	err = try_online_node(cpu_to_node(cpu));
 	if (err)
-		return err;
+		goto switch_out;
 
 	cpu_maps_update_begin();
 
@@ -1277,6 +1310,14 @@
 	err = _cpu_up(cpu, 0, target);
 out:
 	cpu_maps_update_done();
+switch_out:
+	if (!switch_err) {
+		switch_err = switch_to_fair_policy();
+		if (switch_err)
+			pr_err("Hotplug policy switch err=%d Task %s pid=%d\n",
+				switch_err, current->comm, current->pid);
+	}
+
 	return err;
 }
 
@@ -1412,6 +1453,7 @@
 void thaw_secondary_cpus(void)
 {
 	int cpu, error;
+	struct device *cpu_device;
 
 	/* Allow everyone to use the CPU hotplug again */
 	cpu_maps_update_begin();
@@ -1429,6 +1471,12 @@
 		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
 		if (!error) {
 			pr_info("CPU%d is up\n", cpu);
+			cpu_device = get_cpu_device(cpu);
+			if (!cpu_device)
+				pr_err("%s: failed to get cpu%d device\n",
+				       __func__, cpu);
+			else
+				kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
 			continue;
 		}
 		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index 16b95ff..b836768 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -58,6 +58,7 @@
 #endif
 
 struct cma *dma_contiguous_default_area;
+EXPORT_SYMBOL_GPL(dma_contiguous_default_area);
 
 /*
  * Default global CMA area size can be defined in kernel's .config.
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 06c1115..b6b106f 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -43,6 +43,7 @@
 
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
+EXPORT_SYMBOL_GPL(dma_direct_get_required_mask);
 
 static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 				  u64 *phys_limit)
@@ -313,6 +314,7 @@
 	dma_free_contiguous(dev, page, size);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(dma_direct_alloc);
 
 void dma_direct_free_pages(struct device *dev, size_t size,
 		struct page *page, dma_addr_t dma_addr,
@@ -331,6 +333,7 @@
 
 	dma_free_contiguous(dev, page, size);
 }
+EXPORT_SYMBOL_GPL(dma_direct_free);
 
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
     defined(CONFIG_SWIOTLB)
diff --git a/kernel/dma/ops_helpers.c b/kernel/dma/ops_helpers.c
index 910ae69..1c09451 100644
--- a/kernel/dma/ops_helpers.c
+++ b/kernel/dma/ops_helpers.c
@@ -20,6 +20,7 @@
 		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(dma_common_get_sgtable);
 
 /*
  * Create userspace mapping for the DMA-coherent memory.
@@ -49,6 +50,7 @@
 	return -ENXIO;
 #endif /* CONFIG_MMU */
 }
+EXPORT_SYMBOL_GPL(dma_common_mmap);
 
 struct page *dma_common_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index dc568ca..1800c99 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4341,6 +4341,7 @@
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(perf_event_read_local);
 
 static int perf_event_read(struct perf_event *event, bool group)
 {
diff --git a/kernel/fork.c b/kernel/fork.c
index 6d26638..f2d60e3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -96,6 +96,7 @@
 #include <linux/kasan.h>
 #include <linux/scs.h>
 #include <linux/io_uring.h>
+#include <linux/cpufreq_times.h>
 
 #include <asm/pgalloc.h>
 #include <linux/uaccess.h>
@@ -138,6 +139,7 @@
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
+EXPORT_SYMBOL_GPL(tasklist_lock);
 
 #ifdef CONFIG_PROVE_RCU
 int lockdep_tasklist_lock_is_held(void)
@@ -441,6 +443,7 @@
 
 void free_task(struct task_struct *tsk)
 {
+	cpufreq_task_times_exit(tsk);
 	scs_release(tsk);
 
 #ifndef CONFIG_THREAD_INFO_IN_TASK
@@ -1939,6 +1942,8 @@
 	if (!p)
 		goto fork_out;
 
+	cpufreq_task_times_init(p);
+
 	/*
 	 * This _must_ happen before we call free_task(), i.e. before we jump
 	 * to any of the bad_fork_* labels. This is to avoid freeing
@@ -2459,6 +2464,8 @@
 	if (IS_ERR(p))
 		return PTR_ERR(p);
 
+	cpufreq_task_times_alloc(p);
+
 	/*
 	 * Do this prior waking up the new thread - the thread pointer
 	 * might get invalid after that point, if the thread exits quickly.
diff --git a/kernel/futex.c b/kernel/futex.c
index 00259c7..fc6e2a7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -44,6 +44,7 @@
 #include <asm/futex.h>
 
 #include "locking/rtmutex_common.h"
+#include <trace/hooks/futex.h>
 
 /*
  * READ this before attempting to hack on futexes!
@@ -2219,6 +2220,7 @@
 static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
 {
 	int prio;
+	bool already_on_hb = false;
 
 	/*
 	 * The priority used to register this element is
@@ -2231,7 +2233,9 @@
 	prio = min(current->normal_prio, MAX_RT_PRIO);
 
 	plist_node_init(&q->list, prio);
-	plist_add(&q->list, &hb->chain);
+	trace_android_vh_alter_futex_plist_add(&q->list, &hb->chain, &already_on_hb);
+	if (!already_on_hb)
+		plist_add(&q->list, &hb->chain);
 	q->task = current;
 }
 
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b9b9618..9e385bd 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -14,6 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/irqdomain.h>
+#include <linux/wakeup_reason.h>
 
 #include <trace/events/irq.h>
 
@@ -507,8 +508,22 @@
 	 * If the interrupt is not in progress and is not an armed
 	 * wakeup interrupt, proceed.
 	 */
-	if (!irqd_has_set(&desc->irq_data, mask))
+	if (!irqd_has_set(&desc->irq_data, mask)) {
+#ifdef CONFIG_PM_SLEEP
+		if (unlikely(desc->no_suspend_depth &&
+			     irqd_is_wakeup_set(&desc->irq_data))) {
+			unsigned int irq = irq_desc_get_irq(desc);
+			const char *name = "(unnamed)";
+
+			if (desc->action && desc->action->name)
+				name = desc->action->name;
+
+			log_abnormal_wakeup_reason("misconfigured IRQ %u %s",
+						   irq, name);
+		}
+#endif
 		return true;
+	}
 
 	/*
 	 * If the interrupt is an armed wakeup source, mark it pending
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 02236b1..cf8d4f75 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -42,7 +42,7 @@
 		 * If this happens then there was a missed IRQ fixup at some
 		 * point. Warn about it and enforce fixup.
 		 */
-		pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
+		pr_debug("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n",
 			cpumask_pr_args(m), d->irq, cpu);
 		return true;
 	}
@@ -166,7 +166,7 @@
 		raw_spin_unlock(&desc->lock);
 
 		if (affinity_broken) {
-			pr_warn_ratelimited("IRQ %u: no longer affine to CPU%u\n",
+			pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
 					    irq, smp_processor_id());
 		}
 	}
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index eca8396..e0ed16d 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -111,7 +111,7 @@
 	return true;
 #endif /* CONFIG_SMP */
 }
-
+EXPORT_SYMBOL_GPL(irq_work_queue_on);
 
 bool irq_work_needs_cpu(void)
 {
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 933a625..ff3477d 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -455,6 +455,7 @@
 {
 	__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
 }
+EXPORT_SYMBOL_GPL(kthread_bind_mask);
 
 /**
  * kthread_bind - bind a just-created kthread to a cpu.
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 5352ce5..0a4a377 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -36,6 +36,8 @@
 # include "mutex.h"
 #endif
 
+#include <trace/hooks/dtask.h>
+
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
@@ -1002,6 +1004,7 @@
 
 	waiter.task = current;
 
+	trace_android_vh_mutex_wait_start(lock);
 	set_current_state(state);
 	for (;;) {
 		/*
@@ -1057,6 +1060,7 @@
 	spin_lock(&lock->wait_lock);
 acquired:
 	__set_current_state(TASK_RUNNING);
+	trace_android_vh_mutex_wait_finish(lock);
 
 	if (use_ww_ctx && ww_ctx) {
 		/*
@@ -1087,6 +1091,7 @@
 
 err:
 	__set_current_state(TASK_RUNNING);
+	trace_android_vh_mutex_wait_finish(lock);
 	mutex_remove_waiter(lock, &waiter, current);
 err_early_kill:
 	spin_unlock(&lock->wait_lock);
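
The trace_android_vh_* / trace_android_rvh_* calls sprinkled through futex.c
and mutex.c above, and rwsem.c and the scheduler below, are Android vendor
hooks: bare tracepoints with no trace event, to which a vendor module can
attach a handler. A hedged sketch of attaching to the mutex wait hooks added
above; register_trace_android_vh_<name>() is generated by the DECLARE_HOOK()
machinery behind <trace/hooks/dtask.h>, so verify the exact handler signatures
against the tree before relying on them:

	/* Sketch of a vendor module consuming the new mutex hooks. */
	#include <linux/module.h>
	#include <linux/mutex.h>
	#include <trace/hooks/dtask.h>

	static void demo_wait_start(void *data, struct mutex *lock)
	{
		/* e.g. record a timestamp or boost the waiter here. */
	}

	static void demo_wait_finish(void *data, struct mutex *lock)
	{
		/* ... and undo it here. */
	}

	static int __init demo_init(void)
	{
		int ret;

		ret = register_trace_android_vh_mutex_wait_start(demo_wait_start, NULL);
		if (ret)
			return ret;
		ret = register_trace_android_vh_mutex_wait_finish(demo_wait_finish, NULL);
		if (ret)
			unregister_trace_android_vh_mutex_wait_start(demo_wait_start, NULL);
		return ret;
	}
	module_init(demo_init);

	MODULE_LICENSE("GPL");
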
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index f11b9bd..46bb563 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -29,6 +29,8 @@
 #include <linux/atomic.h>
 
 #include "lock_events.h"
+#include <trace/hooks/rwsem.h>
+#include <trace/hooks/dtask.h>
 
 /*
  * The least significant 3 bits of the owner value has the following
@@ -340,6 +342,7 @@
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 	osq_lock_init(&sem->osq);
 #endif
+	trace_android_vh_rwsem_init(sem);
 }
 EXPORT_SYMBOL(__init_rwsem);
 
@@ -995,6 +998,7 @@
 	struct rwsem_waiter waiter;
 	DEFINE_WAKE_Q(wake_q);
 	bool wake = false;
+	bool already_on_list = false;
 
 	/*
 	 * Save the current read-owner of rwsem, if available, and the
@@ -1056,7 +1060,11 @@
 		}
 		adjustment += RWSEM_FLAG_WAITERS;
 	}
-	list_add_tail(&waiter.list, &sem->wait_list);
+	trace_android_vh_alter_rwsem_list_add(
+					&waiter,
+					sem, &already_on_list);
+	if (!already_on_list)
+		list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we're now waiting on the lock, but no longer actively locking */
 	if (adjustment)
@@ -1078,10 +1086,12 @@
 		    (adjustment & RWSEM_FLAG_WAITERS)))
 		rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 
+	trace_android_vh_rwsem_wake(sem);
 	raw_spin_unlock_irq(&sem->wait_lock);
 	wake_up_q(&wake_q);
 
 	/* wait to be given the lock */
+	trace_android_vh_rwsem_read_wait_start(sem);
 	for (;;) {
 		set_current_state(state);
 		if (!smp_load_acquire(&waiter.task)) {
@@ -1101,6 +1111,7 @@
 	}
 
 	__set_current_state(TASK_RUNNING);
+	trace_android_vh_rwsem_read_wait_finish(sem);
 	lockevent_inc(rwsem_rlock);
 	return sem;
 
@@ -1112,6 +1123,7 @@
 	}
 	raw_spin_unlock_irq(&sem->wait_lock);
 	__set_current_state(TASK_RUNNING);
+	trace_android_vh_rwsem_read_wait_finish(sem);
 	lockevent_inc(rwsem_rlock_fail);
 	return ERR_PTR(-EINTR);
 }
@@ -1141,6 +1153,7 @@
 	struct rwsem_waiter waiter;
 	struct rw_semaphore *ret = sem;
 	DEFINE_WAKE_Q(wake_q);
+	bool already_on_list = false;
 
 	/* do optimistic spinning and steal lock if possible */
 	if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
@@ -1169,7 +1182,11 @@
 	/* account for this before adding a new element to the list */
 	wstate = list_empty(&sem->wait_list) ? WRITER_FIRST : WRITER_NOT_FIRST;
 
-	list_add_tail(&waiter.list, &sem->wait_list);
+	trace_android_vh_alter_rwsem_list_add(
+					&waiter,
+					sem, &already_on_list);
+	if (!already_on_list)
+		list_add_tail(&waiter.list, &sem->wait_list);
 
 	/* we're now waiting on the lock */
 	if (wstate == WRITER_NOT_FIRST) {
@@ -1205,7 +1222,9 @@
 	}
 
 wait:
+	trace_android_vh_rwsem_wake(sem);
 	/* wait until we successfully acquire the lock */
+	trace_android_vh_rwsem_write_wait_start(sem);
 	set_current_state(state);
 	for (;;) {
 		if (rwsem_try_write_lock(sem, wstate)) {
@@ -1265,6 +1284,7 @@
 		raw_spin_lock_irq(&sem->wait_lock);
 	}
 	__set_current_state(TASK_RUNNING);
+	trace_android_vh_rwsem_write_wait_finish(sem);
 	list_del(&waiter.list);
 	rwsem_disable_reader_optspin(sem, disable_rspin);
 	raw_spin_unlock_irq(&sem->wait_lock);
@@ -1274,6 +1294,7 @@
 
 out_nolock:
 	__set_current_state(TASK_RUNNING);
+	trace_android_vh_rwsem_write_wait_finish(sem);
 	raw_spin_lock_irq(&sem->wait_lock);
 	list_del(&waiter.list);
 
@@ -1581,6 +1602,7 @@
 void up_write(struct rw_semaphore *sem)
 {
 	rwsem_release(&sem->dep_map, _RET_IP_);
+	trace_android_vh_rwsem_write_finished(sem);
 	__up_write(sem);
 }
 EXPORT_SYMBOL(up_write);
@@ -1591,6 +1613,7 @@
 void downgrade_write(struct rw_semaphore *sem)
 {
 	lock_downgrade(&sem->dep_map, _RET_IP_);
+	trace_android_vh_rwsem_write_finished(sem);
 	__downgrade_write(sem);
 }
 EXPORT_SYMBOL(downgrade_write);
diff --git a/kernel/pid.c b/kernel/pid.c
index a96bc4b..dab5557 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -421,6 +421,7 @@
 {
 	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
 }
+EXPORT_SYMBOL_GPL(find_task_by_vpid);
 
 struct task_struct *find_get_task_by_vpid(pid_t nr)
 {
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 5899260..9770575 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -17,4 +17,5 @@
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
 
+obj-$(CONFIG_SUSPEND)		+= wakeup_reason.o
 obj-$(CONFIG_ENERGY_MODEL)	+= energy_model.o
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 45b054b..cc06230 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -85,18 +85,21 @@
 	elapsed = ktime_sub(end, start);
 	elapsed_msecs = ktime_to_ms(elapsed);
 
-	if (todo) {
+	if (wakeup) {
 		pr_cont("\n");
-		pr_err("Freezing of tasks %s after %d.%03d seconds "
-		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
-		       wakeup ? "aborted" : "failed",
+		pr_err("Freezing of tasks aborted after %d.%03d seconds",
+		       elapsed_msecs / 1000, elapsed_msecs % 1000);
+	} else if (todo) {
+		pr_cont("\n");
+		pr_err("Freezing of tasks failed after %d.%03d seconds"
+		       " (%d tasks refusing to freeze, wq_busy=%d):\n",
 		       elapsed_msecs / 1000, elapsed_msecs % 1000,
 		       todo - wq_busy, wq_busy);
 
 		if (wq_busy)
 			show_workqueue_state();
 
-		if (!wakeup || pm_debug_messages_on) {
+		if (pm_debug_messages_on) {
 			read_lock(&tasklist_lock);
 			for_each_process_thread(g, p) {
 				if (p != current && !freezer_should_skip(p)
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 32391ac..54595837 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -30,6 +30,7 @@
 #include <trace/events/power.h>
 #include <linux/compiler.h>
 #include <linux/moduleparam.h>
+#include <linux/wakeup_reason.h>
 
 #include "power.h"
 
@@ -139,6 +140,7 @@
 		}
 
 		pm_wakeup_clear(false);
+		clear_wakeup_reasons();
 
 		s2idle_enter();
 	}
@@ -359,6 +361,7 @@
 	if (!error)
 		return 0;
 
+	log_suspend_abort_reason("One or more tasks refusing to freeze");
 	suspend_stats.failed_freeze++;
 	dpm_save_failed_step(SUSPEND_FREEZE);
 	pm_notifier_call_chain(PM_POST_SUSPEND);
@@ -388,7 +391,7 @@
  */
 static int suspend_enter(suspend_state_t state, bool *wakeup)
 {
-	int error;
+	int error, last_dev;
 
 	error = platform_suspend_prepare(state);
 	if (error)
@@ -396,7 +399,11 @@
 
 	error = dpm_suspend_late(PMSG_SUSPEND);
 	if (error) {
+		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+		last_dev %= REC_FAILED_NUM;
 		pr_err("late suspend of devices failed\n");
+		log_suspend_abort_reason("late suspend of %s device failed",
+					 suspend_stats.failed_devs[last_dev]);
 		goto Platform_finish;
 	}
 	error = platform_suspend_prepare_late(state);
@@ -405,7 +412,11 @@
 
 	error = dpm_suspend_noirq(PMSG_SUSPEND);
 	if (error) {
+		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+		last_dev %= REC_FAILED_NUM;
 		pr_err("noirq suspend of devices failed\n");
+		log_suspend_abort_reason("noirq suspend of %s device failed",
+					 suspend_stats.failed_devs[last_dev]);
 		goto Platform_early_resume;
 	}
 	error = platform_suspend_prepare_noirq(state);
@@ -421,8 +432,10 @@
 	}
 
 	error = suspend_disable_secondary_cpus();
-	if (error || suspend_test(TEST_CPUS))
+	if (error || suspend_test(TEST_CPUS)) {
+		log_suspend_abort_reason("Disabling non-boot cpus failed");
 		goto Enable_cpus;
+	}
 
 	arch_suspend_disable_irqs();
 	BUG_ON(!irqs_disabled());
@@ -493,6 +506,8 @@
 	error = dpm_suspend_start(PMSG_SUSPEND);
 	if (error) {
 		pr_err("Some devices failed to suspend, or early wake event detected\n");
+		log_suspend_abort_reason(
+				"Some devices failed to suspend, or early wake event detected");
 		goto Recover_platform;
 	}
 	suspend_test_finish("suspend devices");
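
The two-line index dance above recovers the most recent entry of the
suspend_stats.failed_devs[] ring: last_failed_dev points at the *next* slot to
be written (dpm_save_failed_dev() increments it after writing), so stepping
back one slot, with the + REC_FAILED_NUM keeping the modulo non-negative,
yields the device that just failed. In isolation:

	/* Ring-index arithmetic used by the log_suspend_abort_reason() hunks. */
	#include <stdio.h>

	#define REC_FAILED_NUM 2	/* matches include/linux/suspend.h */

	int main(void)
	{
		int next_write;

		for (next_write = 0; next_write < REC_FAILED_NUM; next_write++) {
			int last = (next_write + REC_FAILED_NUM - 1) % REC_FAILED_NUM;

			printf("next slot %d -> most recent entry %d\n",
			       next_write, last);
		}
		return 0;
	}
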
diff --git a/kernel/power/wakeup_reason.c b/kernel/power/wakeup_reason.c
new file mode 100644
index 0000000..8fefaa3
--- /dev/null
+++ b/kernel/power/wakeup_reason.c
@@ -0,0 +1,438 @@
+/*
+ * kernel/power/wakeup_reason.c
+ *
+ * Logs the reasons which caused the kernel to resume from
+ * the suspend mode.
+ *
+ * Copyright (C) 2020 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/wakeup_reason.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+#include <linux/slab.h>
+
+/*
+ * struct wakeup_irq_node - stores data and relationships for IRQs logged as
+ * either base or nested wakeup reasons during suspend/resume flow.
+ * @siblings - for membership on leaf or parent IRQ lists
+ * @irq      - the IRQ number
+ * @irq_name - the name associated with the IRQ, or a default if none
+ */
+struct wakeup_irq_node {
+	struct list_head siblings;
+	int irq;
+	const char *irq_name;
+};
+
+enum wakeup_reason_flag {
+	RESUME_NONE = 0,
+	RESUME_IRQ,
+	RESUME_ABORT,
+	RESUME_ABNORMAL,
+};
+
+static DEFINE_SPINLOCK(wakeup_reason_lock);
+
+static LIST_HEAD(leaf_irqs);   /* kept in ascending IRQ sorted order */
+static LIST_HEAD(parent_irqs); /* unordered */
+
+static struct kmem_cache *wakeup_irq_nodes_cache;
+
+static const char *default_irq_name = "(unnamed)";
+
+static struct kobject *kobj;
+
+static bool capture_reasons;
+static int wakeup_reason;
+static char non_irq_wake_reason[MAX_SUSPEND_ABORT_LEN];
+
+static ktime_t last_monotime; /* monotonic time before last suspend */
+static ktime_t curr_monotime; /* monotonic time after last suspend */
+static ktime_t last_stime; /* monotonic boottime offset before last suspend */
+static ktime_t curr_stime; /* monotonic boottime offset after last suspend */
+
+static void init_node(struct wakeup_irq_node *p, int irq)
+{
+	struct irq_desc *desc;
+
+	INIT_LIST_HEAD(&p->siblings);
+
+	p->irq = irq;
+	desc = irq_to_desc(irq);
+	if (desc && desc->action && desc->action->name)
+		p->irq_name = desc->action->name;
+	else
+		p->irq_name = default_irq_name;
+}
+
+static struct wakeup_irq_node *create_node(int irq)
+{
+	struct wakeup_irq_node *result;
+
+	result = kmem_cache_alloc(wakeup_irq_nodes_cache, GFP_ATOMIC);
+	if (unlikely(!result))
+		pr_warn("Failed to log wakeup IRQ %d\n", irq);
+	else
+		init_node(result, irq);
+
+	return result;
+}
+
+static void delete_list(struct list_head *head)
+{
+	struct wakeup_irq_node *n;
+
+	while (!list_empty(head)) {
+		n = list_first_entry(head, struct wakeup_irq_node, siblings);
+		list_del(&n->siblings);
+		kmem_cache_free(wakeup_irq_nodes_cache, n);
+	}
+}
+
+static bool add_sibling_node_sorted(struct list_head *head, int irq)
+{
+	struct wakeup_irq_node *n = NULL;
+	struct list_head *predecessor = head;
+
+	if (unlikely(WARN_ON(!head)))
+		return false;
+
+	if (!list_empty(head))
+		list_for_each_entry(n, head, siblings) {
+			if (n->irq < irq)
+				predecessor = &n->siblings;
+			else if (n->irq == irq)
+				return true;
+			else
+				break;
+		}
+
+	n = create_node(irq);
+	if (n) {
+		list_add(&n->siblings, predecessor);
+		return true;
+	}
+
+	return false;
+}
+
+static struct wakeup_irq_node *find_node_in_list(struct list_head *head,
+						 int irq)
+{
+	struct wakeup_irq_node *n;
+
+	if (unlikely(WARN_ON(!head)))
+		return NULL;
+
+	list_for_each_entry(n, head, siblings)
+		if (n->irq == irq)
+			return n;
+
+	return NULL;
+}
+
+void log_irq_wakeup_reason(int irq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+	if (wakeup_reason == RESUME_ABNORMAL || wakeup_reason == RESUME_ABORT) {
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	if (!capture_reasons) {
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	if (find_node_in_list(&parent_irqs, irq) == NULL)
+		add_sibling_node_sorted(&leaf_irqs, irq);
+
+	wakeup_reason = RESUME_IRQ;
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+void log_threaded_irq_wakeup_reason(int irq, int parent_irq)
+{
+	struct wakeup_irq_node *parent;
+	unsigned long flags;
+
+	/*
+	 * Intentionally unsynchronized.  Calls that come in after we have
+	 * resumed should have a fast exit path since there's no work to be
+	 * done, and any coherence issue that could cause a wrong value here is
+	 * both highly improbable - given the set/clear timing - and very low
+	 * impact (parent IRQ gets logged instead of the specific child).
+	 */
+	if (!capture_reasons)
+		return;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	if (wakeup_reason == RESUME_ABNORMAL || wakeup_reason == RESUME_ABORT) {
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	if (!capture_reasons || (find_node_in_list(&leaf_irqs, irq) != NULL)) {
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	parent = find_node_in_list(&parent_irqs, parent_irq);
+	if (parent != NULL)
+		add_sibling_node_sorted(&leaf_irqs, irq);
+	else {
+		parent = find_node_in_list(&leaf_irqs, parent_irq);
+		if (parent != NULL) {
+			list_del_init(&parent->siblings);
+			list_add_tail(&parent->siblings, &parent_irqs);
+			add_sibling_node_sorted(&leaf_irqs, irq);
+		}
+	}
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+EXPORT_SYMBOL_GPL(log_threaded_irq_wakeup_reason);
+
+static void __log_abort_or_abnormal_wake(bool abort, const char *fmt,
+					 va_list args)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	/* Suspend abort or abnormal wake reason has already been logged. */
+	if (wakeup_reason != RESUME_NONE) {
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	if (abort)
+		wakeup_reason = RESUME_ABORT;
+	else
+		wakeup_reason = RESUME_ABNORMAL;
+
+	vsnprintf(non_irq_wake_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+void log_suspend_abort_reason(const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	__log_abort_or_abnormal_wake(true, fmt, args);
+	va_end(args);
+}
+EXPORT_SYMBOL_GPL(log_suspend_abort_reason);
+
+void log_abnormal_wakeup_reason(const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	__log_abort_or_abnormal_wake(false, fmt, args);
+	va_end(args);
+}
+EXPORT_SYMBOL_GPL(log_abnormal_wakeup_reason);
+
+void clear_wakeup_reasons(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	delete_list(&leaf_irqs);
+	delete_list(&parent_irqs);
+	wakeup_reason = RESUME_NONE;
+	capture_reasons = true;
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+static void print_wakeup_sources(void)
+{
+	struct wakeup_irq_node *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	capture_reasons = false;
+
+	if (wakeup_reason == RESUME_ABORT) {
+		pr_info("Abort: %s\n", non_irq_wake_reason);
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return;
+	}
+
+	if (wakeup_reason == RESUME_IRQ && !list_empty(&leaf_irqs))
+		list_for_each_entry(n, &leaf_irqs, siblings)
+			pr_info("Resume caused by IRQ %d, %s\n", n->irq,
+				n->irq_name);
+	else if (wakeup_reason == RESUME_ABNORMAL)
+		pr_info("Resume caused by %s\n", non_irq_wake_reason);
+	else
+		pr_info("Resume cause unknown\n");
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+static ssize_t last_resume_reason_show(struct kobject *kobj,
+				       struct kobj_attribute *attr, char *buf)
+{
+	ssize_t buf_offset = 0;
+	struct wakeup_irq_node *n;
+	unsigned long flags;
+
+	spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+	if (wakeup_reason == RESUME_ABORT) {
+		buf_offset = scnprintf(buf, PAGE_SIZE, "Abort: %s",
+				       non_irq_wake_reason);
+		spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+		return buf_offset;
+	}
+
+	if (wakeup_reason == RESUME_IRQ && !list_empty(&leaf_irqs))
+		list_for_each_entry(n, &leaf_irqs, siblings)
+			buf_offset += scnprintf(buf + buf_offset,
+						PAGE_SIZE - buf_offset,
+						"%d %s\n", n->irq, n->irq_name);
+	else if (wakeup_reason == RESUME_ABNORMAL)
+		buf_offset = scnprintf(buf, PAGE_SIZE, "-1 %s",
+				       non_irq_wake_reason);
+
+	spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+
+	return buf_offset;
+}
+
+static ssize_t last_suspend_time_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	struct timespec64 sleep_time;
+	struct timespec64 total_time;
+	struct timespec64 suspend_resume_time;
+
+	/*
+	 * total_time is calculated from monotonic bootoffsets because
+	 * unlike CLOCK_MONOTONIC it include the time spent in suspend state.
+	 */
+	total_time = ktime_to_timespec64(ktime_sub(curr_stime, last_stime));
+
+	/*
+	 * suspend_resume_time is the CLOCK_MONOTONIC interval between
+	 * the point just before entering suspend and the point just
+	 * after resuming.
+	 */
+	suspend_resume_time =
+		ktime_to_timespec64(ktime_sub(curr_monotime, last_monotime));
+
+	/* sleep_time = total_time - suspend_resume_time */
+	sleep_time = timespec64_sub(total_time, suspend_resume_time);
+
+	/* Export suspend_resume_time and sleep_time in pair here. */
+	return sprintf(buf, "%llu.%09lu %llu.%09lu\n",
+		       (unsigned long long)suspend_resume_time.tv_sec,
+		       suspend_resume_time.tv_nsec,
+		       (unsigned long long)sleep_time.tv_sec,
+		       sleep_time.tv_nsec);
+}
+
+static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
+static struct kobj_attribute suspend_time = __ATTR_RO(last_suspend_time);
+
+static struct attribute *attrs[] = {
+	&resume_reason.attr,
+	&suspend_time.attr,
+	NULL,
+};
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
+
+/* Detects a suspend and clears all the previous wakeup reasons. */
+static int wakeup_reason_pm_event(struct notifier_block *notifier,
+		unsigned long pm_event, void *unused)
+{
+	switch (pm_event) {
+	case PM_SUSPEND_PREPARE:
+		/* monotonic time since boot */
+		last_monotime = ktime_get();
+		/* monotonic time since boot including the time spent in suspend */
+		last_stime = ktime_get_boottime();
+		clear_wakeup_reasons();
+		break;
+	case PM_POST_SUSPEND:
+		/* monotonic time since boot */
+		curr_monotime = ktime_get();
+		/* monotonic time since boot including the time spent in suspend */
+		curr_stime = ktime_get_boottime();
+		print_wakeup_sources();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block wakeup_reason_pm_notifier_block = {
+	.notifier_call = wakeup_reason_pm_event,
+};
+
+static int __init wakeup_reason_init(void)
+{
+	if (register_pm_notifier(&wakeup_reason_pm_notifier_block)) {
+		pr_warn("[%s] failed to register PM notifier\n", __func__);
+		goto fail;
+	}
+
+	kobj = kobject_create_and_add("wakeup_reasons", kernel_kobj);
+	if (!kobj) {
+		pr_warn("[%s] failed to create a sysfs kobject\n", __func__);
+		goto fail_unregister_pm_notifier;
+	}
+
+	if (sysfs_create_group(kobj, &attr_group)) {
+		pr_warn("[%s] failed to create a sysfs group\n", __func__);
+		goto fail_kobject_put;
+	}
+
+	wakeup_irq_nodes_cache =
+		kmem_cache_create("wakeup_irq_node_cache",
+				  sizeof(struct wakeup_irq_node), 0, 0, NULL);
+	if (!wakeup_irq_nodes_cache)
+		goto fail_remove_group;
+
+	return 0;
+
+fail_remove_group:
+	sysfs_remove_group(kobj, &attr_group);
+fail_kobject_put:
+	kobject_put(kobj);
+fail_unregister_pm_notifier:
+	unregister_pm_notifier(&wakeup_reason_pm_notifier_block);
+fail:
+	return 1;
+}
+
+late_initcall(wakeup_reason_init);
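
The new code exposes its results under /sys/kernel/wakeup_reasons/. A small
reader for the two attributes created above; last_resume_reason prints
"<irq> <name>" lines (or "Abort: ..." / "-1 <reason>"), and last_suspend_time
prints the suspend/resume overhead and the time actually spent asleep, both in
seconds:

	/* Dump the sysfs nodes created by wakeup_reason_init(). */
	#include <stdio.h>

	static void dump(const char *path)
	{
		char buf[4096];
		FILE *f = fopen(path, "r");
		size_t n;

		if (!f) {
			perror(path);
			return;
		}
		n = fread(buf, 1, sizeof(buf) - 1, f);
		buf[n] = '\0';
		printf("%s:\n%s\n", path, buf);
		fclose(f);
	}

	int main(void)
	{
		dump("/sys/kernel/wakeup_reasons/last_resume_reason");
		dump("/sys/kernel/wakeup_reasons/last_suspend_time");
		return 0;
	}
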
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index bc1e3b5..99ea2f9 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -54,6 +54,8 @@
 #include <trace/events/initcall.h>
 #define CREATE_TRACE_POINTS
 #include <trace/events/printk.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/printk.h>
 
 #include "printk_ringbuffer.h"
 #include "console_cmdline.h"
@@ -2293,6 +2295,12 @@
  */
 static int console_cpu_notify(unsigned int cpu)
 {
+	int flag = 0;
+
+	trace_android_vh_printk_hotplug(&flag);
+	if (flag)
+		return 0;
+
 	if (!cpuhp_tasks_frozen) {
 		/* If trylock fails, someone else is doing the printing */
 		if (console_trylock())
@@ -3075,6 +3083,7 @@
 
 	return r;
 }
+EXPORT_SYMBOL_GPL(printk_deferred);
 
 /*
  * printk rate limiting, lifted from the networking subsystem.
diff --git a/kernel/reboot.c b/kernel/reboot.c
index af6f23d..aa274e1 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -32,7 +32,9 @@
 #define DEFAULT_REBOOT_MODE
 #endif
 enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
+EXPORT_SYMBOL_GPL(reboot_mode);
 enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;
+EXPORT_SYMBOL_GPL(panic_reboot_mode);
 
 /*
  * This variable is used privately to keep track of whether or not
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e7e4534..e0e57f4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -27,6 +27,9 @@
 #include "pelt.h"
 #include "smp.h"
 
+#include <trace/hooks/sched.h>
+#include <trace/hooks/dtask.h>
+
 /*
  * Export tracepoints that act as a bare tracehook (ie: have no trace event
  * associated with them) to allow external modules to probe them.
@@ -43,6 +46,7 @@
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+EXPORT_SYMBOL_GPL(runqueues);
 
 #ifdef CONFIG_SCHED_DEBUG
 /*
@@ -197,6 +201,7 @@
 			cpu_relax();
 	}
 }
+EXPORT_SYMBOL_GPL(__task_rq_lock);
 
 /*
  * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
@@ -319,6 +324,7 @@
 	rq->clock += delta;
 	update_rq_clock_task(rq, delta);
 }
+EXPORT_SYMBOL_GPL(update_rq_clock);
 
 static inline void
 rq_csd_init(struct rq *rq, call_single_data_t *csd, smp_call_func_t func)
@@ -533,6 +539,7 @@
 	 */
 	*head->lastp = node;
 	head->lastp = &node->next;
+	head->count++;
 	return true;
 }
 
@@ -589,12 +596,14 @@
 		/* Task can safely be re-inserted now: */
 		node = node->next;
 		task->wake_q.next = NULL;
+		task->wake_q_head = head;
 
 		/*
 		 * wake_up_process() executes a full barrier, which pairs with
 		 * the queueing in wake_q_add() so as not to miss wakeups.
 		 */
 		wake_up_process(task);
+		task->wake_q_head = NULL;
 		put_task_struct(task);
 	}
 }
@@ -1119,6 +1128,7 @@
 
 	return (unsigned long)uc_eff.value;
 }
+EXPORT_SYMBOL_GPL(uclamp_eff_value);
 
 /*
  * When a task is enqueued on a rq, the clamp bucket currently defined by the
@@ -1570,6 +1580,8 @@
 
 	uclamp_rq_inc(rq, p);
 	p->sched_class->enqueue_task(rq, p, flags);
+
+	trace_android_rvh_enqueue_task(rq, p);
 }
 
 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -1584,6 +1596,8 @@
 
 	uclamp_rq_dec(rq, p);
 	p->sched_class->dequeue_task(rq, p, flags);
+
+	trace_android_rvh_dequeue_task(rq, p);
 }
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -1592,6 +1606,7 @@
 
 	p->on_rq = TASK_ON_RQ_QUEUED;
 }
+EXPORT_SYMBOL_GPL(activate_task);
 
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
@@ -1599,6 +1614,7 @@
 
 	dequeue_task(rq, p, flags);
 }
+EXPORT_SYMBOL_GPL(deactivate_task);
 
 /*
  * __normal_prio - return the priority that is based on the static prio
@@ -1693,6 +1709,7 @@
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
 		rq_clock_skip_update(rq);
 }
+EXPORT_SYMBOL_GPL(check_preempt_curr);
 
 #ifdef CONFIG_SMP
 
@@ -1733,12 +1750,24 @@
 static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
 				   struct task_struct *p, int new_cpu)
 {
+	int detached = 0;
+
 	lockdep_assert_held(&rq->lock);
 
+	/*
+	 * The vendor hook may drop the lock temporarily, so
+	 * pass the rq flags to unpin the lock. We expect the
+	 * rq lock to be held after return.
+	 */
+	trace_android_rvh_migrate_queued_task(rq, rf, p, new_cpu, &detached);
+	if (detached)
+		goto attach;
+
 	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
 	set_task_cpu(p, new_cpu);
-	rq_unlock(rq, rf);
 
+attach:
+	rq_unlock(rq, rf);
 	rq = cpu_rq(new_cpu);
 
 	rq_lock(rq, rf);
@@ -2002,8 +2031,8 @@
 
 	__set_task_cpu(p, new_cpu);
 }
+EXPORT_SYMBOL_GPL(set_task_cpu);
 
-#ifdef CONFIG_NUMA_BALANCING
 static void __migrate_swap_task(struct task_struct *p, int cpu)
 {
 	if (task_on_rq_queued(p)) {
@@ -2118,7 +2147,7 @@
 out:
 	return ret;
 }
-#endif /* CONFIG_NUMA_BALANCING */
+EXPORT_SYMBOL_GPL(migrate_swap);
 
 /*
  * wait_task_inactive - wait for a thread to unschedule.
@@ -2280,7 +2309,11 @@
 	int nid = cpu_to_node(cpu);
 	const struct cpumask *nodemask = NULL;
 	enum { cpuset, possible, fail } state = cpuset;
-	int dest_cpu;
+	int dest_cpu = -1;
+
+	trace_android_rvh_select_fallback_rq(cpu, p, &dest_cpu);
+	if (dest_cpu >= 0)
+		return dest_cpu;
 
 	/*
 	 * If the node that the CPU is on has been offlined, cpu_to_node()
@@ -2638,6 +2671,7 @@
 out:
 	rcu_read_unlock();
 }
+EXPORT_SYMBOL_GPL(wake_up_if_idle);
 
 bool cpus_share_cache(int this_cpu, int that_cpu)
 {
@@ -3237,6 +3271,7 @@
 	 * Make sure we do not leak PI boosting priority to the child.
 	 */
 	p->prio = current->normal_prio;
+	trace_android_rvh_prepare_prio_fork(p);
 
 	uclamp_fork(p);
 
@@ -3269,6 +3304,7 @@
 		p->sched_class = &fair_sched_class;
 
 	init_entity_runnable_average(&p->se);
+	trace_android_rvh_finish_prio_fork(p);
 
 	/*
 	 * The child is not yet in the pid-hash so no cgroup attach races,
@@ -4009,6 +4045,8 @@
 	rq->idle_balance = idle_cpu(cpu);
 	trigger_load_balance(rq);
 #endif
+
+	trace_android_vh_scheduler_tick(rq);
 }
 
 #ifdef CONFIG_NO_HZ_FULL
@@ -4838,6 +4876,7 @@
 	struct rq_flags rf;
 	struct rq *rq;
 
+	trace_android_rvh_rtmutex_prepare_setprio(p, pi_task);
 	/* XXX used to be waiter->prio, not waiter->task->prio */
 	prio = __rt_effective_prio(pi_task, p->normal_prio);
 
@@ -4970,6 +5009,7 @@
 	 */
 	rq = task_rq_lock(p, &rf);
 	update_rq_clock(rq);
+	trace_android_rvh_set_user_nice(p, &nice);
 
 	/*
 	 * The RT priorities are set via sched_setscheduler(), but we still
@@ -5094,6 +5134,7 @@
 
 	return 1;
 }
+EXPORT_SYMBOL_GPL(idle_cpu);
 
 /**
  * available_idle_cpu - is a given CPU idle for enqueuing work.
@@ -5192,6 +5233,8 @@
 		p->sched_class = &rt_sched_class;
 	else
 		p->sched_class = &fair_sched_class;
+
+	trace_android_rvh_setscheduler(p);
 }
 
 /*
@@ -5320,9 +5363,6 @@
 			return retval;
 	}
 
-	if (pi)
-		cpuset_read_lock();
-
 	/*
 	 * Make sure no PI-waiters arrive (or leave) while we are
 	 * changing the priority of the task:
@@ -5397,8 +5437,6 @@
 	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
 		policy = oldpolicy = -1;
 		task_rq_unlock(rq, p, &rf);
-		if (pi)
-			cpuset_read_unlock();
 		goto recheck;
 	}
 
@@ -5459,10 +5497,8 @@
 	preempt_disable();
 	task_rq_unlock(rq, p, &rf);
 
-	if (pi) {
-		cpuset_read_unlock();
+	if (pi)
 		rt_mutex_adjust_pi(p);
-	}
 
 	/* Run balance callbacks after we've adjusted the PI chain: */
 	balance_callback(rq);
@@ -5472,8 +5508,6 @@
 
 unlock:
 	task_rq_unlock(rq, p, &rf);
-	if (pi)
-		cpuset_read_unlock();
 	return retval;
 }
 
@@ -5512,11 +5546,13 @@
 {
 	return _sched_setscheduler(p, policy, param, true);
 }
+EXPORT_SYMBOL_GPL(sched_setscheduler);
 
 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
 {
 	return __sched_setscheduler(p, attr, true, true);
 }
+EXPORT_SYMBOL_GPL(sched_setattr);
 
 int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
 {
@@ -5541,6 +5577,7 @@
 {
 	return _sched_setscheduler(p, policy, param, false);
 }
+EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
 
 /*
  * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
@@ -5602,14 +5639,9 @@
 	rcu_read_lock();
 	retval = -ESRCH;
 	p = find_process_by_pid(pid);
-	if (likely(p))
-		get_task_struct(p);
-	rcu_read_unlock();
-
-	if (likely(p)) {
+	if (p != NULL)
 		retval = sched_setscheduler(p, policy, &lparam);
-		put_task_struct(p);
-	}
+	rcu_read_unlock();
 
 	return retval;
 }
@@ -5974,6 +6006,9 @@
 			goto again;
 		}
 	}
+
+	trace_android_rvh_sched_setaffinity(p, in_mask, &retval);
+
 out_free_new_mask:
 	free_cpumask_var(new_mask);
 out_free_cpus_allowed:
@@ -6453,6 +6488,7 @@
 		(unsigned long)task_thread_info(p)->flags);
 
 	print_worker_info(KERN_INFO, p);
+	trace_android_vh_sched_show_task(p);
 	show_stack(p, NULL, KERN_INFO);
 	put_task_stack(p);
 }
@@ -7051,7 +7087,9 @@
  * Every task in system belongs to this group at bootup.
  */
 struct task_group root_task_group;
+EXPORT_SYMBOL_GPL(root_task_group);
 LIST_HEAD(task_groups);
+EXPORT_SYMBOL_GPL(task_groups);
 
 /* Cacheline aligned slab cache for task_group */
 static struct kmem_cache *task_group_cache __read_mostly;
@@ -7866,6 +7904,27 @@
 	cpu_uclamp_print(sf, UCLAMP_MAX);
 	return 0;
 }
+
+static int cpu_uclamp_ls_write_u64(struct cgroup_subsys_state *css,
+				   struct cftype *cftype, u64 ls)
+{
+	struct task_group *tg;
+
+	if (ls > 1)
+		return -EINVAL;
+	tg = css_tg(css);
+	tg->latency_sensitive = (unsigned int) ls;
+
+	return 0;
+}
+
+static u64 cpu_uclamp_ls_read_u64(struct cgroup_subsys_state *css,
+				  struct cftype *cft)
+{
+	struct task_group *tg = css_tg(css);
+
+	return (u64) tg->latency_sensitive;
+}
 #endif /* CONFIG_UCLAMP_TASK_GROUP */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -8234,6 +8293,12 @@
 		.seq_show = cpu_uclamp_max_show,
 		.write = cpu_uclamp_max_write,
 	},
+	{
+		.name = "uclamp.latency_sensitive",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.read_u64 = cpu_uclamp_ls_read_u64,
+		.write_u64 = cpu_uclamp_ls_write_u64,
+	},
 #endif
 	{ }	/* Terminate */
 };
@@ -8415,6 +8480,12 @@
 		.seq_show = cpu_uclamp_max_show,
 		.write = cpu_uclamp_max_write,
 	},
+	{
+		.name = "uclamp.latency_sensitive",
+		.flags = CFTYPE_NOT_ON_ROOT,
+		.read_u64 = cpu_uclamp_ls_read_u64,
+		.write_u64 = cpu_uclamp_ls_write_u64,
+	},
 #endif
 	{ }	/* terminate */
 };
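
Both the cgroup-v1 and cgroup-v2 attribute tables above gain
cpu.uclamp.latency_sensitive; cpu_uclamp_ls_write_u64() accepts only 0 or 1,
and find_energy_efficient_cpu() (further down, in fair.c) changes its
placement policy when the flag is set. Toggling it from userspace, with the
cgroup mount point and group name as assumptions:

	/* Mark a cgroup latency-sensitive (paths are illustrative). */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		const char *knob =
			"/sys/fs/cgroup/demo/cpu.uclamp.latency_sensitive";
		int fd = open(knob, O_WRONLY);

		if (fd < 0) {
			perror(knob);
			return 1;
		}
		/* Only "0" and "1" are accepted; larger values get -EINVAL. */
		if (write(fd, "1", 1) != 1)
			perror("write");
		close(fd);
		return 0;
	}
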
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 97d318b..e41f4fb 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -12,6 +12,7 @@
 
 #include <linux/sched/cpufreq.h>
 #include <trace/events/power.h>
+#include <trace/hooks/sched.h>
 
 #define IOWAIT_BOOST_MIN	(SCHED_CAPACITY_SCALE / 8)
 
@@ -102,6 +103,7 @@
 static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
 				   unsigned int next_freq)
 {
+	bool should_update = true;
+
 	if (!sg_policy->need_freq_update) {
 		if (sg_policy->next_freq == next_freq)
 			return false;
@@ -109,6 +111,10 @@
 		sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
 	}
 
+	trace_android_rvh_set_sugov_update(sg_policy, next_freq, &should_update);
+	if (!should_update)
+		return false;
+
 	sg_policy->next_freq = next_freq;
 	sg_policy->last_freq_update_time = time;
 
@@ -277,6 +283,7 @@
 
 	return min(max, util);
 }
+EXPORT_SYMBOL_GPL(schedutil_cpu_util);
 
 static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
 {
@@ -661,6 +668,7 @@
 	if (policy->fast_switch_enabled)
 		return 0;
 
+	trace_android_vh_set_sugov_sched_attr(&attr);
 	kthread_init_work(&sg_policy->work, sugov_work);
 	kthread_init_worker(&sg_policy->worker);
 	thread = kthread_create(kthread_worker_fn, &sg_policy->worker,
@@ -897,36 +905,3 @@
 #endif
 
 cpufreq_governor_init(schedutil_gov);
-
-#ifdef CONFIG_ENERGY_MODEL
-extern bool sched_energy_update;
-extern struct mutex sched_energy_mutex;
-
-static void rebuild_sd_workfn(struct work_struct *work)
-{
-	mutex_lock(&sched_energy_mutex);
-	sched_energy_update = true;
-	rebuild_sched_domains();
-	sched_energy_update = false;
-	mutex_unlock(&sched_energy_mutex);
-}
-static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
-
-/*
- * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
- * on governor changes to make sure the scheduler knows about it.
- */
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
-				  struct cpufreq_governor *old_gov)
-{
-	if (old_gov == &schedutil_gov || policy->governor == &schedutil_gov) {
-		/*
-		 * When called from the cpufreq_register_driver() path, the
-		 * cpu_hotplug_lock is already held, so use a work item to
-		 * avoid nested locking in rebuild_sched_domains().
-		 */
-		schedule_work(&rebuild_sd_work);
-	}
-
-}
-#endif
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 0033731..69bc0de 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -41,8 +41,29 @@
 	return cpupri;
 }
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+/**
+ * drop_nopreempt_cpus - remove likely nonpreemptible cpus from the mask
+ * @lowest_mask: mask with selected CPUs (non-NULL)
+ */
+static void
+drop_nopreempt_cpus(struct cpumask *lowest_mask)
+{
+	unsigned int cpu = cpumask_first(lowest_mask);
+
+	while (cpu < nr_cpu_ids) {
+		/* unlocked access */
+		struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr);
+		if (task_may_not_preempt(task, cpu)) {
+			cpumask_clear_cpu(cpu, lowest_mask);
+		}
+		cpu = cpumask_next(cpu, lowest_mask);
+	}
+}
+#endif
+
 static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
-				struct cpumask *lowest_mask, int idx)
+				struct cpumask *lowest_mask, int idx,
+				bool drop_nopreempts)
 {
 	struct cpupri_vec *vec  = &cp->pri_to_cpu[idx];
 	int skip = 0;
@@ -79,6 +100,11 @@
 	if (lowest_mask) {
 		cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+		if (drop_nopreempts)
+			drop_nopreempt_cpus(lowest_mask);
+#endif
+
 		/*
 		 * We have to ensure that we have at least one bit
 		 * still set in the array, since the map could have
@@ -123,12 +149,16 @@
 {
 	int task_pri = convert_prio(p->prio);
 	int idx, cpu;
+	bool drop_nopreempts = task_pri <= MAX_RT_PRIO;
 
 	BUG_ON(task_pri >= CPUPRI_NR_PRIORITIES);
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+retry:
+#endif
 	for (idx = 0; idx < task_pri; idx++) {
 
-		if (!__cpupri_find(cp, p, lowest_mask, idx))
+		if (!__cpupri_find(cp, p, lowest_mask, idx, drop_nopreempts))
 			continue;
 
 		if (!lowest_mask || !fitness_fn)
@@ -151,6 +181,17 @@
 	}
 
 	/*
+	 * If we couldn't find any preemptible CPUs, retry without
+	 * dropping the non-preemptible ones, so we still pick the
+	 * lowest-priority target and avoid priority inversion.
+	 */
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+	if (drop_nopreempts) {
+		drop_nopreempts = false;
+		goto retry;
+	}
+#endif
+
+	/*
 	 * If we failed to find a fitting lowest_mask, kick off a new search
 	 * but without taking into account any fitness criteria this time.
 	 *
@@ -290,3 +331,16 @@
 	for (i = 0; i < CPUPRI_NR_PRIORITIES; i++)
 		free_cpumask_var(cp->pri_to_cpu[i].mask);
 }
+
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+/*
+ * cpupri_check_rt - check if the current CPU is running an RT task
+ * Should be called from an rcu-sched read-side critical section.
+ */
+bool cpupri_check_rt(void)
+{
+	int cpu = raw_smp_processor_id();
+
+	return cpu_rq(cpu)->rd->cpupri.cpu_to_pri[cpu] > CPUPRI_NORMAL;
+}
+#endif
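
The cpupri change is a two-pass search: the first pass drops CPUs that look
stuck in long softirqs, and only if that leaves no candidate does the retry
consider them again, preferring a possible softirq delay over outright
priority inversion. The shape of that fallback, reduced to plain C over a toy
CPU table (all values illustrative):

	/* Two-pass pick: prefer preemptible CPUs, fall back to any. */
	#include <stdbool.h>
	#include <stdio.h>

	#define NR_CPUS 4

	static bool busy_in_softirq[NR_CPUS] = { true, true, false, true };
	static bool eligible[NR_CPUS]        = { true, false, false, true };

	static int pick_cpu(void)
	{
		bool drop_nopreempts = true;
		int cpu;

	retry:
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (!eligible[cpu])
				continue;
			/* First pass: skip likely-stuck CPUs. */
			if (drop_nopreempts && busy_in_softirq[cpu])
				continue;
			return cpu;
		}
		if (drop_nopreempts) {	/* nothing preemptible: widen search */
			drop_nopreempts = false;
			goto retry;
		}
		return -1;
	}

	int main(void)
	{
		printf("picked CPU %d\n", pick_cpu());	/* prints 0 here */
		return 0;
	}
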
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 5a55d23..50fddb9 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -2,6 +2,7 @@
 /*
  * Simple CPU accounting cgroup controller
  */
+#include <linux/cpufreq_times.h>
 #include "sched.h"
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
@@ -18,6 +19,7 @@
  * compromise in place of having locks on each irq in account_system_time.
  */
 DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_irqtime);
 
 static int sched_clock_irqtime;
 
@@ -129,6 +131,9 @@
 
 	/* Account for user time used */
 	acct_account_cputime(p);
+
+	/* Account power usage for user time */
+	cpufreq_acct_update_power(p, cputime);
 }
 
 /*
@@ -173,6 +178,9 @@
 
 	/* Account for system time used */
 	acct_account_cputime(p);
+
+	/* Account power usage for system time */
+	cpufreq_acct_update_power(p, cputime);
 }
 
 /*
@@ -460,6 +468,7 @@
 	*ut = cputime.utime;
 	*st = cputime.stime;
 }
+EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);
 
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */
 
@@ -629,6 +638,8 @@
 	thread_group_cputime(p, &cputime);
 	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
 }
+EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);
+
 #endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
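
cpufreq_acct_update_power() feeds the Android per-task cpufreq-time accounting
that the cpufreq_task_times_*() calls in kernel/fork.c set up and tear down.
With CONFIG_CPU_FREQ_TIMES the accumulated per-frequency runtime is typically
surfaced as /proc/<pid>/time_in_state; that path comes from the accompanying
driver, not from this hunk, so treat it as an assumption:

	/* Dump this task's time-in-frequency-state table, if exposed. */
	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/self/time_in_state", "r");

		if (!f) {
			/* Kernel likely lacks CONFIG_CPU_FREQ_TIMES. */
			perror("/proc/self/time_in_state");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
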
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2357921..c794f8bc 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -81,6 +81,7 @@
 struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
 #include "features.h"
 };
+EXPORT_SYMBOL_GPL(sched_feat_keys);
 
 #undef SCHED_FEAT
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ae7ceba..0b739c6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -22,6 +22,8 @@
  */
 #include "sched.h"
 
+#include <trace/hooks/sched.h>
+
 /*
  * Targeted preemption latency for CPU-bound tasks:
  *
@@ -981,6 +983,7 @@
 			}
 
 			trace_sched_stat_blocked(tsk, delta);
+			trace_sched_blocked_reason(tsk);
 
 			/*
 			 * Blocking time is in units of nanosecs, so shift by
@@ -5478,6 +5481,7 @@
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
 	int task_new = !(flags & ENQUEUE_WAKEUP);
+	int should_iowait_boost;
 
 	/*
 	 * The code below (indirectly) updates schedutil which looks at
@@ -5492,7 +5496,9 @@
 	 * utilization updates, so do it here explicitly with the IOWAIT flag
 	 * passed.
 	 */
-	if (p->in_iowait)
+	should_iowait_boost = p->in_iowait;
+	trace_android_rvh_set_iowait(p, &should_iowait_boost);
+	if (should_iowait_boost)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
 	for_each_sched_entity(se) {
@@ -6576,20 +6582,38 @@
  * other use-cases too. So, until someone finds a better way to solve this,
  * let's keep things simple by re-using the existing slow path.
  */
-static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
 {
 	unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
 	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+	int max_spare_cap_cpu_ls = prev_cpu, best_idle_cpu = -1;
+	unsigned long max_spare_cap_ls = 0, target_cap;
 	unsigned long cpu_cap, util, base_energy = 0;
+	bool boosted, latency_sensitive = false;
+	unsigned int min_exit_lat = UINT_MAX;
 	int cpu, best_energy_cpu = prev_cpu;
+	struct cpuidle_state *idle;
 	struct sched_domain *sd;
 	struct perf_domain *pd;
+	int new_cpu = INT_MAX;
+
+	trace_android_rvh_find_energy_efficient_cpu(p, prev_cpu, sync, &new_cpu);
+	if (new_cpu != INT_MAX)
+		return new_cpu;
 
 	rcu_read_lock();
 	pd = rcu_dereference(rd->pd);
 	if (!pd || READ_ONCE(rd->overutilized))
 		goto fail;
 
+	cpu = smp_processor_id();
+	if (sync && cpu_rq(cpu)->nr_running == 1 &&
+	    cpumask_test_cpu(cpu, p->cpus_ptr) &&
+	    task_fits_capacity(p, capacity_of(cpu))) {
+		rcu_read_unlock();
+		return cpu;
+	}
+
 	/*
 	 * Energy-aware wake-up happens on the lowest sched_domain starting
 	 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
@@ -6604,6 +6628,10 @@
 	if (!task_util_est(p))
 		goto unlock;
 
+	latency_sensitive = uclamp_latency_sensitive(p);
+	boosted = uclamp_boosted(p);
+	target_cap = boosted ? 0 : ULONG_MAX;
+
 	for (; pd; pd = pd->next) {
 		unsigned long cur_delta, spare_cap, max_spare_cap = 0;
 		unsigned long base_energy_pd;
@@ -6634,7 +6662,7 @@
 				continue;
 
 			/* Always use prev_cpu as a candidate. */
-			if (cpu == prev_cpu) {
+			if (!latency_sensitive && cpu == prev_cpu) {
 				prev_delta = compute_energy(p, prev_cpu, pd);
 				prev_delta -= base_energy_pd;
 				best_delta = min(best_delta, prev_delta);
@@ -6648,10 +6676,34 @@
 				max_spare_cap = spare_cap;
 				max_spare_cap_cpu = cpu;
 			}
+
+			if (!latency_sensitive)
+				continue;
+
+			if (idle_cpu(cpu)) {
+				cpu_cap = capacity_orig_of(cpu);
+				if (boosted && cpu_cap < target_cap)
+					continue;
+				if (!boosted && cpu_cap > target_cap)
+					continue;
+				idle = idle_get_state(cpu_rq(cpu));
+				if (idle && idle->exit_latency > min_exit_lat &&
+						cpu_cap == target_cap)
+					continue;
+
+				if (idle)
+					min_exit_lat = idle->exit_latency;
+				target_cap = cpu_cap;
+				best_idle_cpu = cpu;
+			} else if (spare_cap > max_spare_cap_ls) {
+				max_spare_cap_ls = spare_cap;
+				max_spare_cap_cpu_ls = cpu;
+			}
 		}
 
 		/* Evaluate the energy impact of using this CPU. */
-		if (max_spare_cap_cpu >= 0 && max_spare_cap_cpu != prev_cpu) {
+		if (!latency_sensitive && max_spare_cap_cpu >= 0 &&
+						max_spare_cap_cpu != prev_cpu) {
 			cur_delta = compute_energy(p, max_spare_cap_cpu, pd);
 			cur_delta -= base_energy_pd;
 			if (cur_delta < best_delta) {
@@ -6663,6 +6715,9 @@
 unlock:
 	rcu_read_unlock();
 
+	if (latency_sensitive)
+		return best_idle_cpu >= 0 ? best_idle_cpu : max_spare_cap_cpu_ls;
+
 	/*
 	 * Pick the best CPU if prev_cpu cannot be used, or if it saves at
 	 * least 6% of the energy used by prev_cpu.
@@ -6701,12 +6756,18 @@
 	int new_cpu = prev_cpu;
 	int want_affine = 0;
 	int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
+	int target_cpu = -1;
+
+	trace_android_rvh_select_task_rq_fair(p, prev_cpu, sd_flag,
+			wake_flags, &target_cpu);
+	if (target_cpu >= 0)
+		return target_cpu;
 
 	if (sd_flag & SD_BALANCE_WAKE) {
 		record_wakee(p);
 
 		if (sched_energy_enabled()) {
-			new_cpu = find_energy_efficient_cpu(p, prev_cpu);
+			new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
 			if (new_cpu >= 0)
 				return new_cpu;
 			new_cpu = prev_cpu;
@@ -7411,6 +7472,7 @@
 	enum fbq_type		fbq_type;
 	enum migration_type	migration_type;
 	struct list_head	tasks;
+	struct rq_flags		*src_rq_rf;
 };
 
 /*
@@ -7517,9 +7579,14 @@
 int can_migrate_task(struct task_struct *p, struct lb_env *env)
 {
 	int tsk_cache_hot;
+	int can_migrate = 1;
 
 	lockdep_assert_held(&env->src_rq->lock);
 
+	trace_android_rvh_can_migrate_task(p, env->dst_cpu, &can_migrate);
+	if (!can_migrate)
+		return 0;
+
 	/*
 	 * We do not migrate tasks that are:
 	 * 1) throttled_lb_pair, or
@@ -7596,8 +7663,20 @@
  */
 static void detach_task(struct task_struct *p, struct lb_env *env)
 {
+	int detached = 0;
+
 	lockdep_assert_held(&env->src_rq->lock);
 
+	/*
+	 * The vendor hook may drop the lock temporarily, so
+	 * pass the rq flags to unpin the lock. We expect the
+	 * rq lock to be held after return.
+	 */
+	trace_android_rvh_migrate_queued_task(env->src_rq, env->src_rq_rf, p,
+					      env->dst_cpu, &detached);
+	if (detached)
+		return;
+
 	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
 	set_task_cpu(p, env->dst_cpu);
 }
@@ -9196,8 +9275,12 @@
 
 	if (sched_energy_enabled()) {
 		struct root_domain *rd = env->dst_rq->rd;
+		int out_balance = 1;
 
-		if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized))
+		trace_android_rvh_find_busiest_group(sds.busiest, env->dst_rq,
+					&out_balance);
+		if (rcu_dereference(rd->pd) && !READ_ONCE(rd->overutilized)
+					&& out_balance)
 			goto out_balanced;
 	}
 
@@ -9316,7 +9399,12 @@
 	struct rq *busiest = NULL, *rq;
 	unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
 	unsigned int busiest_nr = 0;
-	int i;
+	int i, done = 0;
+
+	trace_android_rvh_find_busiest_queue(env->dst_cpu, group, env->cpus,
+					     &busiest, &done);
+	if (done)
+		return busiest;
 
 	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
 		unsigned long capacity, load, util;
@@ -9594,6 +9682,7 @@
 
 more_balance:
 		rq_lock_irqsave(busiest, &rf);
+		env.src_rq_rf = &rf;
 		update_rq_clock(busiest);
 
 		/*
@@ -9903,6 +9992,7 @@
 			 * about DST_PINNED.
 			 */
 			.flags		= LBF_DST_PINNED,
+			.src_rq_rf	= &rf,
 		};
 
 		schedstat_inc(sd->alb_count);
@@ -10122,6 +10212,7 @@
 	struct sched_domain *sd;
 	int nr_busy, i, cpu = rq->cpu;
 	unsigned int flags = 0;
+	int done = 0;
 
 	if (unlikely(rq->idle_balance))
 		return;
@@ -10146,6 +10237,10 @@
 	if (time_before(now, nohz.next_balance))
 		goto out;
 
+	trace_android_rvh_sched_nohz_balancer_kick(rq, &flags, &done);
+	if (done)
+		goto out;
+
 	if (rq->nr_running >= 2) {
 		flags = NOHZ_KICK_MASK;
 		goto out;
@@ -10520,6 +10615,11 @@
 	struct sched_domain *sd;
 	int pulled_task = 0;
 	u64 curr_cost = 0;
+	int done = 0;
+
+	trace_android_rvh_sched_newidle_balance(this_rq, rf, &pulled_task, &done);
+	if (done)
+		return pulled_task;
 
 	update_misfit_status(NULL, this_rq);
 	/*
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index d2a6556..b5837e2 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -75,6 +75,7 @@
 	loads[1] = (avenrun[1] + offset) << shift;
 	loads[2] = (avenrun[2] + offset) << shift;
 }
+EXPORT_SYMBOL_GPL(get_avenrun);
 
 long calc_load_fold_active(struct rq *this_rq, long adjust)
 {
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 2c613e1..c6cc21b9 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -306,6 +306,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(__update_load_avg_blocked_se);
 
 int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 49ec096..4533121 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -7,6 +7,8 @@
 
 #include "pelt.h"
 
+#include <trace/hooks/sched.h>
+
 int sched_rr_timeslice = RR_TIMESLICE;
 int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
 /* More than 4 hours if BW_SHIFT equals 20. */
@@ -971,6 +973,13 @@
 		if (likely(rt_b->rt_runtime)) {
 			rt_rq->rt_throttled = 1;
 			printk_deferred_once("sched: RT throttling activated\n");
+
+			trace_android_vh_dump_throttled_rt_tasks(
+				raw_smp_processor_id(),
+				rq_clock(rq_of_rt_rq(rt_rq)),
+				sched_rt_period(rt_rq),
+				runtime,
+				hrtimer_get_expires_ns(&rt_b->rt_period_timer));
 		} else {
 			/*
 			 * In case we did anyway, make it go away,
@@ -1427,12 +1436,39 @@
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+/*
+ * Return whether the task on the given cpu is currently non-preemptible
+ * while handling a potentially long softirq, or if the task is likely
+ * to block preemption soon because it is a ksoftirqd thread that is
+ * handling slow softirqs.
+ */
+bool
+task_may_not_preempt(struct task_struct *task, int cpu)
+{
+	__u32 softirqs = per_cpu(active_softirqs, cpu) |
+			 __IRQ_STAT(cpu, __softirq_pending);
+	struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
+
+	return ((softirqs & LONG_SOFTIRQ_MASK) &&
+		(task == cpu_ksoftirqd ||
+		 task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
+}
+#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
+
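
task_may_not_preempt() combines two per-cpu signals: a long softirq must be pending or running on the cpu, and the observed task must either be ksoftirqd or currently inside softirq context. A standalone illustration in plain C (the mask values are assumptions for the example; the real ones come from the interrupt.h and preempt-count definitions in this patchset):

#include <stdbool.h>
#include <stdio.h>

#define LONG_SOFTIRQ_MASK	0x58u		/* assumed: NET_RX | BLOCK | TASKLET */
#define SOFTIRQ_MASK		0x0000ff00u	/* assumed softirq nesting bits */

static bool may_not_preempt(unsigned int cpu_softirqs, bool is_ksoftirqd,
			    unsigned int preempt_count)
{
	return (cpu_softirqs & LONG_SOFTIRQ_MASK) &&
	       (is_ksoftirqd || (preempt_count & SOFTIRQ_MASK));
}

int main(void)
{
	/* NET_RX pending and the task is inside softirq: avoid this cpu */
	printf("%d\n", may_not_preempt(0x08, false, 0x0100));	/* 1 */
	/* only HI pending: the cpu is fine for an RT wakeup */
	printf("%d\n", may_not_preempt(0x01, true, 0));		/* 0 */
	return 0;
}
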
 static int
 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 	struct task_struct *curr;
 	struct rq *rq;
 	bool test;
+	int target_cpu = -1;
+	bool may_not_preempt;
+
+	trace_android_rvh_select_task_rq_rt(p, cpu, sd_flag,
+					flags, &target_cpu);
+	if (target_cpu >= 0)
+		return target_cpu;
 
 	/* For anything but wake ups, just return the task_cpu */
 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
@@ -1444,7 +1480,12 @@
 	curr = READ_ONCE(rq->curr); /* unlocked access */
 
 	/*
-	 * If the current task on @p's runqueue is an RT task, then
+	 * If the current task on @p's runqueue is a softirq task,
+	 * it may run without preemption for a time that is
+	 * ill-suited for a waiting RT task. Therefore, try to
+	 * wake this RT task on another runqueue.
+	 *
+	 * Also, if the current task on @p's runqueue is an RT task, then
 	 * try to see if we can wake this RT task up on another
 	 * runqueue. Otherwise simply start this RT task
 	 * on its current runqueue.
@@ -1469,9 +1510,10 @@
 	 * requirement of the task - which is only important on heterogeneous
 	 * systems like big.LITTLE.
 	 */
-	test = curr &&
-	       unlikely(rt_task(curr)) &&
-	       (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);
+	may_not_preempt = task_may_not_preempt(curr, cpu);
+	test = (curr && (may_not_preempt ||
+			 (unlikely(rt_task(curr)) &&
+			  (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio))));
 
 	if (test || !rt_task_fits_capacity(p, cpu)) {
 		int target = find_lowest_rq(p);
@@ -1484,11 +1526,14 @@
 			goto out_unlock;
 
 		/*
-		 * Don't bother moving it if the destination CPU is
+		 * If cpu is non-preemptible, prefer remote cpu
+		 * even if it's running a higher-prio task.
+		 * Otherwise: Don't bother moving it if the destination CPU is
 		 * not running a lower priority task.
 		 */
 		if (target != -1 &&
-		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
+		    (may_not_preempt ||
+		     p->prio < cpu_rq(target)->rt.highest_prio.curr))
 			cpu = target;
 	}
 
@@ -1691,7 +1736,7 @@
 	struct sched_domain *sd;
 	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
 	int this_cpu = smp_processor_id();
-	int cpu      = task_cpu(task);
+	int cpu      = -1;
 	int ret;
 
 	/* Make sure the mask is initialized first */
@@ -1716,9 +1761,15 @@
 				  task, lowest_mask);
 	}
 
+	trace_android_rvh_find_lowest_rq(task, lowest_mask, ret, &cpu);
+	if (cpu >= 0)
+		return cpu;
+
 	if (!ret)
 		return -1; /* No targets found */
 
+	cpu = task_cpu(task);
+
 	/*
 	 * At this point we have built a mask of CPUs representing the
 	 * lowest priority tasks in the system.  Now we want to elect
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index df80bfc..3a93dfa 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -65,6 +65,7 @@
 #include <linux/syscalls.h>
 #include <linux/task_work.h>
 #include <linux/tsacct_kern.h>
+#include <linux/android_vendor.h>
 
 #include <asm/tlb.h>
 #include <asm-generic/vmlinux.lds.h>
@@ -428,6 +429,10 @@
 	struct uclamp_se	uclamp_req[UCLAMP_CNT];
 	/* Effective clamp values used for a task group */
 	struct uclamp_se	uclamp[UCLAMP_CNT];
+	/* Latency-sensitive flag used for a task group */
+	unsigned int		latency_sensitive;
+
+	ANDROID_VENDOR_DATA_ARRAY(1, 4);
 #endif
 
 };
@@ -828,6 +833,8 @@
 	 * CPUs of the rd. Protected by RCU.
 	 */
 	struct perf_domain __rcu *pd;
+
+	ANDROID_VENDOR_DATA_ARRAY(1, 4);
 };
 
 extern void init_defrootdomain(void);
@@ -1048,6 +1055,8 @@
 	/* Must be inspected within a rcu lock section */
 	struct cpuidle_state	*idle_state;
 #endif
+
+	ANDROID_VENDOR_DATA_ARRAY(1, 96);
 };
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -1370,8 +1379,6 @@
 };
 extern void sched_setnuma(struct task_struct *p, int node);
 extern int migrate_task_to(struct task_struct *p, int cpu);
-extern int migrate_swap(struct task_struct *p, struct task_struct *t,
-			int cpu, int scpu);
 extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
 #else
 static inline void
@@ -1382,6 +1389,8 @@
 
 #ifdef CONFIG_SMP
 
+extern int migrate_swap(struct task_struct *p, struct task_struct *t,
+			int cpu, int scpu);
 static inline void
 queue_balance_callback(struct rq *rq,
 		       struct callback_head *head,
@@ -2445,6 +2454,11 @@
 	return clamp(util, min_util, max_util);
 }
 
+static inline bool uclamp_boosted(struct task_struct *p)
+{
+	return uclamp_eff_value(p, UCLAMP_MIN) > 0;
+}
+
 /*
  * When uclamp is compiled in, the aggregation at rq level is 'turned off'
  * by default in the fast path and only gets turned on once userspace performs
@@ -2465,12 +2479,36 @@
 	return util;
 }
 
+static inline bool uclamp_boosted(struct task_struct *p)
+{
+	return false;
+}
+
 static inline bool uclamp_is_used(void)
 {
 	return false;
 }
 #endif /* CONFIG_UCLAMP_TASK */
 
+#ifdef CONFIG_UCLAMP_TASK_GROUP
+static inline bool uclamp_latency_sensitive(struct task_struct *p)
+{
+	struct cgroup_subsys_state *css = task_css(p, cpu_cgrp_id);
+	struct task_group *tg;
+
+	if (!css)
+		return false;
+	tg = container_of(css, struct task_group, css);
+
+	return tg->latency_sensitive;
+}
+#else
+static inline bool uclamp_latency_sensitive(struct task_struct *p)
+{
+	return false;
+}
+#endif /* CONFIG_UCLAMP_TASK_GROUP */
+
 #ifdef arch_scale_freq_capacity
 # ifndef arch_scale_freq_invariant
 #  define arch_scale_freq_invariant()	true
@@ -2633,3 +2671,15 @@
 
 void swake_up_all_locked(struct swait_queue_head *q);
 void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
+
+/*
+ * task_may_not_preempt - check whether a task may not be preemptible soon
+ */
+#ifdef CONFIG_RT_SOFTINT_OPTIMIZATION
+extern bool task_may_not_preempt(struct task_struct *task, int cpu);
+#else
+static inline bool task_may_not_preempt(struct task_struct *task, int cpu)
+{
+	return false;
+}
+#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index dd77702..87f9d29 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -323,7 +323,6 @@
  *    2. the SD_ASYM_CPUCAPACITY flag is set in the sched_domain hierarchy.
  *    3. no SMT is detected.
  *    4. the EM complexity is low enough to keep scheduling overheads low;
- *    5. schedutil is driving the frequency of all CPUs of the rd;
  *
  * The complexity of the Energy Model is defined as:
  *
@@ -343,15 +342,12 @@
  */
 #define EM_MAX_COMPLEXITY 2048
 
-extern struct cpufreq_governor schedutil_gov;
 static bool build_perf_domains(const struct cpumask *cpu_map)
 {
 	int i, nr_pd = 0, nr_ps = 0, nr_cpus = cpumask_weight(cpu_map);
 	struct perf_domain *pd = NULL, *tmp;
 	int cpu = cpumask_first(cpu_map);
 	struct root_domain *rd = cpu_rq(cpu)->rd;
-	struct cpufreq_policy *policy;
-	struct cpufreq_governor *gov;
 
 	if (!sysctl_sched_energy_aware)
 		goto free;
@@ -377,19 +373,6 @@
 		if (find_pd(pd, i))
 			continue;
 
-		/* Do not attempt EAS if schedutil is not being used. */
-		policy = cpufreq_cpu_get(i);
-		if (!policy)
-			goto free;
-		gov = policy->governor;
-		cpufreq_cpu_put(policy);
-		if (gov != &schedutil_gov) {
-			if (rd->pd)
-				pr_warn("rd %*pbl: Disabling EAS, schedutil is mandatory\n",
-						cpumask_pr_args(cpu_map));
-			goto free;
-		}
-
 		/* Create the new pd and add it to the local list. */
 		tmp = pd_init(i);
 		if (!tmp)
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 01f5d30..4b46340 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -384,7 +384,8 @@
 }
 EXPORT_SYMBOL(finish_wait);
 
-int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
+__sched int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
+				     int sync, void *key)
 {
 	int ret = default_wake_function(wq_entry, mode, sync, key);
 
@@ -420,7 +421,7 @@
  * }						smp_mb(); // C
  * remove_wait_queue(&wq_head, &wait);		wq_entry->flags |= WQ_FLAG_WOKEN;
  */
-long wait_woken(struct wait_queue_entry *wq_entry, unsigned mode, long timeout)
+__sched long wait_woken(struct wait_queue_entry *wq_entry, unsigned int mode, long timeout)
 {
 	/*
 	 * The below executes an smp_mb(), which matches with the full barrier
@@ -445,7 +446,8 @@
 }
 EXPORT_SYMBOL(wait_woken);
 
-int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key)
+__sched int woken_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
+				int sync, void *key)
 {
 	/* Pairs with the smp_store_mb() in wait_woken(). */
 	smp_mb(); /* C */
diff --git a/kernel/signal.c b/kernel/signal.c
index ef8f2a2..dd83edc 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1397,6 +1397,7 @@
 
 	return sighand;
 }
+EXPORT_SYMBOL_GPL(__lock_task_sighand);
 
 /*
  * send signal info to all the members of a group
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 09229ad..169a780 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -55,6 +55,14 @@
 static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
 
 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd);
+
+/*
+ * active_softirqs -- per cpu, a mask of softirqs that are being handled,
+ * with the expectation that approximate answers are acceptable and
+ * therefore no synchronization is required.
+ */
+DEFINE_PER_CPU(__u32, active_softirqs);
 
 const char * const softirq_to_name[NR_SOFTIRQS] = {
 	"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
@@ -77,22 +85,6 @@
 }
 
 /*
- * If ksoftirqd is scheduled, we do not want to process pending softirqs
- * right now. Let ksoftirqd handle this at its own rate, to get fairness,
- * unless we're doing some of the synchronous softirqs.
- */
-#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
-static bool ksoftirqd_running(unsigned long pending)
-{
-	struct task_struct *tsk = __this_cpu_read(ksoftirqd);
-
-	if (pending & SOFTIRQ_NOW_MASK)
-		return false;
-	return tsk && (tsk->state == TASK_RUNNING) &&
-		!__kthread_should_park(tsk);
-}
-
-/*
  * preempt_count and SOFTIRQ_OFFSET usage:
  * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
  *   softirq processing.
@@ -252,6 +244,16 @@
 static inline void lockdep_softirq_end(bool in_hardirq) { }
 #endif
 
+#define softirq_deferred_for_rt(pending)		\
+({							\
+	__u32 deferred = 0;				\
+	if (cpupri_check_rt()) {			\
+		deferred = pending & LONG_SOFTIRQ_MASK; \
+		pending &= ~LONG_SOFTIRQ_MASK;		\
+	}						\
+	deferred;					\
+})
+
 asmlinkage __visible void __softirq_entry __do_softirq(void)
 {
 	unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
@@ -259,6 +261,7 @@
 	int max_restart = MAX_SOFTIRQ_RESTART;
 	struct softirq_action *h;
 	bool in_hardirq;
+	__u32 deferred;
 	__u32 pending;
 	int softirq_bit;
 
@@ -270,14 +273,15 @@
 	current->flags &= ~PF_MEMALLOC;
 
 	pending = local_softirq_pending();
+	deferred = softirq_deferred_for_rt(pending);
 	account_irq_enter_time(current);
-
 	__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
 	in_hardirq = lockdep_softirq_start();
 
 restart:
 	/* Reset the pending bitmask before enabling irqs */
-	set_softirq_pending(0);
+	set_softirq_pending(deferred);
+	__this_cpu_write(active_softirqs, pending);
 
 	local_irq_enable();
 
@@ -307,19 +311,24 @@
 		pending >>= softirq_bit;
 	}
 
+	__this_cpu_write(active_softirqs, 0);
 	if (__this_cpu_read(ksoftirqd) == current)
 		rcu_softirq_qs();
 	local_irq_disable();
 
 	pending = local_softirq_pending();
+	deferred = softirq_deferred_for_rt(pending);
+
 	if (pending) {
 		if (time_before(jiffies, end) && !need_resched() &&
 		    --max_restart)
 			goto restart;
-
-		wakeup_softirqd();
 	}
 
+	/*
+	 * deferred is non-zero only when RT softirq deferral kicked in
+	 * above; without CONFIG_RT_SOFTINT_OPTIMIZATION it is always 0,
+	 * so this reduces to the old "if (pending)" wakeup.
+	 */
+	if (pending | deferred)
+		wakeup_softirqd();
 	lockdep_softirq_end(in_hardirq);
 	account_irq_exit_time(current);
 	__local_bh_enable(SOFTIRQ_OFFSET);
@@ -339,7 +348,7 @@
 
 	pending = local_softirq_pending();
 
-	if (pending && !ksoftirqd_running(pending))
+	if (pending)
 		do_softirq_own_stack();
 
 	local_irq_restore(flags);
@@ -373,9 +382,6 @@
 
 static inline void invoke_softirq(void)
 {
-	if (ksoftirqd_running(local_softirq_pending()))
-		return;
-
 	if (!force_irqthreads) {
 #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
 		/*
@@ -554,10 +560,15 @@
 				if (!test_and_clear_bit(TASKLET_STATE_SCHED,
 							&t->state))
 					BUG();
-				if (t->use_callback)
+				if (t->use_callback) {
+					trace_tasklet_entry(t->callback);
 					t->callback(t);
-				else
+					trace_tasklet_exit(t->callback);
+				} else {
+					trace_tasklet_entry(t->func);
 					t->func(t->data);
+					trace_tasklet_exit(t->func);
+				}
 				tasklet_unlock(t);
 				continue;
 			}
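
Net effect of the softirq changes in this file: one pending mask is split into a part that runs now and a part handed back to ksoftirqd via set_softirq_pending(deferred). A standalone sketch of that split (the LONG_SOFTIRQ_MASK value is assumed for illustration; the real definition lives in the interrupt.h changes that accompany this patchset):

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define LONG_SOFTIRQ_MASK	(BIT(3) | BIT(4) | BIT(6))	/* assumed: NET_RX, BLOCK, TASKLET */

int main(void)
{
	unsigned int pending = BIT(1) | BIT(3) | BIT(6);	/* TIMER, NET_RX, TASKLET */
	unsigned int deferred = 0;
	int rt_task_waiting = 1;	/* stand-in for cpupri_check_rt() */

	if (rt_task_waiting) {
		deferred = pending & LONG_SOFTIRQ_MASK;
		pending &= ~LONG_SOFTIRQ_MASK;
	}
	/* pending = 0x2 (TIMER runs now), deferred = 0x48 (left to ksoftirqd) */
	printf("pending=%#x deferred=%#x\n", pending, deferred);
	return 0;
}
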
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 890b79cf..f3c54c9 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -370,6 +370,7 @@
 	*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, };
 	return cpu_stop_queue_work(cpu, work_buf);
 }
+EXPORT_SYMBOL_GPL(stop_one_cpu_nowait);
 
 static bool queue_stop_cpus_work(const struct cpumask *cpumask,
 				 cpu_stop_fn_t fn, void *arg,
diff --git a/kernel/sys.c b/kernel/sys.c
index a730c03..e5b44b8 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -42,6 +42,8 @@
 #include <linux/syscore_ops.h>
 #include <linux/version.h>
 #include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/mempolicy.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -2278,6 +2280,153 @@
 	return -EINVAL;
 }
 
+#ifdef CONFIG_MMU
+static int prctl_update_vma_anon_name(struct vm_area_struct *vma,
+		struct vm_area_struct **prev,
+		unsigned long start, unsigned long end,
+		const char __user *name_addr)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int error = 0;
+	pgoff_t pgoff;
+
+	if (name_addr == vma_get_anon_name(vma)) {
+		*prev = vma;
+		goto out;
+	}
+
+	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
+	*prev = vma_merge(mm, *prev, start, end, vma->vm_flags, vma->anon_vma,
+				vma->vm_file, pgoff, vma_policy(vma),
+				vma->vm_userfaultfd_ctx, name_addr);
+	if (*prev) {
+		vma = *prev;
+		goto success;
+	}
+
+	*prev = vma;
+
+	if (start != vma->vm_start) {
+		error = split_vma(mm, vma, start, 1);
+		if (error)
+			goto out;
+	}
+
+	if (end != vma->vm_end) {
+		error = split_vma(mm, vma, end, 0);
+		if (error)
+			goto out;
+	}
+
+success:
+	if (!vma->vm_file)
+		vma->anon_name = name_addr;
+
+out:
+	if (error == -ENOMEM)
+		error = -EAGAIN;
+	return error;
+}
+
+static int prctl_set_vma_anon_name(unsigned long start, unsigned long end,
+			unsigned long arg)
+{
+	unsigned long tmp;
+	struct vm_area_struct *vma, *prev;
+	int unmapped_error = 0;
+	int error = -EINVAL;
+
+	/*
+	 * If the interval [start,end) covers some unmapped address
+	 * ranges, just ignore them, but return -ENOMEM at the end.
+	 * - this matches the handling in madvise.
+	 */
+	vma = find_vma_prev(current->mm, start, &prev);
+	if (vma && start > vma->vm_start)
+		prev = vma;
+
+	for (;;) {
+		/* Still start < end. */
+		error = -ENOMEM;
+		if (!vma)
+			return error;
+
+		/* Here start < (end|vma->vm_end). */
+		if (start < vma->vm_start) {
+			unmapped_error = -ENOMEM;
+			start = vma->vm_start;
+			if (start >= end)
+				return error;
+		}
+
+		/* Here vma->vm_start <= start < (end|vma->vm_end) */
+		tmp = vma->vm_end;
+		if (end < tmp)
+			tmp = end;
+
+		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
+		error = prctl_update_vma_anon_name(vma, &prev, start, tmp,
+				(const char __user *)arg);
+		if (error)
+			return error;
+		start = tmp;
+		if (prev && start < prev->vm_end)
+			start = prev->vm_end;
+		error = unmapped_error;
+		if (start >= end)
+			return error;
+		if (prev)
+			vma = prev->vm_next;
+		else	/* no prev; look up the vma covering start */
+			vma = find_vma(current->mm, start);
+	}
+}
+
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+		unsigned long len_in, unsigned long arg)
+{
+	struct mm_struct *mm = current->mm;
+	int error;
+	unsigned long len;
+	unsigned long end;
+
+	if (start & ~PAGE_MASK)
+		return -EINVAL;
+	len = (len_in + ~PAGE_MASK) & PAGE_MASK;
+
+	/* Check to see whether len was rounded up from small -ve to zero */
+	if (len_in && !len)
+		return -EINVAL;
+
+	end = start + len;
+	if (end < start)
+		return -EINVAL;
+
+	if (end == start)
+		return 0;
+
+	mmap_write_lock(mm);
+
+	switch (opt) {
+	case PR_SET_VMA_ANON_NAME:
+		error = prctl_set_vma_anon_name(start, end, arg);
+		break;
+	default:
+		error = -EINVAL;
+	}
+
+	mmap_write_unlock(mm);
+
+	return error;
+}
+#else /* CONFIG_MMU */
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+		unsigned long len_in, unsigned long arg)
+{
+	return -EINVAL;
+}
+#endif
+
 #define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)
 
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
@@ -2492,6 +2641,9 @@
 			return -EINVAL;
 		error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
 		break;
+	case PR_SET_VMA:
+		error = prctl_set_vma(arg2, arg3, arg4, arg5);
+		break;
 	case PR_PAC_RESET_KEYS:
 		if (arg3 || arg4 || arg5)
 			return -EINVAL;
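
From userspace the new interface is a single prctl() call. A minimal sketch (not part of the patch; the PR_SET_VMA constants are assumed to match the Android uapi values) that names an anonymous mapping so it shows up as [anon:thread stack] in /proc/<pid>/maps:

#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
#define PR_SET_VMA		0x53564d41
#define PR_SET_VMA_ANON_NAME	0
#endif

int main(void)
{
	size_t len = 4096 * 4;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/*
	 * The kernel stores the user pointer itself (vma->anon_name), so
	 * the string must stay valid for the lifetime of the mapping.
	 */
	static const char name[] = "thread stack";

	if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		  (unsigned long)p, len, (unsigned long)name))
		perror("prctl(PR_SET_VMA)");

	return 0;
}
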
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index afad085..4f50100 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -106,6 +106,9 @@
 
 #if defined(CONFIG_SYSCTL)
 
+/* External variables not in a header file. */
+extern int extra_free_kbytes;
+
 /* Constants used for minimum and  maximum */
 #ifdef CONFIG_LOCKUP_DETECTOR
 static int sixty = 60;
@@ -2905,6 +2908,14 @@
 		.extra2		= &one_thousand,
 	},
 	{
+		.procname	= "extra_free_kbytes",
+		.data		= &extra_free_kbytes,
+		.maxlen		= sizeof(extra_free_kbytes),
+		.mode		= 0644,
+		.proc_handler	= min_free_kbytes_sysctl_handler,
+		.extra1		= SYSCTL_ZERO,
+	},
+	{
 		.procname	= "percpu_pagelist_fraction",
 		.data		= &percpu_pagelist_fraction,
 		.maxlen		= sizeof(percpu_pagelist_fraction),
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index b1b9b12..d5ab3fb 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -16,6 +16,7 @@
 #include <linux/sched_clock.h>
 #include <linux/seqlock.h>
 #include <linux/bitops.h>
+#include <trace/hooks/epoch.h>
 
 #include "timekeeping.h"
 
@@ -270,6 +271,7 @@
 	update_sched_clock();
 	hrtimer_cancel(&sched_clock_timer);
 	rd->read_sched_clock = suspended_sched_clock_read;
+	trace_android_vh_show_suspend_epoch_val(rd->epoch_ns, rd->epoch_cyc);
 
 	return 0;
 }
@@ -281,6 +283,7 @@
 	rd->epoch_cyc = cd.actual_read_sched_clock();
 	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
 	rd->read_sched_clock = cd.actual_read_sched_clock;
+	trace_android_vh_show_resume_epoch_val(rd->epoch_cyc);
 }
 
 static struct syscore_ops sched_clock_ops = {
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 6c9c342..5c507c6 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -17,6 +17,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 #include <trace/events/power.h>
+#include <trace/hooks/sched.h>
 
 #include <asm/irq_regs.h>
 
@@ -94,6 +95,7 @@
 		write_seqcount_end(&jiffies_seq);
 		raw_spin_unlock(&jiffies_lock);
 		update_wall_time();
+		trace_android_vh_jiffies_update(NULL);
 	}
 
 	update_process_times(user_mode(get_irq_regs()));
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 81632cd..4de8504 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -25,6 +25,7 @@
 #include <linux/posix-timers.h>
 #include <linux/context_tracking.h>
 #include <linux/mm.h>
+#include <trace/hooks/sched.h>
 
 #include <asm/irq_regs.h>
 
@@ -143,8 +144,10 @@
 #endif
 
 	/* Check, if the jiffies need an update */
-	if (tick_do_timer_cpu == cpu)
+	if (tick_do_timer_cpu == cpu) {
 		tick_do_update_jiffies64(now);
+		trace_android_vh_jiffies_update(NULL);
+	}
 
 	if (ts->inidle)
 		ts->got_idle_tick = 1;
@@ -1117,6 +1120,7 @@
 
 	return ktime_sub(next_event, now);
 }
+EXPORT_SYMBOL_GPL(tick_nohz_get_sleep_length);
 
 /**
  * tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 3985b2b..5fe2cc0 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -686,6 +686,7 @@
 	return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
 #endif
 }
+EXPORT_SYMBOL_GPL(nsec_to_clock_t);
 
 u64 jiffies64_to_nsecs(u64 j)
 {
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index 21bb161..3ca551c 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -18,4 +18,6 @@
 EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
 EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency);
 EXPORT_TRACEPOINT_SYMBOL_GPL(powernv_throttle);
-
+EXPORT_TRACEPOINT_SYMBOL_GPL(device_pm_callback_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(device_pm_callback_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(clock_set_rate);
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index f493804..c881e5a 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -14,6 +14,8 @@
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/preemptirq.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/preemptirq.h>
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 /* Per-cpu variable to prevent redundant calls when IRQs already off */
@@ -28,8 +30,11 @@
 void trace_hardirqs_on_prepare(void)
 {
 	if (this_cpu_read(tracing_irq_cpu)) {
-		if (!in_nmi())
+		if (!in_nmi()) {
 			trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
+			trace_android_rvh_irqs_enable(CALLER_ADDR0,
+						      CALLER_ADDR1);
+		}
 		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
@@ -40,8 +45,11 @@
 void trace_hardirqs_on(void)
 {
 	if (this_cpu_read(tracing_irq_cpu)) {
-		if (!in_nmi())
+		if (!in_nmi()) {
 			trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+			trace_android_rvh_irqs_enable(CALLER_ADDR0,
+						      CALLER_ADDR1);
+		}
 		tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
@@ -63,8 +71,11 @@
 	if (!this_cpu_read(tracing_irq_cpu)) {
 		this_cpu_write(tracing_irq_cpu, 1);
 		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
-		if (!in_nmi())
+		if (!in_nmi()) {
 			trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
+			trace_android_rvh_irqs_disable(CALLER_ADDR0,
+						       CALLER_ADDR1);
+		}
 	}
 
 }
@@ -78,8 +89,11 @@
 	if (!this_cpu_read(tracing_irq_cpu)) {
 		this_cpu_write(tracing_irq_cpu, 1);
 		tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
-		if (!in_nmi())
+		if (!in_nmi()) {
 			trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+			trace_android_rvh_irqs_disable(CALLER_ADDR0,
+						       CALLER_ADDR1);
+		}
 	}
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
@@ -88,8 +102,11 @@
 __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
 	if (this_cpu_read(tracing_irq_cpu)) {
-		if (!in_nmi())
+		if (!in_nmi()) {
 			trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+			trace_android_rvh_irqs_enable(CALLER_ADDR0,
+						      caller_addr);
+		}
 		tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
 		this_cpu_write(tracing_irq_cpu, 0);
 	}
@@ -107,8 +124,11 @@
 	if (!this_cpu_read(tracing_irq_cpu)) {
 		this_cpu_write(tracing_irq_cpu, 1);
 		tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
-		if (!in_nmi())
+		if (!in_nmi()) {
 			trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+			trace_android_rvh_irqs_disable(CALLER_ADDR0,
+						       caller_addr);
+		}
 	}
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
@@ -119,15 +139,19 @@
 
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
-	if (!in_nmi())
+	if (!in_nmi()) {
 		trace_preempt_enable_rcuidle(a0, a1);
+		trace_android_rvh_preempt_enable(a0, a1);
+	}
 	tracer_preempt_on(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
-	if (!in_nmi())
+	if (!in_nmi()) {
 		trace_preempt_disable_rcuidle(a0, a1);
+		trace_android_rvh_preempt_disable(a0, a1);
+	}
 	tracer_preempt_off(a0, a1);
 }
 #endif
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 437935e..5640ec0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -53,6 +53,10 @@
 
 #include "workqueue_internal.h"
 
+#include <trace/hooks/wqlockup.h>
+/* events/workqueue.h uses default TRACE_INCLUDE_PATH */
+#undef TRACE_INCLUDE_PATH
+
 enum {
 	/*
 	 * worker_pool flags
@@ -5792,6 +5796,7 @@
 			pr_cont_pool_info(pool);
 			pr_cont(" stuck for %us!\n",
 				jiffies_to_msecs(jiffies - pool_ts) / 1000);
+			trace_android_vh_wq_lockup_pool(pool->cpu, pool_ts);
 		}
 	}
 
diff --git a/mm/cma.c b/mm/cma.c
index 7f415d7..878dbcc 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -24,6 +24,7 @@
 #include <linux/memblock.h>
 #include <linux/err.h>
 #include <linux/mm.h>
+#include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
@@ -32,6 +33,8 @@
 #include <linux/highmem.h>
 #include <linux/io.h>
 #include <linux/kmemleak.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
 #include <trace/events/cma.h>
 
 #include "cma.h"
@@ -54,6 +57,7 @@
 {
 	return cma->name;
 }
+EXPORT_SYMBOL_GPL(cma_get_name);
 
 static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
 					     unsigned int align_order)
@@ -418,6 +422,8 @@
 	size_t i;
 	struct page *page = NULL;
 	int ret = -ENOMEM;
+	int num_attempts = 0;
+	int max_retries = 5;
 
 	if (!cma || !cma->count || !cma->bitmap)
 		return NULL;
@@ -442,8 +448,28 @@
 				bitmap_maxno, start, bitmap_count, mask,
 				offset);
 		if (bitmap_no >= bitmap_maxno) {
-			mutex_unlock(&cma->lock);
-			break;
+			if ((num_attempts < max_retries) && (ret == -EBUSY)) {
+				mutex_unlock(&cma->lock);
+
+				if (fatal_signal_pending(current))
+					break;
+
+				/*
+				 * The page may be momentarily pinned by some
+				 * other process which has been scheduled out,
+				 * e.g. during exit, unmap, or fork, and so
+				 * cannot be freed yet. Sleep for 100ms and
+				 * retry the allocation.
+				 */
+				start = 0;
+				ret = -ENOMEM;
+				schedule_timeout_killable(msecs_to_jiffies(100));
+				num_attempts++;
+				continue;
+			} else {
+				mutex_unlock(&cma->lock);
+				break;
+			}
 		}
 		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
 		/*
@@ -494,6 +520,7 @@
 	pr_debug("%s(): returned %p\n", __func__, page);
 	return page;
 }
+EXPORT_SYMBOL_GPL(cma_alloc);
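
The retry policy above is easier to follow outside the locking. A userspace analogue of the same bounded retry with a 100ms back-off (try_alloc() is a stand-in, not a kernel API):

#include <stdio.h>
#include <time.h>

/* Stub standing in for the bitmap scan plus the page allocation step. */
static void *try_alloc(int *err)
{
	static int calls;

	if (++calls < 3) {	/* pretend pages were pinned twice */
		*err = -16;	/* -EBUSY */
		return NULL;
	}
	*err = 0;
	return &calls;		/* dummy success */
}

static void *alloc_with_retry(void)
{
	const struct timespec backoff = { .tv_nsec = 100 * 1000 * 1000 };
	int attempts = 0, err;

	for (;;) {
		void *p = try_alloc(&err);

		if (p)
			return p;
		if (err != -16 || attempts++ >= 5)	/* only -EBUSY retries */
			return NULL;
		nanosleep(&backoff, NULL);		/* 100ms, as above */
	}
}

int main(void)
{
	printf("%s\n", alloc_with_retry() ? "allocated" : "failed");
	return 0;
}
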
 
 /**
  * cma_release() - release allocated pages
@@ -527,6 +554,7 @@
 
 	return true;
 }
+EXPORT_SYMBOL_GPL(cma_release);
 
 int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
 {
@@ -541,3 +569,4 @@
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(cma_for_each_area);
diff --git a/mm/filemap.c b/mm/filemap.c
index 0b2067b..be73f7f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1189,7 +1189,7 @@
 /* How many times do we accept lock stealing from under a waiter? */
 int sysctl_page_lock_unfairness = 5;
 
-static inline int wait_on_page_bit_common(wait_queue_head_t *q,
+static inline __sched int wait_on_page_bit_common(wait_queue_head_t *q,
 	struct page *page, int bit_nr, int state, enum behavior behavior)
 {
 	int unfairness = sysctl_page_lock_unfairness;
@@ -1328,14 +1328,14 @@
 	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
 }
 
-void wait_on_page_bit(struct page *page, int bit_nr)
+__sched void wait_on_page_bit(struct page *page, int bit_nr)
 {
 	wait_queue_head_t *q = page_waitqueue(page);
 	wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
 
-int wait_on_page_bit_killable(struct page *page, int bit_nr)
+__sched int wait_on_page_bit_killable(struct page *page, int bit_nr)
 {
 	wait_queue_head_t *q = page_waitqueue(page);
 	return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED);
@@ -1532,7 +1532,7 @@
  * __lock_page - get a lock on the page, assuming we need to sleep to get it
  * @__page: the page to lock
  */
-void __lock_page(struct page *__page)
+__sched void __lock_page(struct page *__page)
 {
 	struct page *page = compound_head(__page);
 	wait_queue_head_t *q = page_waitqueue(page);
@@ -1541,7 +1541,7 @@
 }
 EXPORT_SYMBOL(__lock_page);
 
-int __lock_page_killable(struct page *__page)
+__sched int __lock_page_killable(struct page *__page)
 {
 	struct page *page = compound_head(__page);
 	wait_queue_head_t *q = page_waitqueue(page);
@@ -1550,7 +1550,7 @@
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
-int __lock_page_async(struct page *page, struct wait_page_queue *wait)
+__sched int __lock_page_async(struct page *page, struct wait_page_queue *wait)
 {
 	return __wait_on_page_locked_async(page, wait, true);
 }
@@ -1566,7 +1566,7 @@
  * If neither ALLOW_RETRY nor KILLABLE are set, will always return 1
  * with the page locked and the mmap_lock unperturbed.
  */
-int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
+__sched int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 			 unsigned int flags)
 {
 	if (fault_flag_allow_retry_first(flags)) {
diff --git a/mm/madvise.c b/mm/madvise.c
index a8d8d48..dc2a9bb 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -136,7 +136,7 @@
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
 			  vma->vm_file, pgoff, vma_policy(vma),
-			  vma->vm_userfaultfd_ctx);
+			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (*prev) {
 		vma = *prev;
 		goto success;
diff --git a/mm/memblock.c b/mm/memblock.c
index b68ee867..d4e7922 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1656,6 +1656,7 @@
 
 	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
 }
+EXPORT_SYMBOL_GPL(memblock_end_of_DRAM);
 
 static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
 {
diff --git a/mm/memory.c b/mm/memory.c
index c48f8df..49fbc88 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -73,6 +73,7 @@
 #include <linux/perf_event.h>
 #include <linux/ptrace.h>
 #include <linux/vmalloc.h>
+#include <trace/hooks/mm.h>
 
 #include <trace/events/kmem.h>
 
@@ -156,10 +157,23 @@
 }
 core_initcall(init_zero_pfn);
 
-void mm_trace_rss_stat(struct mm_struct *mm, int member, long count)
+/*
+ * Only trace rss_stat when the counter crosses a 512KB boundary
+ * (128 pages at a 4KB page size). Smaller changes go untraced
+ * unless they cross into or back out of a 512KB bucket.
+ */
+#define TRACE_MM_COUNTER_THRESHOLD 128
+
+void mm_trace_rss_stat(struct mm_struct *mm, int member, long count,
+		       long value)
 {
-	trace_rss_stat(mm, member, count);
+	long thresh_mask = ~(TRACE_MM_COUNTER_THRESHOLD - 1);
+
+	/* Threshold roll-over, trace it */
+	if ((count & thresh_mask) != ((count - value) & thresh_mask))
+		trace_rss_stat(mm, member, count);
 }
+EXPORT_SYMBOL_GPL(mm_trace_rss_stat);
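
To make the roll-over condition concrete: count is the new counter value and value is the delta just applied, so the tracepoint fires exactly when the two sides of the update land in different 128-page buckets. A standalone check (illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define THRESHOLD 128L

static bool should_trace(long count, long value)
{
	long mask = ~(THRESHOLD - 1);

	return (count & mask) != ((count - value) & mask);
}

int main(void)
{
	/* 120 -> 130 pages crosses the 128-page boundary: traced */
	printf("%d\n", should_trace(130, 10));	/* 1 */
	/* 130 -> 135 pages stays inside one bucket: not traced */
	printf("%d\n", should_trace(135, 5));	/* 0 */
	return 0;
}
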
 
 #if defined(SPLIT_RSS_COUNTING)
 
@@ -3280,8 +3294,10 @@
 		if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
 		    __swap_count(entry) == 1) {
 			/* skip swapcache */
-			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
-							vmf->address);
+			gfp_t flags = GFP_HIGHUSER_MOVABLE;
+
+			trace_android_rvh_set_skip_swapcache_flags(&flags);
+			page = alloc_page_vma(flags, vma, vmf->address);
 			if (page) {
 				int err;
 
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3ca4898..1f763ed 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -829,7 +829,8 @@
 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
 				 vma->anon_vma, vma->vm_file, pgoff,
-				 new_pol, vma->vm_userfaultfd_ctx);
+				 new_pol, vma->vm_userfaultfd_ctx,
+				 vma_get_anon_name(vma));
 		if (prev) {
 			vma = prev;
 			next = vma->vm_next;
diff --git a/mm/mlock.c b/mm/mlock.c
index 884b121..0e4f89c 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -540,7 +540,7 @@
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
 			  vma->vm_file, pgoff, vma_policy(vma),
-			  vma->vm_userfaultfd_ctx);
+			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (*prev) {
 		vma = *prev;
 		goto success;
diff --git a/mm/mmap.c b/mm/mmap.c
index d91ecb0..b7e903c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1028,7 +1028,8 @@
  */
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 				struct file *file, unsigned long vm_flags,
-				struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+				struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+				const char __user *anon_name)
 {
 	/*
 	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -1046,6 +1047,8 @@
 		return 0;
 	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
 		return 0;
+	if (vma_get_anon_name(vma) != anon_name)
+		return 0;
 	return 1;
 }
 
@@ -1078,9 +1081,10 @@
 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
 		     struct anon_vma *anon_vma, struct file *file,
 		     pgoff_t vm_pgoff,
-		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+		     const char __user *anon_name)
 {
-	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
+	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		if (vma->vm_pgoff == vm_pgoff)
 			return 1;
@@ -1099,9 +1103,10 @@
 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 		    struct anon_vma *anon_vma, struct file *file,
 		    pgoff_t vm_pgoff,
-		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+		    const char __user *anon_name)
 {
-	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
+	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		pgoff_t vm_pglen;
 		vm_pglen = vma_pages(vma);
@@ -1112,9 +1117,9 @@
 }
 
 /*
- * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
- * whether that can be merged with its predecessor or its successor.
- * Or both (it neatly fills a hole).
+ * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
+ * figure out whether that can be merged with its predecessor or its
+ * successor.  Or both (it neatly fills a hole).
  *
  * In most cases - when called for mmap, brk or mremap - [addr,end) is
  * certain not to be mapped by the time vma_merge is called; but when
@@ -1159,7 +1164,8 @@
 			unsigned long end, unsigned long vm_flags,
 			struct anon_vma *anon_vma, struct file *file,
 			pgoff_t pgoff, struct mempolicy *policy,
-			struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+			struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+			const char __user *anon_name)
 {
 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
 	struct vm_area_struct *area, *next;
@@ -1189,7 +1195,8 @@
 			mpol_equal(vma_policy(prev), policy) &&
 			can_vma_merge_after(prev, vm_flags,
 					    anon_vma, file, pgoff,
-					    vm_userfaultfd_ctx)) {
+					    vm_userfaultfd_ctx,
+					    anon_name)) {
 		/*
 		 * OK, it can.  Can we now merge in the successor as well?
 		 */
@@ -1198,7 +1205,8 @@
 				can_vma_merge_before(next, vm_flags,
 						     anon_vma, file,
 						     pgoff+pglen,
-						     vm_userfaultfd_ctx) &&
+						     vm_userfaultfd_ctx,
+						     anon_name) &&
 				is_mergeable_anon_vma(prev->anon_vma,
 						      next->anon_vma, NULL)) {
 							/* cases 1, 6 */
@@ -1221,7 +1229,8 @@
 			mpol_equal(policy, vma_policy(next)) &&
 			can_vma_merge_before(next, vm_flags,
 					     anon_vma, file, pgoff+pglen,
-					     vm_userfaultfd_ctx)) {
+					     vm_userfaultfd_ctx,
+					     anon_name)) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
 			err = __vma_adjust(prev, prev->vm_start,
 					 addr, prev->vm_pgoff, NULL, next);
@@ -1765,7 +1774,7 @@
 	 * Can we just expand an old mapping?
 	 */
 	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
-			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
+			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
 	if (vma)
 		goto out;
 
@@ -1813,7 +1822,8 @@
 		 */
 		if (unlikely(vm_flags != vma->vm_flags && prev)) {
 			merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags,
-				NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX);
+				NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX,
+				vma_get_anon_name(vma));
 			if (merge) {
 				/* ->mmap() can change vma->vm_file and fput the original file. So
 				 * fput the vma->vm_file here or we would add an extra fput for file
@@ -3102,7 +3112,7 @@
 
 	/* Can we just expand an old private anonymous mapping? */
 	vma = vma_merge(mm, prev, addr, addr + len, flags,
-			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
+			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
 	if (vma)
 		goto out;
 
@@ -3301,7 +3311,7 @@
 		return NULL;	/* should never get here */
 	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
 			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-			    vma->vm_userfaultfd_ctx);
+			    vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (new_vma) {
 		/*
 		 * Source vma may have been merged into new_vma
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 4686fdc..f04cbef 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -9,6 +9,7 @@
 #include <linux/stddef.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
+#include <trace/hooks/mm.h>
 
 struct pglist_data *first_online_pgdat(void)
 {
@@ -113,3 +114,19 @@
 	return last_cpupid;
 }
 #endif
+
+enum zone_type gfp_zone(gfp_t flags)
+{
+	enum zone_type z;
+	gfp_t local_flags = flags;
+	int bit;
+
+	trace_android_rvh_set_gfp_zone_flags(&local_flags);
+
+	bit = (__force int) ((local_flags) & GFP_ZONEMASK);
+
+	z = (GFP_ZONE_TABLE >> (bit * GFP_ZONES_SHIFT)) &
+					 ((1 << GFP_ZONES_SHIFT) - 1);
+	VM_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
+	return z;
+}
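
gfp_zone() is moved out of line here so the vendor hook can rewrite the flags before the table lookup; the lookup itself is unchanged. A standalone sketch of the bit-packed table encoding it reads (illustrative values only; the real GFP_ZONE_TABLE and GFP_ZONES_SHIFT come from gfp.h):

#include <stdio.h>

#define ZONES_SHIFT	2	/* bits per table entry (assumed) */
#define ZONE_NORMAL	0
#define ZONE_DMA	1
#define ZONE_MOVABLE	2

/* entry i = zone for zone-modifier combination i, packed 2 bits apiece */
#define ZONE_TABLE ((ZONE_NORMAL  << (0 * ZONES_SHIFT)) | \
		    (ZONE_DMA     << (1 * ZONES_SHIFT)) | \
		    (ZONE_MOVABLE << (2 * ZONES_SHIFT)))

static int zone_of(int modifier_bits)
{
	return (ZONE_TABLE >> (modifier_bits * ZONES_SHIFT)) &
	       ((1 << ZONES_SHIFT) - 1);
}

int main(void)
{
	/* the whole lookup is two shifts and a mask */
	printf("%d %d %d\n", zone_of(0), zone_of(1), zone_of(2));	/* 0 1 2 */
	return 0;
}
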
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 56c02be..a2a62d6 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -454,7 +454,7 @@
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*pprev = vma_merge(mm, *pprev, start, end, newflags,
 			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-			   vma->vm_userfaultfd_ctx);
+			   vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (*pprev) {
 		vma = *pprev;
 		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index eaa227a..3f51a56 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -324,10 +324,10 @@
 	"Unmovable",
 	"Movable",
 	"Reclaimable",
-	"HighAtomic",
 #ifdef CONFIG_CMA
 	"CMA",
 #endif
+	"HighAtomic",
 #ifdef CONFIG_MEMORY_ISOLATION
 	"Isolate",
 #endif
@@ -344,6 +344,11 @@
 #endif
 };
 
+/*
+ * Try to keep at least this much lowmem free.  Do not allow normal
+ * allocations below this point, only high priority ones. Automatically
+ * tuned according to the amount of memory in the system.
+ */
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
 #ifdef CONFIG_DISCONTIGMEM
@@ -362,6 +367,13 @@
 #endif
 int watermark_scale_factor = 10;
 
+/*
+ * Extra memory the system should try to keep free. Used to
+ * temporarily free up memory for new workloads; anyone can still
+ * allocate down to the min watermarks controlled by min_free_kbytes
+ * above.
+ */
+int extra_free_kbytes = 0;
+
 static unsigned long nr_kernel_pages __initdata;
 static unsigned long nr_all_pages __initdata;
 static unsigned long dma_reserve __initdata;
@@ -2843,35 +2855,35 @@
 {
 	struct page *page;
 
-#ifdef CONFIG_CMA
-	/*
-	 * Balance movable allocations between regular and CMA areas by
-	 * allocating from CMA when over half of the zone's free memory
-	 * is in the CMA area.
-	 */
-	if (alloc_flags & ALLOC_CMA &&
-	    zone_page_state(zone, NR_FREE_CMA_PAGES) >
-	    zone_page_state(zone, NR_FREE_PAGES) / 2) {
-		page = __rmqueue_cma_fallback(zone, order);
-		if (page)
-			return page;
-	}
-#endif
 retry:
 	page = __rmqueue_smallest(zone, order, migratetype);
-	if (unlikely(!page)) {
-		if (alloc_flags & ALLOC_CMA)
-			page = __rmqueue_cma_fallback(zone, order);
 
-		if (!page && __rmqueue_fallback(zone, order, migratetype,
-								alloc_flags))
-			goto retry;
-	}
+	if (unlikely(!page) && __rmqueue_fallback(zone, order, migratetype,
+						  alloc_flags))
+		goto retry;
 
 	trace_mm_page_alloc_zone_locked(page, order, migratetype);
 	return page;
 }
 
+#ifdef CONFIG_CMA
+static struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
+				  int migratetype,
+				  unsigned int alloc_flags)
+{
+	struct page *page = __rmqueue_cma_fallback(zone, order);
+
+	trace_mm_page_alloc_zone_locked(page, order, MIGRATE_CMA);
+	return page;
+}
+#else
+static inline struct page *__rmqueue_cma(struct zone *zone, unsigned int order,
+					 int migratetype,
+					 unsigned int alloc_flags)
+{
+	return NULL;
+}
+#endif
+
 /*
  * Obtain a specified number of elements from the buddy allocator, all under
  * a single hold of the lock, for efficiency.  Add them to the supplied list.
@@ -2885,8 +2897,14 @@
 
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
-		struct page *page = __rmqueue(zone, order, migratetype,
-								alloc_flags);
+		struct page *page;
+
+		if (is_migrate_cma(migratetype))
+			page = __rmqueue_cma(zone, order, migratetype,
+					     alloc_flags);
+		else
+			page = __rmqueue(zone, order, migratetype, alloc_flags);
+
 		if (unlikely(page == NULL))
 			break;
 
@@ -2921,6 +2939,28 @@
 	return alloced;
 }
 
+/*
+ * Return the pcp list that corresponds to the migrate type, refilling
+ * it from the buddy allocator if needed; return NULL if it is still
+ * empty after the refill attempt.
+ */
+static struct list_head *get_populated_pcp_list(struct zone *zone,
+			unsigned int order, struct per_cpu_pages *pcp,
+			int migratetype, unsigned int alloc_flags)
+{
+	struct list_head *list = &pcp->lists[migratetype];
+
+	if (list_empty(list)) {
+		pcp->count += rmqueue_bulk(zone, order,
+				pcp->batch, list,
+				migratetype, alloc_flags);
+
+		if (list_empty(list))
+			list = NULL;
+	}
+	return list;
+}
+
 #ifdef CONFIG_NUMA
 /*
  * Called from the vmstat counter updater to drain pagesets of this
@@ -3371,16 +3411,28 @@
 static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 			unsigned int alloc_flags,
 			struct per_cpu_pages *pcp,
-			struct list_head *list)
+			gfp_t gfp_flags)
 {
-	struct page *page;
+	struct page *page = NULL;
+	struct list_head *list = NULL;
 
 	do {
-		if (list_empty(list)) {
-			pcp->count += rmqueue_bulk(zone, 0,
-					pcp->batch, list,
+		/* First try to get CMA pages */
+		if (migratetype == MIGRATE_MOVABLE &&
+				gfp_flags & __GFP_CMA) {
+			list = get_populated_pcp_list(zone, 0, pcp,
+					get_cma_migrate_type(), alloc_flags);
+		}
+
+		if (list == NULL) {
+			/*
+			 * Either CMA is not suitable or there are no
+			 * free CMA pages.
+			 */
+			list = get_populated_pcp_list(zone, 0, pcp,
 					migratetype, alloc_flags);
-			if (unlikely(list_empty(list)))
+			if (unlikely(list == NULL))
 				return NULL;
 		}
 
@@ -3398,14 +3450,13 @@
 			int migratetype, unsigned int alloc_flags)
 {
 	struct per_cpu_pages *pcp;
-	struct list_head *list;
 	struct page *page;
 	unsigned long flags;
 
 	local_irq_save(flags);
 	pcp = &this_cpu_ptr(zone->pageset)->pcp;
-	list = &pcp->lists[migratetype];
-	page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list);
+	page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp,
+				 gfp_flags);
 	if (page) {
 		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
 		zone_statistics(preferred_zone, zone);
@@ -3427,16 +3478,9 @@
 	struct page *page;
 
 	if (likely(order == 0)) {
-		/*
-		 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
-		 * we need to skip it when CMA area isn't allowed.
-		 */
-		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
-				migratetype != MIGRATE_MOVABLE) {
-			page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
-					migratetype, alloc_flags);
-			goto out;
-		}
+		page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
+				       migratetype, alloc_flags);
+		goto out;
 	}
 
 	/*
@@ -3459,8 +3503,14 @@
 			if (page)
 				trace_mm_page_alloc_zone_locked(page, order, migratetype);
 		}
-		if (!page)
-			page = __rmqueue(zone, order, migratetype, alloc_flags);
+		if (!page) {
+			if (gfp_flags & __GFP_CMA && migratetype == MIGRATE_MOVABLE)
+				page = __rmqueue_cma(zone, order, migratetype,
+						     alloc_flags);
+			if (!page)
+				page = __rmqueue(zone, order, migratetype,
+						 alloc_flags);
+		}
 	} while (page && check_new_pages(page, order));
 	spin_unlock(&zone->lock);
 	if (!page)
@@ -3638,6 +3688,14 @@
 			continue;
 
 		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
+#ifdef CONFIG_CMA
+			/*
+			 * Note that this check is needed only
+			 * when MIGRATE_CMA < MIGRATE_PCPTYPES.
+			 */
+			if (mt == MIGRATE_CMA)
+				continue;
+#endif
 			if (!free_area_empty(area, mt))
 				return true;
 		}
@@ -3773,7 +3831,8 @@
 	unsigned int pflags = current->flags;
 
 	if (!(pflags & PF_MEMALLOC_NOCMA) &&
-			gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
+			gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE &&
+			gfp_mask & __GFP_CMA)
 		alloc_flags |= ALLOC_CMA;
 
 #endif
@@ -7827,6 +7886,7 @@
 static void __setup_per_zone_wmarks(void)
 {
 	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+	unsigned long pages_low = extra_free_kbytes >> (PAGE_SHIFT - 10);
 	unsigned long lowmem_pages = 0;
 	struct zone *zone;
 	unsigned long flags;
@@ -7838,11 +7898,13 @@
 	}
 
 	for_each_zone(zone) {
-		u64 tmp;
+		u64 tmp, low;
 
 		spin_lock_irqsave(&zone->lock, flags);
 		tmp = (u64)pages_min * zone_managed_pages(zone);
 		do_div(tmp, lowmem_pages);
+		low = (u64)pages_low * zone_managed_pages(zone);
+		do_div(low, nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE)));
 		if (is_highmem(zone)) {
 			/*
 			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
@@ -7876,8 +7938,8 @@
 				      watermark_scale_factor, 10000));
 
 		zone->watermark_boost = 0;
-		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
-		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
+		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + low + tmp;
+		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + low + tmp * 2;
 
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
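
A back-of-envelope check of the new watermark arithmetic (single zone owning all memory, 4KiB pages, ignoring the managed/free-pages distinction, so tmp == pages_min and low == pages_low): extra_free_kbytes lifts WMARK_LOW and WMARK_HIGH but leaves WMARK_MIN alone, so kswapd starts reclaiming earlier without changing the point where allocations actually fail.

#include <stdio.h>

int main(void)
{
	unsigned long page_shift = 12;
	unsigned long min_free_kbytes = 1024, extra_free_kbytes = 65536;
	unsigned long pages_min = min_free_kbytes >> (page_shift - 10);
	unsigned long pages_low = extra_free_kbytes >> (page_shift - 10);
	unsigned long wmark_min  = pages_min;
	unsigned long wmark_low  = wmark_min + pages_low + pages_min;
	unsigned long wmark_high = wmark_min + pages_low + 2 * pages_min;

	/* min stays at 256 pages; low/high move up by 16384 pages (64MB) */
	printf("min=%lu low=%lu high=%lu\n", wmark_min, wmark_low, wmark_high);
	return 0;
}
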
@@ -7962,7 +8024,7 @@
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
  *	that we can call two helper functions whenever min_free_kbytes
- *	changes.
+ *	or extra_free_kbytes changes.
  */
 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
 		void *buffer, size_t *length, loff_t *ppos)
diff --git a/mm/readahead.c b/mm/readahead.c
index c5b0457..a6bfa98 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -23,6 +23,7 @@
 #include <linux/blk-cgroup.h>
 #include <linux/fadvise.h>
 #include <linux/sched/mm.h>
+#include <trace/hooks/mm.h>
 
 #include "internal.h"
 
@@ -114,6 +115,15 @@
 
 EXPORT_SYMBOL(read_cache_pages);
 
+gfp_t readahead_gfp_mask(struct address_space *x)
+{
+	gfp_t mask = mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
+
+	trace_android_rvh_set_readahead_gfp_mask(&mask);
+	return mask;
+}
+EXPORT_SYMBOL_GPL(readahead_gfp_mask);
+
 static void read_pages(struct readahead_control *rac, struct list_head *pages,
 		bool skip_page)
 {
diff --git a/mm/shmem.c b/mm/shmem.c
index 537c137..2a27707 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3269,7 +3269,8 @@
 
 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
 				   struct dentry *unused, struct inode *inode,
-				   const char *name, void *buffer, size_t size)
+				   const char *name, void *buffer, size_t size,
+				   int flags)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
 
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 698bc0b..cf8df27 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1159,9 +1159,7 @@
 	"nr_mlock",
 	"nr_page_table_pages",
 	"nr_bounce",
-#if IS_ENABLED(CONFIG_ZSMALLOC)
 	"nr_zspages",
-#endif
 	"nr_free_cma",
 
 	/* enum numa_stat_item counters */
diff --git a/net/core/filter.c b/net/core/filter.c
index 2ca5eec..9d19f721 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3554,8 +3554,9 @@
 
 static u32 __bpf_skb_max_len(const struct sk_buff *skb)
 {
-	return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
-			  SKB_MAX_ALLOC;
+	if (skb_at_tc_ingress(skb) || !skb->dev)
+		return SKB_MAX_ALLOC;
+	return skb->dev->mtu + skb->dev->hard_header_len;
 }
 
 BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index 283ddb2..465362a 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -35,13 +35,11 @@
 #include <trace/events/tcp.h>
 #include <trace/events/fib.h>
 #include <trace/events/qdisc.h>
-#if IS_ENABLED(CONFIG_BRIDGE)
 #include <trace/events/bridge.h>
 EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_add);
 EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_external_learn_add);
 EXPORT_TRACEPOINT_SYMBOL_GPL(fdb_delete);
 EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_update);
-#endif
 
 #if IS_ENABLED(CONFIG_PAGE_POOL)
 #include <trace/events/page_pool.h>
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8b6eb38..fd6cef9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -216,6 +216,7 @@
 	.accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
+	.accept_ra_rt_table	= 0,
 	.proxy_ndp		= 0,
 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
 	.disable_ipv6		= 0,
@@ -271,6 +272,7 @@
 	.accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
+	.accept_ra_rt_table	= 0,
 	.proxy_ndp		= 0,
 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
 	.disable_ipv6		= 0,
@@ -2377,6 +2379,26 @@
 		goto regen;
 }
 
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table)
+{
+	struct inet6_dev *idev = in6_dev_get(dev);
+	int sysctl;
+	u32 table;
+
+	if (!idev)
+		return default_table;
+	sysctl = idev->cnf.accept_ra_rt_table;
+	if (sysctl == 0) {
+		table = default_table;
+	} else if (sysctl > 0) {
+		table = (u32) sysctl;
+	} else {
+		table = (unsigned) dev->ifindex + (-sysctl);
+	}
+	in6_dev_put(idev);
+	return table;
+}
+
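
The accept_ra_rt_table semantics in a worked example (illustrative; -1000 is the value Android conventionally sets): 0 keeps the default table, a positive value pins one table for all interfaces, and a negative value derives a per-interface table from the ifindex.

#include <stdio.h>

static unsigned int ra_route_table(int sysctl, int ifindex,
				   unsigned int default_table)
{
	if (sysctl == 0)
		return default_table;
	if (sysctl > 0)
		return (unsigned int)sysctl;
	return (unsigned int)ifindex + (unsigned int)(-sysctl);
}

int main(void)
{
	/* with sysctl -1000, ifindex 3 -> table 1003, ifindex 5 -> 1005 */
	printf("%u\n", ra_route_table(-1000, 3, 254));	/* 1003 */
	printf("%u\n", ra_route_table(-1000, 5, 254));	/* 1005 */
	printf("%u\n", ra_route_table(0, 3, 254));	/* 254  */
	return 0;
}
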
 /*
  *	Add prefix route.
  */
@@ -2387,7 +2409,7 @@
 		      u32 flags, gfp_t gfp_flags)
 {
 	struct fib6_config cfg = {
-		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
+		.fc_table = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX),
 		.fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF,
 		.fc_ifindex = dev->ifindex,
 		.fc_expires = expires,
@@ -2422,7 +2444,7 @@
 	struct fib6_node *fn;
 	struct fib6_info *rt = NULL;
 	struct fib6_table *table;
-	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
+	u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX);
 
 	table = fib6_get_table(dev_net(dev), tb_id);
 	if (!table)
@@ -6713,6 +6735,13 @@
 #endif
 #endif
 	{
+		.procname	= "accept_ra_rt_table",
+		.data		= &ipv6_devconf.accept_ra_rt_table,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+	{
 		.procname	= "proxy_ndp",
 		.data		= &ipv6_devconf.proxy_ndp,
 		.maxlen		= sizeof(int),
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7e0ce7a..8b0df69 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -4149,7 +4149,7 @@
 					   const struct in6_addr *gwaddr,
 					   struct net_device *dev)
 {
-	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
+	u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_INFO);
 	int ifindex = dev->ifindex;
 	struct fib6_node *fn;
 	struct fib6_info *rt = NULL;
@@ -4203,7 +4203,7 @@
 		.fc_nlinfo.nl_net = net,
 	};
 
-	cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
+	cfg.fc_table = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_INFO);
 	cfg.fc_dst = *prefix;
 	cfg.fc_gateway = *gwaddr;
 
@@ -4221,7 +4221,7 @@
 				     const struct in6_addr *addr,
 				     struct net_device *dev)
 {
-	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
+	u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_DFLT);
 	struct fib6_info *rt;
 	struct fib6_table *table;
 
@@ -4255,7 +4255,7 @@
 				     unsigned int pref)
 {
 	struct fib6_config cfg = {
-		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
+		.fc_table	= l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_DFLT),
 		.fc_metric	= IP6_RT_PRIO_USER,
 		.fc_ifindex	= dev->ifindex,
 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
@@ -4280,47 +4280,24 @@
 	return rt6_get_dflt_router(net, gwaddr, dev);
 }
 
-static void __rt6_purge_dflt_routers(struct net *net,
-				     struct fib6_table *table)
+static int rt6_addrconf_purge(struct fib6_info *rt, void *arg)
 {
-	struct fib6_info *rt;
+	struct net_device *dev = fib6_info_nh_dev(rt);
+	struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
 
-restart:
-	rcu_read_lock();
-	for_each_fib6_node_rt_rcu(&table->tb6_root) {
-		struct net_device *dev = fib6_info_nh_dev(rt);
-		struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
-
-		if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
-		    (!idev || idev->cnf.accept_ra != 2) &&
-		    fib6_info_hold_safe(rt)) {
-			rcu_read_unlock();
-			ip6_del_rt(net, rt, false);
-			goto restart;
-		}
+	if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+	    (!idev || idev->cnf.accept_ra != 2)) {
+		/* Delete this route. See fib6_clean_tree() */
+		return -1;
 	}
-	rcu_read_unlock();
 
-	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
+	/* Continue walking */
+	return 0;
 }
 
 void rt6_purge_dflt_routers(struct net *net)
 {
-	struct fib6_table *table;
-	struct hlist_head *head;
-	unsigned int h;
-
-	rcu_read_lock();
-
-	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
-		head = &net->ipv6.fib_table_hash[h];
-		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
-			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
-				__rt6_purge_dflt_routers(net, table);
-		}
-	}
-
-	rcu_read_unlock();
+	fib6_clean_all(net, rt6_addrconf_purge, NULL);
 }
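
The rewrite swaps an open-coded restart-on-delete walk for fib6_clean_all(), whose callback contract is: return 0 to keep walking, -1 to have the core delete the current entry. A toy list walker with the same contract (illustrative only, not the fib6 implementation):

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };

static void clean_all(struct node **head, int (*fn)(struct node *))
{
	struct node **pp = head;

	while (*pp) {
		if (fn(*pp) == -1) {		/* callback asked: delete it */
			struct node *dead = *pp;

			*pp = dead->next;
			free(dead);
		} else {
			pp = &(*pp)->next;	/* callback asked: keep walking */
		}
	}
}

static int drop_even(struct node *n)
{
	return (n->val % 2 == 0) ? -1 : 0;
}

int main(void)
{
	struct node *head = NULL;

	for (int v = 5; v >= 1; v--) {		/* build 1..5 */
		struct node *n = malloc(sizeof(*n));

		n->val = v;
		n->next = head;
		head = n;
	}
	clean_all(&head, drop_even);
	for (struct node *n = head; n; n = n->next)
		printf("%d ", n->val);		/* 1 3 5 */
	printf("\n");
	return 0;
}
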
 
 static void rtmsg_to_fib6_config(struct net *net,
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 5237021..15ccd84 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1477,6 +1477,29 @@
 	  If you want to compile it as a module, say M here and read
 	  <file:Documentation/kbuild/modules.rst>.  If unsure, say `N'.
 
+config NETFILTER_XT_MATCH_QUOTA2
+	tristate '"quota2" match support'
+	depends on NETFILTER_ADVANCED
+	help
+	  This option adds a `quota2' match, which matches on a byte
+	  counter that is kept accurate across CPUs rather than per-CPU.
+	  The quotas can be named.
+	  This is based on http://xtables-addons.git.sourceforge.net
+
+	  If you want to compile it as a module, say M here and read
+	  <file:Documentation/kbuild/modules.rst>.  If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_QUOTA2_LOG
+	bool '"quota2" Netfilter LOG support'
+	depends on NETFILTER_XT_MATCH_QUOTA2
+	default n
+	help
+	  This option allows `quota2' to log ONCE when a quota limit
+	  is passed. It logs via NETLINK using the NETLINK_NFLOG family,
+	  in the same format as ipt_ULOG but without the packet data.
+
+	  If unsure, say `N'.
+
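Each named quota2 counter is exported as /proc/net/xt_quota/<name> (created by the proc_mkdir()/proc_create_data() calls in the new xt_quota2.c below) and holds the remaining byte or packet budget as a decimal u64; writing a number resets the budget. A hedged userspace sketch of reading and resetting a counter, with the path and semantics taken from the new file and error handling trimmed:

	#include <stdio.h>
	#include <stdint.h>
	#include <inttypes.h>

	/* Read the remaining budget of a named quota2 counter. */
	static uint64_t quota_read(const char *name)
	{
		char path[256];
		uint64_t val = 0;
		FILE *f;

		snprintf(path, sizeof(path), "/proc/net/xt_quota/%s", name);
		f = fopen(path, "r");
		if (f) {
			if (fscanf(f, "%" SCNu64, &val) != 1)
				val = 0;
			fclose(f);
		}
		return val;
	}

	/* Reset the budget; the kernel side parses it with simple_strtoull(). */
	static void quota_set(const char *name, uint64_t val)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), "/proc/net/xt_quota/%s", name);
		f = fopen(path, "w");
		if (f) {
			fprintf(f, "%" PRIu64 "\n", val);
			fclose(f);
		}
	}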
 config NETFILTER_XT_MATCH_RATEEST
 	tristate '"rateest" match support'
 	depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 0e0ded8..9100085 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -197,6 +197,7 @@
 obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 7b2f359..5b45815 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -6,6 +6,7 @@
  * After timer expires a kevent will be sent.
  *
  * Copyright (C) 2004, 2010 Nokia Corporation
+ *
  * Written by Timo Teras <ext-timo.teras@nokia.com>
  *
  * Converted to x_tables and reworked for upstream inclusion
@@ -26,8 +27,17 @@
 #include <linux/netfilter/xt_IDLETIMER.h>
 #include <linux/kdev_t.h>
 #include <linux/kobject.h>
+#include <linux/skbuff.h>
 #include <linux/workqueue.h>
 #include <linux/sysfs.h>
+#include <linux/rtc.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/suspend.h>
+#include <linux/notifier.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <net/inet_sock.h>
 
 struct idletimer_tg {
 	struct list_head entry;
@@ -38,15 +48,114 @@
 	struct kobject *kobj;
 	struct device_attribute attr;
 
+	struct timespec64 delayed_timer_trigger;
+	struct timespec64 last_modified_timer;
+	struct timespec64 last_suspend_time;
+	struct notifier_block pm_nb;
+
+	int timeout;
 	unsigned int refcnt;
 	u8 timer_type;
+
+	bool work_pending;
+	bool send_nl_msg;
+	bool active;
+	uid_t uid;
+	bool suspend_time_valid;
 };
 
 static LIST_HEAD(idletimer_tg_list);
 static DEFINE_MUTEX(list_mutex);
+static DEFINE_SPINLOCK(timestamp_lock);
 
 static struct kobject *idletimer_tg_kobj;
 
+static bool check_for_delayed_trigger(struct idletimer_tg *timer,
+		struct timespec64 *ts)
+{
+	bool state;
+	struct timespec64 temp;
+	spin_lock_bh(&timestamp_lock);
+	timer->work_pending = false;
+	if ((ts->tv_sec - timer->last_modified_timer.tv_sec) > timer->timeout ||
+			timer->delayed_timer_trigger.tv_sec != 0) {
+		state = false;
+		temp.tv_sec = timer->timeout;
+		temp.tv_nsec = 0;
+		if (timer->delayed_timer_trigger.tv_sec != 0) {
+			temp = timespec64_add(timer->delayed_timer_trigger,
+					      temp);
+			ts->tv_sec = temp.tv_sec;
+			ts->tv_nsec = temp.tv_nsec;
+			timer->delayed_timer_trigger.tv_sec = 0;
+			timer->work_pending = true;
+			schedule_work(&timer->work);
+		} else {
+			temp = timespec64_add(timer->last_modified_timer, temp);
+			ts->tv_sec = temp.tv_sec;
+			ts->tv_nsec = temp.tv_nsec;
+		}
+	} else {
+		state = timer->active;
+	}
+	spin_unlock_bh(&timestamp_lock);
+	return state;
+}
+
+static void notify_netlink_uevent(const char *iface, struct idletimer_tg *timer)
+{
+	char iface_msg[NLMSG_MAX_SIZE];
+	char state_msg[NLMSG_MAX_SIZE];
+	char timestamp_msg[NLMSG_MAX_SIZE];
+	char uid_msg[NLMSG_MAX_SIZE];
+	char *envp[] = { iface_msg, state_msg, timestamp_msg, uid_msg, NULL };
+	int res;
+	struct timespec64 ts;
+	uint64_t time_ns;
+	bool state;
+
+	res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
+		       iface);
+	if (NLMSG_MAX_SIZE <= res) {
+		pr_err("message too long (%d)", res);
+		return;
+	}
+
+	ts = ktime_to_timespec64(ktime_get_boottime());
+	state = check_for_delayed_trigger(timer, &ts);
+	res = snprintf(state_msg, NLMSG_MAX_SIZE, "STATE=%s",
+			state ? "active" : "inactive");
+
+	if (NLMSG_MAX_SIZE <= res) {
+		pr_err("message too long (%d)", res);
+		return;
+	}
+
+	if (state) {
+		res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=%u", timer->uid);
+		if (NLMSG_MAX_SIZE <= res)
+			pr_err("message too long (%d)", res);
+	} else {
+		res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=");
+		if (NLMSG_MAX_SIZE <= res)
+			pr_err("message too long (%d)", res);
+	}
+
+	time_ns = timespec64_to_ns(&ts);
+	res = snprintf(timestamp_msg, NLMSG_MAX_SIZE, "TIME_NS=%llu", time_ns);
+	if (NLMSG_MAX_SIZE <= res) {
+		timestamp_msg[0] = '\0';
+		pr_err("message too long (%d)", res);
+	}
+
+	pr_debug("putting nlmsg: <%s> <%s> <%s> <%s>\n", iface_msg, state_msg,
+		 timestamp_msg, uid_msg);
+	kobject_uevent_env(idletimer_tg_kobj, KOBJ_CHANGE, envp);
+}
+
 static
 struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
 {
@@ -67,6 +176,7 @@
 	unsigned long expires = 0;
 	struct timespec64 ktimespec = {};
 	long time_diff = 0;
+	unsigned long now = jiffies;
 
 	mutex_lock(&list_mutex);
 
@@ -84,9 +194,13 @@
 
 	mutex_unlock(&list_mutex);
 
-	if (time_after(expires, jiffies) || ktimespec.tv_sec > 0)
+	if (time_after(expires, now) || ktimespec.tv_sec > 0)
 		return snprintf(buf, PAGE_SIZE, "%ld\n", time_diff);
 
+	if (timer->send_nl_msg)
+		return snprintf(buf, PAGE_SIZE, "0 %d\n",
+			jiffies_to_msecs(now - expires) / 1000);
+
 	return snprintf(buf, PAGE_SIZE, "0\n");
 }
 
@@ -96,6 +210,9 @@
 						  work);
 
 	sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
+
+	if (timer->send_nl_msg)
+		notify_netlink_uevent(timer->attr.attr.name, timer);
 }
 
 static void idletimer_tg_expired(struct timer_list *t)
@@ -103,8 +220,61 @@
 	struct idletimer_tg *timer = from_timer(timer, t, timer);
 
 	pr_debug("timer %s expired\n", timer->attr.attr.name);
-
+	spin_lock_bh(&timestamp_lock);
+	timer->active = false;
+	timer->work_pending = true;
 	schedule_work(&timer->work);
+	spin_unlock_bh(&timestamp_lock);
+}
+
+static int idletimer_resume(struct notifier_block *notifier,
+		unsigned long pm_event, void *unused)
+{
+	struct timespec64 ts;
+	unsigned long time_diff, now = jiffies;
+	struct idletimer_tg *timer = container_of(notifier,
+			struct idletimer_tg, pm_nb);
+	if (!timer)
+		return NOTIFY_DONE;
+	switch (pm_event) {
+	case PM_SUSPEND_PREPARE:
+		timer->last_suspend_time =
+			ktime_to_timespec64(ktime_get_boottime());
+		timer->suspend_time_valid = true;
+		break;
+	case PM_POST_SUSPEND:
+		if (!timer->suspend_time_valid)
+			break;
+		timer->suspend_time_valid = false;
+
+		spin_lock_bh(&timestamp_lock);
+		if (!timer->active) {
+			spin_unlock_bh(&timestamp_lock);
+			break;
+		}
+		/* Since jiffies are not updated while suspended, "now"
+		 * represents the time at which the system suspended.
+		 */
+		if (time_after(timer->timer.expires, now)) {
+			ts = ktime_to_timespec64(ktime_get_boottime());
+			ts = timespec64_sub(ts, timer->last_suspend_time);
+			time_diff = timespec64_to_jiffies(&ts);
+			if (timer->timer.expires > (time_diff + now)) {
+				mod_timer_pending(&timer->timer,
+						(timer->timer.expires - time_diff));
+			} else {
+				del_timer(&timer->timer);
+				timer->timer.expires = 0;
+				timer->active = false;
+				timer->work_pending = true;
+				schedule_work(&timer->work);
+			}
+		}
+		spin_unlock_bh(&timestamp_lock);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
 }
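On PM_POST_SUSPEND the handler shifts a still-pending timer forward, because jiffies do not advance while the system is suspended: the suspended wall time (boottime at resume minus the boottime recorded at PM_SUSPEND_PREPARE) is converted to jiffies and subtracted from the expiry, and if suspend already consumed the remainder the timer is cancelled and the expiry work is scheduled immediately. The same arithmetic as a self-contained sketch with plain integers; all names here are illustrative:

	/* new_expiry = old_expiry - suspended_jiffies, clamped so that a
	 * fully-consumed timer reports expiry "now" and *expired is set.
	 */
	static unsigned long fixup_expiry(unsigned long expires,
					  unsigned long now,
					  unsigned long suspended_jiffies,
					  int *expired)
	{
		*expired = 0;
		if (expires <= now)			/* already past */
			return expires;
		if (expires > now + suspended_jiffies)	/* still pending */
			return expires - suspended_jiffies;
		*expired = 1;				/* suspend ate the rest */
		return now;
	}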
 
 static enum alarmtimer_restart idletimer_tg_alarmproc(struct alarm *alarm,
@@ -137,7 +307,7 @@
 {
 	int ret;
 
-	info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL);
+	info->timer = kzalloc(sizeof(*info->timer), GFP_KERNEL);
 	if (!info->timer) {
 		ret = -ENOMEM;
 		goto out;
@@ -166,6 +336,22 @@
 
 	timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
 	info->timer->refcnt = 1;
+	info->timer->send_nl_msg = (info->send_nl_msg == 0) ? false : true;
+	info->timer->active = true;
+	info->timer->timeout = info->timeout;
+
+	info->timer->delayed_timer_trigger.tv_sec = 0;
+	info->timer->delayed_timer_trigger.tv_nsec = 0;
+	info->timer->work_pending = false;
+	info->timer->uid = 0;
+	info->timer->last_modified_timer =
+		ktime_to_timespec64(ktime_get_boottime());
+
+	info->timer->pm_nb.notifier_call = idletimer_resume;
+	ret = register_pm_notifier(&info->timer->pm_nb);
+	if (ret)
+		printk(KERN_WARNING "[%s] Failed to register pm notifier %d\n",
+				__func__, ret);
 
 	INIT_WORK(&info->timer->work, idletimer_tg_work);
 
@@ -182,6 +368,42 @@
 	return ret;
 }
 
+static void reset_timer(const struct idletimer_tg_info *info,
+			struct sk_buff *skb)
+{
+	unsigned long now = jiffies;
+	struct idletimer_tg *timer = info->timer;
+	bool timer_prev;
+
+	spin_lock_bh(&timestamp_lock);
+	timer_prev = timer->active;
+	timer->active = true;
+	/* timer_prev guards against jiffies wraparound in time_before() */
+	if (!timer_prev || time_before(timer->timer.expires, now)) {
+		pr_debug("Starting Checkentry timer (Expired, Jiffies): %lu, %lu\n",
+				timer->timer.expires, now);
+
+		/* Store the uid responsible for waking up the radio */
+		if (skb && (skb->sk)) {
+			timer->uid = from_kuid_munged(current_user_ns(),
+					sock_i_uid(skb_to_full_sk(skb)));
+		}
+
+		/* Check if there is a pending inactive notification */
+		if (timer->work_pending)
+			timer->delayed_timer_trigger = timer->last_modified_timer;
+		else {
+			timer->work_pending = true;
+			schedule_work(&timer->work);
+		}
+	}
+
+	timer->last_modified_timer = ktime_to_timespec64(ktime_get_boottime());
+	mod_timer(&timer->timer,
+			msecs_to_jiffies(info->timeout * 1000) + now);
+	spin_unlock_bh(&timestamp_lock);
+}
+
 static int idletimer_tg_create_v1(struct idletimer_tg_info_v1 *info)
 {
 	int ret;
@@ -251,13 +473,23 @@
 					 const struct xt_action_param *par)
 {
 	const struct idletimer_tg_info *info = par->targinfo;
+	unsigned long now = jiffies;
 
 	pr_debug("resetting timer %s, timeout period %u\n",
 		 info->label, info->timeout);
 
-	mod_timer(&info->timer->timer,
-		  msecs_to_jiffies(info->timeout * 1000) + jiffies);
+	BUG_ON(!info->timer);
 
+	info->timer->active = true;
+
+	if (time_before(info->timer->timer.expires, now)) {
+		schedule_work(&info->timer->work);
+		pr_debug("Starting timer %s (Expired, Jiffies): %lu, %lu\n",
+			 info->label, info->timer->timer.expires, now);
+	}
+
+	/* TODO: Avoid modifying timers on each packet */
+	reset_timer(info, skb);
 	return XT_CONTINUE;
 }
 
@@ -321,9 +553,7 @@
 	info->timer = __idletimer_tg_find_by_label(info->label);
 	if (info->timer) {
 		info->timer->refcnt++;
-		mod_timer(&info->timer->timer,
-			  msecs_to_jiffies(info->timeout * 1000) + jiffies);
-
+		reset_timer(info, NULL);
 		pr_debug("increased refcnt of timer %s to %u\n",
 			 info->label, info->timer->refcnt);
 	} else {
@@ -336,6 +566,7 @@
 	}
 
 	mutex_unlock(&list_mutex);
+
 	return 0;
 }
 
@@ -414,13 +645,14 @@
 
 		list_del(&info->timer->entry);
 		del_timer_sync(&info->timer->timer);
-		cancel_work_sync(&info->timer->work);
 		sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+		unregister_pm_notifier(&info->timer->pm_nb);
+		cancel_work_sync(&info->timer->work);
 		kfree(info->timer->attr.attr.name);
 		kfree(info->timer);
 	} else {
 		pr_debug("decreased refcnt of timer %s to %u\n",
-			 info->label, info->timer->refcnt);
+		info->label, info->timer->refcnt);
 	}
 
 	mutex_unlock(&list_mutex);
@@ -459,6 +691,7 @@
 static struct xt_target idletimer_tg[] __read_mostly = {
 	{
 	.name		= "IDLETIMER",
+	.revision	= 1,
 	.family		= NFPROTO_UNSPEC,
 	.target		= idletimer_tg_target,
 	.targetsize     = sizeof(struct idletimer_tg_info),
@@ -540,3 +773,4 @@
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("ipt_IDLETIMER");
 MODULE_ALIAS("ip6t_IDLETIMER");
+MODULE_ALIAS("arpt_IDLETIMER");
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
new file mode 100644
index 0000000..2d87ad1
--- /dev/null
+++ b/net/netfilter/xt_quota2.c
@@ -0,0 +1,400 @@
+/*
+ * xt_quota2 - enhanced xt_quota that can count upwards and in packets
+ * as a minimal accounting match.
+ * by Jan Engelhardt <jengelh@medozas.de>, 2008
+ *
+ * Originally based on xt_quota.c:
+ * 	netfilter module to enforce network quotas
+ * 	Sam Johnston <samj@samj.net>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License; either
+ *	version 2 of the License, as published by the Free Software Foundation.
+ */
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_quota2.h>
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* For compatibility, these definitions are copied from the
+ * deprecated header file <linux/netfilter_ipv4/ipt_ULOG.h> */
+#define ULOG_MAC_LEN	80
+#define ULOG_PREFIX_LEN	32
+
+/* Format of the ULOG packets passed through netlink */
+typedef struct ulog_packet_msg {
+	unsigned long mark;
+	long timestamp_sec;
+	long timestamp_usec;
+	unsigned int hook;
+	char indev_name[IFNAMSIZ];
+	char outdev_name[IFNAMSIZ];
+	size_t data_len;
+	char prefix[ULOG_PREFIX_LEN];
+	unsigned char mac_len;
+	unsigned char mac[ULOG_MAC_LEN];
+	unsigned char payload[0];
+} ulog_packet_msg_t;
+#endif
+
+/**
+ * struct xt_quota_counter - accounting state for one named quota
+ * @lock:	lock to protect quota writers from each other
+ */
+struct xt_quota_counter {
+	u_int64_t quota;
+	spinlock_t lock;
+	struct list_head list;
+	atomic_t ref;
+	char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
+	struct proc_dir_entry *procfs_entry;
+};
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* Harald's favorite number +1 :D From ipt_ULOG.c */
+static unsigned int qlog_nl_event = 112;
+module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(event_num,
+		 "Event number for NETLINK_NFLOG message. 0 disables log. "
+		 "111 is what ipt_ULOG uses.");
+static struct sock *nflognl;
+#endif
+
+static LIST_HEAD(counter_list);
+static DEFINE_SPINLOCK(counter_list_lock);
+
+static struct proc_dir_entry *proc_xt_quota;
+static unsigned int quota_list_perms = S_IRUGO | S_IWUSR;
+static kuid_t quota_list_uid = KUIDT_INIT(0);
+static kgid_t quota_list_gid = KGIDT_INIT(0);
+module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+static void quota2_log(unsigned int hooknum,
+		       const struct sk_buff *skb,
+		       const struct net_device *in,
+		       const struct net_device *out,
+		       const char *prefix)
+{
+	ulog_packet_msg_t *pm;
+	struct sk_buff *log_skb;
+	size_t size;
+	struct nlmsghdr *nlh;
+
+	if (!qlog_nl_event)
+		return;
+
+	size = NLMSG_SPACE(sizeof(*pm));
+	size = max(size, (size_t)NLMSG_GOODSIZE);
+	log_skb = alloc_skb(size, GFP_ATOMIC);
+	if (!log_skb) {
+		pr_err("xt_quota2: cannot alloc skb for logging\n");
+		return;
+	}
+
+	nlh = nlmsg_put(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
+			sizeof(*pm), 0);
+	if (!nlh) {
+		pr_err("xt_quota2: nlmsg_put failed\n");
+		kfree_skb(log_skb);
+		return;
+	}
+	pm = nlmsg_data(nlh);
+	if (skb->tstamp == 0)
+		__net_timestamp((struct sk_buff *)skb);
+	pm->data_len = 0;
+	pm->hook = hooknum;
+	if (prefix != NULL)
+		strlcpy(pm->prefix, prefix, sizeof(pm->prefix));
+	else
+		*(pm->prefix) = '\0';
+	if (in)
+		strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name));
+	else
+		pm->indev_name[0] = '\0';
+
+	if (out)
+		strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
+	else
+		pm->outdev_name[0] = '\0';
+
+	NETLINK_CB(log_skb).dst_group = 1;
+	pr_debug("throwing 1 packets to netlink group 1\n");
+	netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
+}
+#else
+static void quota2_log(unsigned int hooknum,
+		       const struct sk_buff *skb,
+		       const struct net_device *in,
+		       const struct net_device *out,
+		       const char *prefix)
+{
+}
+#endif  /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */
+
+static ssize_t quota_proc_read(struct file *file, char __user *buf,
+			   size_t size, loff_t *ppos)
+{
+	struct xt_quota_counter *e = PDE_DATA(file_inode(file));
+	char tmp[24];
+	size_t tmp_size;
+
+	spin_lock_bh(&e->lock);
+	tmp_size = scnprintf(tmp, sizeof(tmp), "%llu\n", e->quota);
+	spin_unlock_bh(&e->lock);
+	return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
+static ssize_t quota_proc_write(struct file *file, const char __user *input,
+                            size_t size, loff_t *ppos)
+{
+	struct xt_quota_counter *e = PDE_DATA(file_inode(file));
+	char buf[sizeof("18446744073709551616")];
+
+	if (size > sizeof(buf) - 1)
+		size = sizeof(buf) - 1;
+	if (copy_from_user(buf, input, size) != 0)
+		return -EFAULT;
+	buf[size] = '\0';
+
+	spin_lock_bh(&e->lock);
+	e->quota = simple_strtoull(buf, NULL, 0);
+	spin_unlock_bh(&e->lock);
+	return size;
+}
+
+static const struct proc_ops q2_counter_fops = {
+	.proc_read	= quota_proc_read,
+	.proc_write	= quota_proc_write,
+	.proc_lseek	= default_llseek,
+};
+
+static struct xt_quota_counter *
+q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
+{
+	struct xt_quota_counter *e;
+	unsigned int size;
+
+	/* Do not need all the procfs things for anonymous counters. */
+	size = anon ? offsetof(typeof(*e), list) : sizeof(*e);
+	e = kmalloc(size, GFP_KERNEL);
+	if (e == NULL)
+		return NULL;
+
+	e->quota = q->quota;
+	spin_lock_init(&e->lock);
+	if (!anon) {
+		INIT_LIST_HEAD(&e->list);
+		atomic_set(&e->ref, 1);
+		strlcpy(e->name, q->name, sizeof(e->name));
+	}
+	return e;
+}
+
+/**
+ * q2_get_counter - get ref to counter or create new
+ * @name:	name of counter
+ */
+static struct xt_quota_counter *
+q2_get_counter(const struct xt_quota_mtinfo2 *q)
+{
+	struct proc_dir_entry *p;
+	struct xt_quota_counter *e = NULL;
+	struct xt_quota_counter *new_e;
+
+	if (*q->name == '\0')
+		return q2_new_counter(q, true);
+
+	/* No need to hold a lock while getting a new counter */
+	new_e = q2_new_counter(q, false);
+	if (new_e == NULL)
+		goto out;
+
+	spin_lock_bh(&counter_list_lock);
+	list_for_each_entry(e, &counter_list, list)
+		if (strcmp(e->name, q->name) == 0) {
+			atomic_inc(&e->ref);
+			spin_unlock_bh(&counter_list_lock);
+			kfree(new_e);
+			pr_debug("xt_quota2: old counter name=%s", e->name);
+			return e;
+		}
+	e = new_e;
+	pr_debug("xt_quota2: new_counter name=%s", e->name);
+	list_add_tail(&e->list, &counter_list);
+	/* The new entry holds a refcount of 1 but is not yet visible to
+	 * iptables, because this function has not returned it; no rule
+	 * can destroy it yet. Destroying it would first require this
+	 * function to be re-invoked and take a new ref on the same named
+	 * quota. Nobody will access e->procfs_entry either, so it is
+	 * safe to release the lock here. */
+	spin_unlock_bh(&counter_list_lock);
+
+	/* proc_create_data() must not be called while holding a spinlock */
+	p = e->procfs_entry = proc_create_data(e->name, quota_list_perms,
+	                      proc_xt_quota, &q2_counter_fops, e);
+
+	if (IS_ERR_OR_NULL(p)) {
+		spin_lock_bh(&counter_list_lock);
+		list_del(&e->list);
+		spin_unlock_bh(&counter_list_lock);
+		goto out;
+	}
+	proc_set_user(p, quota_list_uid, quota_list_gid);
+	return e;
+
+ out:
+	kfree(e);
+	return NULL;
+}
+
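q2_get_counter() allocates the would-be counter before taking counter_list_lock and throws it away if a same-named entry already exists, which keeps the sleeping kmalloc(GFP_KERNEL) out of the spinlock. The same alloc-outside-lock / check-under-lock pattern in a self-contained userspace sketch; all names are illustrative and a pthread mutex stands in for the spinlock:

	#include <pthread.h>
	#include <stdlib.h>
	#include <string.h>

	struct counter {
		struct counter *next;
		int ref;
		char name[32];
	};

	static struct counter *counter_list;
	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

	struct counter *get_counter(const char *name)
	{
		struct counter *e, *new_e;

		/* allocate before taking the lock (allocation may sleep) */
		new_e = calloc(1, sizeof(*new_e));
		if (!new_e)
			return NULL;
		new_e->ref = 1;
		strncpy(new_e->name, name, sizeof(new_e->name) - 1);

		pthread_mutex_lock(&list_lock);
		for (e = counter_list; e; e = e->next)
			if (strcmp(e->name, name) == 0) {
				e->ref++;		/* reuse existing entry */
				pthread_mutex_unlock(&list_lock);
				free(new_e);		/* discard the race copy */
				return e;
			}
		new_e->next = counter_list;		/* publish the new entry */
		counter_list = new_e;
		pthread_mutex_unlock(&list_lock);
		return new_e;
	}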
+static int quota_mt2_check(const struct xt_mtchk_param *par)
+{
+	struct xt_quota_mtinfo2 *q = par->matchinfo;
+
+	pr_debug("xt_quota2: check() flags=0x%04x", q->flags);
+
+	if (q->flags & ~XT_QUOTA_MASK)
+		return -EINVAL;
+
+	q->name[sizeof(q->name)-1] = '\0';
+	if (*q->name == '.' || strchr(q->name, '/') != NULL) {
+		printk(KERN_ERR "xt_quota.3: illegal name\n");
+		return -EINVAL;
+	}
+
+	q->master = q2_get_counter(q);
+	if (q->master == NULL) {
+		printk(KERN_ERR "xt_quota.3: memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
+{
+	struct xt_quota_mtinfo2 *q = par->matchinfo;
+	struct xt_quota_counter *e = q->master;
+
+	if (*q->name == '\0') {
+		kfree(e);
+		return;
+	}
+
+	spin_lock_bh(&counter_list_lock);
+	if (!atomic_dec_and_test(&e->ref)) {
+		spin_unlock_bh(&counter_list_lock);
+		return;
+	}
+
+	list_del(&e->list);
+	spin_unlock_bh(&counter_list_lock);
+	remove_proc_entry(e->name, proc_xt_quota);
+	kfree(e);
+}
+
+static bool
+quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	struct xt_quota_mtinfo2 *q = (void *)par->matchinfo;
+	struct xt_quota_counter *e = q->master;
+	int charge = (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+	bool no_change = q->flags & XT_QUOTA_NO_CHANGE;
+	bool ret = q->flags & XT_QUOTA_INVERT;
+
+	spin_lock_bh(&e->lock);
+	if (q->flags & XT_QUOTA_GROW) {
+		/*
+		 * While no_change is pointless in "grow" mode, we will
+		 * implement it here simply to have a consistent behavior.
+		 */
+		if (!no_change)
+			e->quota += charge;
+		ret = true; /* note: does not respect inversion (bug??) */
+	} else {
+		if (e->quota > charge) {
+			if (!no_change)
+				e->quota -= charge;
+			ret = !ret;
+		} else if (e->quota) {
+			/* We are transitioning, log that fact. */
+			quota2_log(xt_hooknum(par),
+				   skb,
+				   xt_in(par),
+				   xt_out(par),
+				   q->name);
+			/* we do not allow even small packets from now on */
+			e->quota = 0;
+		}
+	}
+	spin_unlock_bh(&e->lock);
+	return ret;
+}
+
+static struct xt_match quota_mt2_reg[] __read_mostly = {
+	{
+		.name       = "quota2",
+		.revision   = 3,
+		.family     = NFPROTO_IPV4,
+		.checkentry = quota_mt2_check,
+		.match      = quota_mt2,
+		.destroy    = quota_mt2_destroy,
+		.matchsize  = sizeof(struct xt_quota_mtinfo2),
+		.me         = THIS_MODULE,
+	},
+	{
+		.name       = "quota2",
+		.revision   = 3,
+		.family     = NFPROTO_IPV6,
+		.checkentry = quota_mt2_check,
+		.match      = quota_mt2,
+		.destroy    = quota_mt2_destroy,
+		.matchsize  = sizeof(struct xt_quota_mtinfo2),
+		.me         = THIS_MODULE,
+	},
+};
+
+static int __init quota_mt2_init(void)
+{
+	int ret;
+	pr_debug("xt_quota2: init()");
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+	nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, NULL);
+	if (!nflognl)
+		return -ENOMEM;
+#endif
+
+	proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
+	if (proc_xt_quota == NULL)
+		return -EACCES;
+
+	ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+	if (ret < 0)
+		remove_proc_entry("xt_quota", init_net.proc_net);
+	pr_debug("xt_quota2: init() %d", ret);
+	return ret;
+}
+
+static void __exit quota_mt2_exit(void)
+{
+	xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+	remove_proc_entry("xt_quota", init_net.proc_net);
+}
+
+module_init(quota_mt2_init);
+module_exit(quota_mt2_exit);
+MODULE_DESCRIPTION("Xtables: countdown quota match; up counter");
+MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_quota2");
+MODULE_ALIAS("ip6t_quota2");
diff --git a/net/socket.c b/net/socket.c
index 6e6cccc2..926f249 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -314,7 +314,8 @@
 
 static int sockfs_xattr_get(const struct xattr_handler *handler,
 			    struct dentry *dentry, struct inode *inode,
-			    const char *suffix, void *value, size_t size)
+			    const char *suffix, void *value, size_t size,
+			    int flags)
 {
 	if (value) {
 		if (dentry->d_name.len + 1 > size)
diff --git a/net/xfrm/xfrm_algo.c b/net/xfrm/xfrm_algo.c
index 4dae3ab..2e80e38b 100644
--- a/net/xfrm/xfrm_algo.c
+++ b/net/xfrm/xfrm_algo.c
@@ -237,7 +237,7 @@
 
 	.uinfo = {
 		.auth = {
-			.icv_truncbits = 96,
+			.icv_truncbits = IS_ENABLED(CONFIG_ANDROID) ? 128 : 96,
 			.icv_fullbits = 256,
 		}
 	},
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index a7ab193..87547cb 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -402,13 +402,11 @@
 	return -EOPNOTSUPP;
 }
 
-#if IS_ENABLED(CONFIG_NET_PKTGEN)
 int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
 {
 	return xfrm_outer_mode_output(x, skb);
 }
 EXPORT_SYMBOL_GPL(pktgen_xfrm_outer_mode_output);
-#endif
 
 static int xfrm_output_one(struct sk_buff *skb, int err)
 {
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index a77da7a..cd2d2ef 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -2379,17 +2379,20 @@
 	if (IS_ERR(data))
 		return PTR_ERR(data);
 
-	if (in_compat_syscall()) {
-		struct xfrm_translator *xtr = xfrm_get_translator();
+	/* Use the 64-bit / untranslated format on Android, even for compat */
+	if (!IS_ENABLED(CONFIG_ANDROID) || IS_ENABLED(CONFIG_XFRM_USER_COMPAT)) {
+		if (in_compat_syscall()) {
+			struct xfrm_translator *xtr = xfrm_get_translator();
 
-		if (!xtr)
-			return -EOPNOTSUPP;
+			if (!xtr)
+				return -EOPNOTSUPP;
 
-		err = xtr->xlate_user_policy_sockptr(&data, optlen);
-		xfrm_put_translator(xtr);
-		if (err) {
-			kfree(data);
-			return err;
+			err = xtr->xlate_user_policy_sockptr(&data, optlen);
+			xfrm_put_translator(xtr);
+			if (err) {
+				kfree(data);
+				return err;
+			}
 		}
 	}
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d0c32a8..4968742 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2705,19 +2705,22 @@
 	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (in_compat_syscall()) {
-		struct xfrm_translator *xtr = xfrm_get_translator();
+	/* Use the 64-bit / untranslated format on Android, even for compat */
+	if (!IS_ENABLED(CONFIG_ANDROID) || IS_ENABLED(CONFIG_XFRM_USER_COMPAT)) {
+		if (in_compat_syscall()) {
+			struct xfrm_translator *xtr = xfrm_get_translator();
 
-		if (!xtr)
-			return -EOPNOTSUPP;
+			if (!xtr)
+				return -EOPNOTSUPP;
 
-		nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max,
-					    link->nla_pol, extack);
-		xfrm_put_translator(xtr);
-		if (IS_ERR(nlh64))
-			return PTR_ERR(nlh64);
-		if (nlh64)
-			nlh = nlh64;
+			nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max,
+						    link->nla_pol, extack);
+			xfrm_put_translator(xtr);
+			if (IS_ERR(nlh64))
+				return PTR_ERR(nlh64);
+			if (nlh64)
+				nlh = nlh64;
+		}
 	}
 
 	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 9413370..b3e808c 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -274,25 +274,35 @@
 
 # DTC
 # ---------------------------------------------------------------------------
+ifeq ("$(origin DTC)", "command line")
+PHONY += $(DTC)
+dtc-option = $(call try-run, $(DTC) $1 -v,$1)
+else
+# Just add the flag. DTC is compiled later as a prerequisite, so there is no
+# dtc binary to test the flag against yet. This is fine because only flags
+# supported by the in-kernel dtc are used here to begin with.
+dtc-option = $1
+endif
+
 DTC ?= $(objtree)/scripts/dtc/dtc
-DTC_FLAGS += -Wno-interrupt_provider
+DTC_FLAGS += $(call dtc-option,-Wno-interrupt_provider)
 
 # Disable noisy checks by default
 ifeq ($(findstring 1,$(KBUILD_EXTRA_WARN)),)
-DTC_FLAGS += -Wno-unit_address_vs_reg \
-	-Wno-unit_address_format \
-	-Wno-avoid_unnecessary_addr_size \
-	-Wno-alias_paths \
-	-Wno-graph_child_address \
-	-Wno-simple_bus_reg \
-	-Wno-unique_unit_address \
-	-Wno-pci_device_reg
+DTC_FLAGS += $(call dtc-option,-Wno-unit_address_vs_reg) \
+	$(call dtc-option,-Wno-unit_address_format) \
+	$(call dtc-option,-Wno-avoid_unnecessary_addr_size) \
+	$(call dtc-option,-Wno-alias_paths) \
+	$(call dtc-option,-Wno-graph_child_address) \
+	$(call dtc-option,-Wno-simple_bus_reg) \
+	$(call dtc-option,-Wno-unique_unit_address) \
+	$(call dtc-option,-Wno-pci_device_reg)
 endif
 
 ifneq ($(findstring 2,$(KBUILD_EXTRA_WARN)),)
-DTC_FLAGS += -Wnode_name_chars_strict \
-	-Wproperty_name_chars_strict \
-	-Winterrupt_provider
+DTC_FLAGS += $(call dtc-option,-Wnode_name_chars_strict) \
+	$(call dtc-option,-Wproperty_name_chars_strict) \
+	$(call dtc-option,-Winterrupt_provider)
 endif
 
 DTC_FLAGS += $(DTC_FLAGS_$(basetarget))
@@ -376,7 +386,8 @@
       cmd_lzo = { cat $(real-prereqs) | $(KLZOP) -9; $(size_append); } > $@
 
 quiet_cmd_lz4 = LZ4     $@
-      cmd_lz4 = { cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout; \
+      cmd_lz4 = { cat $(real-prereqs) | \
+                  $(LZ4) -l -12 --favor-decSpeed stdin stdout; \
                   $(size_append); } > $@
 
 # U-Boot mkimage
diff --git a/scripts/extract-cert.c b/scripts/extract-cert.c
index 3bc48c7..3034d99 100644
--- a/scripts/extract-cert.c
+++ b/scripts/extract-cert.c
@@ -49,6 +49,7 @@
 	}
 }
 
+#ifndef OPENSSL_IS_BORINGSSL
 static void drain_openssl_errors(void)
 {
 	const char *file;
@@ -58,6 +59,7 @@
 		return;
 	while (ERR_get_error_line(&file, &line)) {}
 }
+#endif
 
 #define ERR(cond, fmt, ...)				\
 	do {						\
@@ -112,6 +114,10 @@
 		fclose(f);
 		exit(0);
 	} else if (!strncmp(cert_src, "pkcs11:", 7)) {
+#ifdef OPENSSL_IS_BORINGSSL
+		ERR(1, "BoringSSL does not support extracting from PKCS#11");
+		exit(1);
+#else
 		ENGINE *e;
 		struct {
 			const char *cert_id;
@@ -134,6 +140,7 @@
 		ENGINE_ctrl_cmd(e, "LOAD_CERT_CTRL", 0, &parms, NULL, 1);
 		ERR(!parms.cert, "Get X.509 from PKCS#11");
 		write_cert(parms.cert);
+#endif
 	} else {
 		BIO *b;
 		X509 *x509;
diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
index 63c8565..e5b4698 100755
--- a/scripts/kconfig/merge_config.sh
+++ b/scripts/kconfig/merge_config.sh
@@ -28,6 +28,7 @@
 	echo "  -r    list redundant entries when merging fragments"
 	echo "  -y    make builtin have precedence over modules"
 	echo "  -O    dir to put generated output files.  Consider setting \$KCONFIG_CONFIG instead."
+	echo "  -s    strict mode. Fail if the fragment redefines any value."
 	echo
 	echo "Used prefix: '$CONFIG_PREFIX'. You can redefine it with \$CONFIG_ environment variable."
 }
@@ -37,6 +38,7 @@
 WARNREDUN=false
 BUILTIN=false
 OUTPUT=.
+STRICT=false
 CONFIG_PREFIX=${CONFIG_-CONFIG_}
 
 while true; do
@@ -75,6 +77,11 @@
 		shift 2
 		continue
 		;;
+	"-s")
+		STRICT=true
+		shift
+		continue
+		;;
 	*)
 		break
 		;;
@@ -141,6 +148,9 @@
 			echo Previous  value: $PREV_VAL
 			echo New value:       $NEW_VAL
 			echo
+			if [ "$STRICT" = "true" ]; then
+				STRICT_MODE_VIOLATED=true
+			fi
 		elif [ "$WARNREDUN" = "true" ]; then
 			echo Value of $CFG is redundant by fragment $ORIG_MERGE_FILE:
 		fi
@@ -153,6 +163,11 @@
 	cat $MERGE_FILE >> $TMP_FILE
 done
 
+if [ "$STRICT_MODE_VIOLATED" = "true" ]; then
+	echo "The fragment redefined a value and strict mode had been passed."
+	exit 1
+fi
+
 if [ "$RUNMAKE" = "false" ]; then
 	cp -T -- "$TMP_FILE" "$KCONFIG_CONFIG"
 	echo "#"
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index f882ce0..c7501dc 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -426,10 +426,9 @@
 		s = new_symbol(name, mod, export);
 	} else if (!external_module || s->module->is_vmlinux ||
 		   s->module == mod) {
-		warn("%s: '%s' exported twice. Previous export was in %s%s\n",
-		     mod->name, name, s->module->name,
-		     s->module->is_vmlinux ? "" : ".ko");
-		return s;
+		fatal("%s: '%s' exported twice. Previous export was in %s%s\n",
+		      mod->name, name, s->module->name,
+		      s->module->is_vmlinux ? "" : ".ko");
 	}
 
 	s->module = mod;
@@ -2663,9 +2662,9 @@
 
 		for (s = symbolhash[n]; s; s = s->next) {
 			if (s->is_static)
-				warn("\"%s\" [%s] is a static %s\n",
-				     s->name, s->module->name,
-				     export_str(s->export));
+				fatal("\"%s\" [%s] is a static %s\n",
+				      s->name, s->module->name,
+				      export_str(s->export));
 		}
 	}
 
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index bb709ed..627c6e2 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -11,12 +11,14 @@
 #
 
 usage() {
-	echo "Usage: $0 [--save-scmversion] [srctree]" >&2
+	echo "Usage: $0 [--save-scmversion] [srctree] [branch] [kmi-generation]" >&2
 	exit 1
 }
 
 scm_only=false
 srctree=.
+android_release=
+kmi_generation=
 if test "$1" = "--save-scmversion"; then
 	scm_only=true
 	shift
@@ -25,6 +27,24 @@
 	srctree=$1
 	shift
 fi
+if test $# -gt 0; then
+	# Extract the Android release version. If there is no match, sed exits
+	# with status 255 and $android_release is cleared below.
+	android_release=`echo "$1" | sed -e '/android[0-9]\{2,\}/!{q255}; \
+		s/^\(android[0-9]\{2,\}\)-.*/\1/'`
+	if test $? -ne 0; then
+		android_release=
+	fi
+	shift
+
+	if test $# -gt 0; then
+		kmi_generation=$1
+		[ $(expr $kmi_generation : '^[0-9]\+$') -eq 0 ] && usage
+		shift
+	else
+		usage
+	fi
+fi
 if test $# -gt 0 -o ! -d "$srctree"; then
 	usage
 fi
@@ -47,6 +67,10 @@
 	if test -z "$(git rev-parse --show-cdup 2>/dev/null)" &&
 	   head=$(git rev-parse --verify HEAD 2>/dev/null); then
 
+		if [ -n "$android_release" ] && [ -n "$kmi_generation" ]; then
+			printf '%s' "-$android_release-$kmi_generation"
+		fi
+
 		# If we are at a tagged commit (like "v2.6.30-rc6"), we ignore
 		# it, because this version is defined in the top level Makefile.
 		if [ -z "$(git describe --exact-match 2>/dev/null)" ]; then
diff --git a/security/commoncap.c b/security/commoncap.c
index 59bf3c16..6152085 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -297,7 +297,8 @@
 	struct inode *inode = d_backing_inode(dentry);
 	int error;
 
-	error = __vfs_getxattr(dentry, inode, XATTR_NAME_CAPS, NULL, 0);
+	error = __vfs_getxattr(dentry, inode, XATTR_NAME_CAPS, NULL, 0,
+			       XATTR_NOSECURITY);
 	return error > 0;
 }
 
@@ -586,7 +587,8 @@
 
 	fs_ns = inode->i_sb->s_user_ns;
 	size = __vfs_getxattr((struct dentry *)dentry, inode,
-			      XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ);
+			      XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ,
+			      XATTR_NOSECURITY);
 	if (size == -ENODATA || size == -EOPNOTSUPP)
 		/* no data, that's ok */
 		return -ENODATA;
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 76d1914..d3b358b 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -101,7 +101,8 @@
 		return -EOPNOTSUPP;
 
 	list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
-		error = __vfs_getxattr(dentry, inode, xattr->name, NULL, 0);
+		error = __vfs_getxattr(dentry, inode, xattr->name, NULL, 0,
+				       XATTR_NOSECURITY);
 		if (error < 0) {
 			if (error == -ENODATA)
 				continue;
diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c
index 87cbdc6..3f38583 100644
--- a/security/lockdown/lockdown.c
+++ b/security/lockdown/lockdown.c
@@ -16,6 +16,33 @@
 
 static enum lockdown_reason kernel_locked_down;
 
+static const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
+	[LOCKDOWN_NONE] = "none",
+	[LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading",
+	[LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port",
+	[LOCKDOWN_EFI_TEST] = "/dev/efi_test access",
+	[LOCKDOWN_KEXEC] = "kexec of unsigned images",
+	[LOCKDOWN_HIBERNATION] = "hibernation",
+	[LOCKDOWN_PCI_ACCESS] = "direct PCI access",
+	[LOCKDOWN_IOPORT] = "raw io port access",
+	[LOCKDOWN_MSR] = "raw MSR access",
+	[LOCKDOWN_ACPI_TABLES] = "modifying ACPI tables",
+	[LOCKDOWN_PCMCIA_CIS] = "direct PCMCIA CIS storage",
+	[LOCKDOWN_TIOCSSERIAL] = "reconfiguration of serial port IO",
+	[LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters",
+	[LOCKDOWN_MMIOTRACE] = "unsafe mmio",
+	[LOCKDOWN_DEBUGFS] = "debugfs access",
+	[LOCKDOWN_XMON_WR] = "xmon write access",
+	[LOCKDOWN_INTEGRITY_MAX] = "integrity",
+	[LOCKDOWN_KCORE] = "/proc/kcore access",
+	[LOCKDOWN_KPROBES] = "use of kprobes",
+	[LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
+	[LOCKDOWN_PERF] = "unsafe use of perf",
+	[LOCKDOWN_TRACEFS] = "use of tracefs",
+	[LOCKDOWN_XMON_RW] = "xmon read and write access",
+	[LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
+};
+
 static const enum lockdown_reason lockdown_levels[] = {LOCKDOWN_NONE,
 						 LOCKDOWN_INTEGRITY_MAX,
 						 LOCKDOWN_CONFIDENTIALITY_MAX};
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 53d0d18..db24da9 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -27,7 +27,6 @@
 #include <linux/dccp.h>
 #include <linux/sctp.h>
 #include <linux/lsm_audit.h>
-#include <linux/security.h>
 
 /**
  * ipv4_skb_to_auditdata : fill auditdata from skb
@@ -431,10 +430,6 @@
 				 a->u.ibendport->dev_name,
 				 a->u.ibendport->port);
 		break;
-	case LSM_AUDIT_DATA_LOCKDOWN:
-		audit_log_format(ab, " lockdown_reason=\"%s\"",
-				 lockdown_reasons[a->u.reason]);
-		break;
 	} /* switch (a->type) */
 }
 
diff --git a/security/security.c b/security/security.c
index a28045d..7d81a9fbb 100644
--- a/security/security.c
+++ b/security/security.c
@@ -35,39 +35,6 @@
 /* How many LSMs were built into the kernel? */
 #define LSM_COUNT (__end_lsm_info - __start_lsm_info)
 
-/*
- * These are descriptions of the reasons that can be passed to the
- * security_locked_down() LSM hook. Placing this array here allows
- * all security modules to use the same descriptions for auditing
- * purposes.
- */
-const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
-	[LOCKDOWN_NONE] = "none",
-	[LOCKDOWN_MODULE_SIGNATURE] = "unsigned module loading",
-	[LOCKDOWN_DEV_MEM] = "/dev/mem,kmem,port",
-	[LOCKDOWN_EFI_TEST] = "/dev/efi_test access",
-	[LOCKDOWN_KEXEC] = "kexec of unsigned images",
-	[LOCKDOWN_HIBERNATION] = "hibernation",
-	[LOCKDOWN_PCI_ACCESS] = "direct PCI access",
-	[LOCKDOWN_IOPORT] = "raw io port access",
-	[LOCKDOWN_MSR] = "raw MSR access",
-	[LOCKDOWN_ACPI_TABLES] = "modifying ACPI tables",
-	[LOCKDOWN_PCMCIA_CIS] = "direct PCMCIA CIS storage",
-	[LOCKDOWN_TIOCSSERIAL] = "reconfiguration of serial port IO",
-	[LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters",
-	[LOCKDOWN_MMIOTRACE] = "unsafe mmio",
-	[LOCKDOWN_DEBUGFS] = "debugfs access",
-	[LOCKDOWN_XMON_WR] = "xmon write access",
-	[LOCKDOWN_INTEGRITY_MAX] = "integrity",
-	[LOCKDOWN_KCORE] = "/proc/kcore access",
-	[LOCKDOWN_KPROBES] = "use of kprobes",
-	[LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
-	[LOCKDOWN_PERF] = "unsafe use of perf",
-	[LOCKDOWN_TRACEFS] = "use of tracefs",
-	[LOCKDOWN_XMON_RW] = "xmon read and write access",
-	[LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
-};
-
 struct security_hook_heads security_hook_heads __lsm_ro_after_init;
 static BLOCKING_NOTIFIER_HEAD(blocking_lsm_notifier_chain);
 
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 6b1826f..ded9dcc 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -504,7 +504,8 @@
 			goto out;
 		}
 
-		rc = __vfs_getxattr(root, root_inode, XATTR_NAME_SELINUX, NULL, 0);
+		rc = __vfs_getxattr(root, root_inode, XATTR_NAME_SELINUX, NULL,
+				    0, XATTR_NOSECURITY);
 		if (rc < 0 && rc != -ENODATA) {
 			if (rc == -EOPNOTSUPP)
 				pr_warn("SELinux: (dev %s, type "
@@ -1332,12 +1333,14 @@
 		return -ENOMEM;
 
 	context[len] = '\0';
-	rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, context, len);
+	rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, context, len,
+			    XATTR_NOSECURITY);
 	if (rc == -ERANGE) {
 		kfree(context);
 
 		/* Need a larger buffer.  Query for the right size. */
-		rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, NULL, 0);
+		rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX, NULL, 0,
+				    XATTR_NOSECURITY);
 		if (rc < 0)
 			return rc;
 
@@ -1348,7 +1351,7 @@
 
 		context[len] = '\0';
 		rc = __vfs_getxattr(dentry, inode, XATTR_NAME_SELINUX,
-				    context, len);
+				    context, len, XATTR_NOSECURITY);
 	}
 	if (rc < 0) {
 		kfree(context);
@@ -6840,34 +6843,6 @@
 }
 #endif
 
-static int selinux_lockdown(enum lockdown_reason what)
-{
-	struct common_audit_data ad;
-	u32 sid = current_sid();
-	int invalid_reason = (what <= LOCKDOWN_NONE) ||
-			     (what == LOCKDOWN_INTEGRITY_MAX) ||
-			     (what >= LOCKDOWN_CONFIDENTIALITY_MAX);
-
-	if (WARN(invalid_reason, "Invalid lockdown reason")) {
-		audit_log(audit_context(),
-			  GFP_ATOMIC, AUDIT_SELINUX_ERR,
-			  "lockdown_reason=invalid");
-		return -EINVAL;
-	}
-
-	ad.type = LSM_AUDIT_DATA_LOCKDOWN;
-	ad.u.reason = what;
-
-	if (what <= LOCKDOWN_INTEGRITY_MAX)
-		return avc_has_perm(&selinux_state,
-				    sid, sid, SECCLASS_LOCKDOWN,
-				    LOCKDOWN__INTEGRITY, &ad);
-	else
-		return avc_has_perm(&selinux_state,
-				    sid, sid, SECCLASS_LOCKDOWN,
-				    LOCKDOWN__CONFIDENTIALITY, &ad);
-}
-
 struct lsm_blob_sizes selinux_blob_sizes __lsm_ro_after_init = {
 	.lbs_cred = sizeof(struct task_security_struct),
 	.lbs_file = sizeof(struct file_security_struct),
@@ -7173,8 +7148,6 @@
 	LSM_HOOK_INIT(perf_event_write, selinux_perf_event_write),
 #endif
 
-	LSM_HOOK_INIT(locked_down, selinux_lockdown),
-
 	/*
 	 * PUT "CLONING" (ACCESSING + ALLOCATING) HOOKS HERE
 	 */
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 40cebde..3e770d3 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -117,7 +117,7 @@
 	  { COMMON_IPC_PERMS, NULL } },
 	{ "netlink_route_socket",
 	  { COMMON_SOCK_PERMS,
-	    "nlmsg_read", "nlmsg_write", NULL } },
+	    "nlmsg_read", "nlmsg_write", "nlmsg_readpriv", NULL } },
 	{ "netlink_tcpdiag_socket",
 	  { COMMON_SOCK_PERMS,
 	    "nlmsg_read", "nlmsg_write", NULL } },
@@ -247,8 +247,6 @@
 	  { COMMON_SOCK_PERMS, NULL } },
 	{ "perf_event",
 	  {"open", "cpu", "kernel", "tracepoint", "read", "write"} },
-	{ "lockdown",
-	  { "integrity", "confidentiality", NULL } },
 	{ NULL }
   };
 
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 3cc8bab..c940f73 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -97,6 +97,7 @@
 	bool checkreqprot;
 	bool initialized;
 	bool policycap[__POLICYDB_CAPABILITY_MAX];
+	bool android_netlink_route;
 
 	struct page *status_page;
 	struct mutex status_lock;
@@ -219,6 +220,13 @@
 	return READ_ONCE(state->policycap[POLICYDB_CAPABILITY_GENFS_SECLABEL_SYMLINKS]);
 }
 
+static inline bool selinux_android_nlroute_getlink(void)
+{
+	struct selinux_state *state = &selinux_state;
+
+	return state->android_netlink_route;
+}
+
 int security_mls_enabled(struct selinux_state *state);
 int security_load_policy(struct selinux_state *state,
 			void *data, size_t len,
@@ -445,5 +453,6 @@
 extern void ebitmap_cache_init(void);
 extern void hashtab_cache_init(void);
 extern int security_sidtab_hash_stats(struct selinux_state *state, char *page);
+extern void selinux_nlmsg_init(void);
 
 #endif /* _SELINUX_SECURITY_H_ */
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index b692319..7968c2e 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -25,7 +25,7 @@
 	u32	perm;
 };
 
-static const struct nlmsg_perm nlmsg_route_perms[] =
+static struct nlmsg_perm nlmsg_route_perms[] =
 {
 	{ RTM_NEWLINK,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
 	{ RTM_DELLINK,		NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
@@ -211,3 +211,27 @@
 
 	return err;
 }
+
+static void nlmsg_set_getlink_perm(u32 perm)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nlmsg_route_perms); i++) {
+		if (nlmsg_route_perms[i].nlmsg_type == RTM_GETLINK) {
+			nlmsg_route_perms[i].perm = perm;
+			break;
+		}
+	}
+}
+
+/**
+ * selinux_nlmsg_init - select the permission checked for RTM_GETLINK
+ *
+ * Use nlmsg_readpriv as the permission for RTM_GETLINK messages if the
+ * android_netlink_route policy flag is set; otherwise use nlmsg_read.
+ */
+void selinux_nlmsg_init(void)
+{
+	if (selinux_android_nlroute_getlink())
+		nlmsg_set_getlink_perm(NETLINK_ROUTE_SOCKET__NLMSG_READPRIV);
+	else
+		nlmsg_set_getlink_perm(NETLINK_ROUTE_SOCKET__NLMSG_READ);
+}
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 9fccf41..ff5cc4b 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -2493,6 +2493,10 @@
 	p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN);
 	p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN);
 
+	if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_ANDROID_NETLINK_ROUTE)) {
+		p->android_netlink_route = 1;
+	}
+
 	if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
 		rc = ebitmap_read(&p->policycaps, fp);
 		if (rc)
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index c24d4e1..6bedec4 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -238,6 +238,7 @@
 /* The policy database */
 struct policydb {
 	int mls_enabled;
+	int android_netlink_route;
 
 	/* symbol tables */
 	struct symtab symtab[SYM_NUM];
@@ -334,6 +335,7 @@
 	struct policydb *p, struct role_trans_key *key);
 
 #define POLICYDB_CONFIG_MLS    1
+#define POLICYDB_CONFIG_ANDROID_NETLINK_ROUTE    (1 << 31)
 
 /* the config flags related to unknown classes/perms are bits 2 and 3 */
 #define REJECT_UNKNOWN	0x00000002
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 9704c8a..17979bb 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -2136,6 +2136,9 @@
 			pr_info("SELinux:  unknown policy capability %u\n",
 				i);
 	}
+
+	state->android_netlink_route = p->android_netlink_route;
+	selinux_nlmsg_init();
 }
 
 static int security_preserve_bools(struct selinux_policy *oldpolicy,
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 5c90b9f..fe8d33b2 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -289,7 +289,8 @@
 	if (buffer == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	rc = __vfs_getxattr(dp, ip, name, buffer, SMK_LONGLABEL);
+	rc = __vfs_getxattr(dp, ip, name, buffer, SMK_LONGLABEL,
+			    XATTR_NOSECURITY);
 	if (rc < 0)
 		skp = ERR_PTR(rc);
 	else if (rc == 0)
@@ -3432,7 +3433,7 @@
 			} else {
 				rc = __vfs_getxattr(dp, inode,
 					XATTR_NAME_SMACKTRANSMUTE, trattr,
-					TRANS_TRUE_SIZE);
+					TRANS_TRUE_SIZE, XATTR_NOSECURITY);
 				if (rc >= 0 && strncmp(trattr, TRANS_TRUE,
 						       TRANS_TRUE_SIZE) != 0)
 					rc = -EINVAL;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 05a085f..e1d98f5 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3002,6 +3002,39 @@
 }
 EXPORT_SYMBOL_GPL(snd_soc_get_dai_id);
 
+/**
+ * snd_soc_info_multi_ext - external single mixer info callback
+ * @kcontrol: mixer control
+ * @uinfo: control element information
+ *
+ * Callback to provide information about a single external mixer control
+ * that accepts multiple inputs.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_info_multi_ext(struct snd_kcontrol *kcontrol,
+	struct snd_ctl_elem_info *uinfo)
+{
+	struct soc_multi_mixer_control *mc =
+		(struct soc_multi_mixer_control *)kcontrol->private_value;
+	int platform_max;
+
+	if (!mc->platform_max)
+		mc->platform_max = mc->max;
+	platform_max = mc->platform_max;
+
+	if (platform_max == 1 && !strnstr(kcontrol->id.name, " Volume", 30))
+		uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+	else
+		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+
+	uinfo->count = mc->count;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = platform_max;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_soc_info_multi_ext);
+
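snd_soc_info_multi_ext() reports a BOOLEAN element when platform_max is 1 and the control name does not contain " Volume", and otherwise an INTEGER element spanning mc->count values in [0, platform_max]. A hedged sketch of a kcontrol that would use it; the control name, the stub handlers, and the initializer layout are illustrative assumptions, and only the info callback and struct soc_multi_mixer_control come from this patch:

	static int demo_get(struct snd_kcontrol *k,
			    struct snd_ctl_elem_value *v) { return 0; }
	static int demo_put(struct snd_kcontrol *k,
			    struct snd_ctl_elem_value *v) { return 0; }

	/* Hypothetical 4-channel 0..100 integer control. */
	static struct soc_multi_mixer_control demo_mc = {
		.count	= 4,
		.max	= 100,
	};

	static const struct snd_kcontrol_new demo_ctl = {
		.iface		= SNDRV_CTL_ELEM_IFACE_MIXER,
		.name		= "Demo Channel Volume",
		.info		= snd_soc_info_multi_ext,
		.get		= demo_get,	/* hypothetical handlers */
		.put		= demo_put,
		.private_value	= (unsigned long)&demo_mc,
	};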
 int snd_soc_get_dai_name(struct of_phandle_args *args,
 				const char **dai_name)
 {
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 4457214..4a52dd9 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -112,6 +112,185 @@
 static struct snd_usb_audio *usb_chip[SNDRV_CARDS];
 static struct usb_driver usb_audio_driver;
 
+static struct snd_usb_audio_vendor_ops *usb_vendor_ops;
+
+int snd_vendor_set_ops(struct snd_usb_audio_vendor_ops *ops)
+{
+	if ((!ops->connect) ||
+	    (!ops->disconnect) ||
+	    (!ops->set_interface) ||
+	    (!ops->set_rate) ||
+	    (!ops->set_pcm_buf) ||
+	    (!ops->set_pcm_intf) ||
+	    (!ops->set_pcm_connection) ||
+	    (!ops->set_pcm_binterval) ||
+	    (!ops->usb_add_ctls))
+		return -EINVAL;
+
+	usb_vendor_ops = ops;
+	return 0;
+}
+EXPORT_SYMBOL_GPL(snd_vendor_set_ops);
+
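snd_vendor_set_ops() refuses a partially-filled table, so a vendor driver has to supply every callback before the USB audio core will route through it. A hedged registration sketch: the stub bodies are illustrative, and the field prototypes are inferred from the wrappers in this file and the declarations added to card.h below:

	static int demo_connect(struct usb_interface *intf) { return 0; }
	static void demo_disconnect(struct usb_interface *intf) { }
	static int demo_set_interface(struct usb_device *udev,
			struct usb_host_interface *alts, int iface, int alt)
	{ return 0; }
	static int demo_set_rate(struct usb_interface *intf, int iface,
			int rate, int alt) { return 0; }
	static int demo_set_pcm_buf(struct usb_device *udev, int iface)
	{ return 0; }
	static int demo_set_pcm_intf(struct usb_interface *intf, int iface,
			int alt, int direction) { return 0; }
	static int demo_set_pcm_connection(struct usb_device *udev,
			enum snd_vendor_pcm_open_close onoff, int direction)
	{ return 0; }
	static int demo_set_pcm_binterval(struct audioformat *fp,
			struct audioformat *found, int *cur_attr, int *attr)
	{ return 0; }
	static int demo_usb_add_ctls(struct snd_usb_audio *chip) { return 0; }

	static struct snd_usb_audio_vendor_ops demo_ops = {
		.connect		= demo_connect,
		.disconnect		= demo_disconnect,
		.set_interface		= demo_set_interface,
		.set_rate		= demo_set_rate,
		.set_pcm_buf		= demo_set_pcm_buf,
		.set_pcm_intf		= demo_set_pcm_intf,
		.set_pcm_connection	= demo_set_pcm_connection,
		.set_pcm_binterval	= demo_set_pcm_binterval,
		.usb_add_ctls		= demo_usb_add_ctls,
	};

	/* from the vendor driver's init path:
	 *	err = snd_vendor_set_ops(&demo_ops);
	 */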
+struct snd_usb_audio_vendor_ops *snd_vendor_get_ops(void)
+{
+	return usb_vendor_ops;
+}
+
+static int snd_vendor_connect(struct usb_interface *intf)
+{
+	struct snd_usb_audio_vendor_ops *ops = snd_vendor_get_ops();
+
+	if (ops)
+		return ops->connect(intf);
+	return 0;
+}
+
+static void snd_vendor_disconnect(struct usb_interface *intf)
+{
+	struct snd_usb_audio_vendor_ops *ops = snd_vendor_get_ops();
+
+	if (ops)
+		ops->disconnect(intf);
+}
+
+int snd_vendor_set_interface(struct usb_device *udev,
+			     struct usb_host_interface *intf,
+			     int iface, int alt)
+{
+	struct snd_usb_audio_vendor_ops *ops = snd_vendor_get_ops();
+
+	if (ops)
+		return ops->set_interface(udev, intf, iface, alt);
+	return 0;
+}
+
+int snd_vendor_set_rate(struct usb_interface *intf, int iface, int rate,
+			int alt)
+{
+	struct snd_usb_audio_vendor_ops *ops = snd_vendor_get_ops();
+
+	if (ops)
+		return ops->set_rate(intf, iface, rate, alt);
+	return 0;
+}
+
+int snd_vendor_set_pcm_buf(struct usb_device *udev, int iface)
+{
+	struct snd_usb_audio_vendor_ops *ops = snd_vendor_get_ops();
+
+	if (ops)
+		ops->set_pcm_buf(udev, iface);
+	return 0;
+}
+
+int snd_vendor_set_pcm_intf(struct usb_interface *intf, int iface, int alt,
+			    int direction)
+{
+	struct snd_usb_audio_vendor_ops *ops = snd_vendor_get_ops();
+
+	if (ops)
+		return ops->set_pcm_intf(intf, iface, alt, direction);
+	return 0;
+}
+
+int snd_vendor_set_pcm_connection(struct usb_device *udev,
+				  enum snd_vendor_pcm_open_close onoff,
+				  int direction)
+{
+	struct snd_usb_audio_vendor_ops *ops = snd_vendor_get_ops();
+
+	if (ops)
+		return ops->set_pcm_connection(udev, onoff, direction);
+	return 0;
+}
+
+int snd_vendor_set_pcm_binterval(struct audioformat *fp,
+				 struct audioformat *found,
+				 int *cur_attr, int *attr)
+{
+	struct snd_usb_audio_vendor_ops *ops = snd_vendor_get_ops();
+
+	if (ops)
+		return ops->set_pcm_binterval(fp, found, cur_attr, attr);
+	return 0;
+}
+
+static int snd_vendor_usb_add_ctls(struct snd_usb_audio *chip)
+{
+	struct snd_usb_audio_vendor_ops *ops = snd_vendor_get_ops();
+
+	if (ops)
+		return ops->usb_add_ctls(chip);
+	return 0;
+}
+
+struct snd_usb_substream *find_snd_usb_substream(unsigned int card_num,
+	unsigned int pcm_idx, unsigned int direction, struct snd_usb_audio
+	**uchip, void (*disconnect_cb)(struct snd_usb_audio *chip))
+{
+	int idx;
+	struct snd_usb_stream *as;
+	struct snd_usb_substream *subs = NULL;
+	struct snd_usb_audio *chip = NULL;
+
+	mutex_lock(&register_mutex);
+	/*
+	 * Sound card numbers are assigned dynamically, so search by
+	 * chip->card->number rather than by array index.
+	 */
+	for (idx = 0; idx < SNDRV_CARDS; idx++) {
+		if (!usb_chip[idx])
+			continue;
+		if (usb_chip[idx]->card->number == card_num) {
+			chip = usb_chip[idx];
+			break;
+		}
+	}
+
+	if (!chip || atomic_read(&chip->shutdown)) {
+		pr_debug("%s: instance of usb crad # %d does not exist\n",
+			__func__, card_num);
+		goto err;
+	}
+
+	if (pcm_idx >= chip->pcm_devs) {
+		pr_err("%s: invalid pcm dev number %u > %d\n", __func__,
+			pcm_idx, chip->pcm_devs);
+		goto err;
+	}
+
+	if (direction > SNDRV_PCM_STREAM_CAPTURE) {
+		pr_err("%s: invalid direction %u\n", __func__, direction);
+		goto err;
+	}
+
+	list_for_each_entry(as, &chip->pcm_list, list) {
+		if (as->pcm_index == pcm_idx) {
+			subs = &as->substream[direction];
+			if (subs->interface < 0 && !subs->data_endpoint &&
+				!subs->sync_endpoint) {
+				pr_debug("%s: stream disconnected, bail out\n",
+					__func__);
+				subs = NULL;
+				goto err;
+			}
+			goto done;
+		}
+	}
+
+done:
+	chip->card_num = card_num;
+	chip->disconnect_cb = disconnect_cb;
+err:
+	*uchip = chip;
+	if (!subs)
+		pr_debug("%s: substream instance not found\n", __func__);
+	mutex_unlock(&register_mutex);
+	return subs;
+}
+EXPORT_SYMBOL_GPL(find_snd_usb_substream);
+
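find_snd_usb_substream() is the lookup an offload driver would use to attach to a card by its runtime number; the disconnect callback it installs is invoked from usb_audio_disconnect() via chip->disconnect_cb (see further down). A hedged usage sketch, with only the function signature and the card_num assignment taken from this patch:

	/* Illustrative callback; card_num is filled in by the lookup above. */
	static void demo_on_disconnect(struct snd_usb_audio *chip)
	{
		pr_info("usb audio card %d going away\n", chip->card_num);
	}

	static int demo_attach(void)
	{
		struct snd_usb_audio *chip;
		struct snd_usb_substream *subs;

		subs = find_snd_usb_substream(0 /* card */, 0 /* pcm */,
					      SNDRV_PCM_STREAM_PLAYBACK,
					      &chip, demo_on_disconnect);
		if (!subs)
			return -ENODEV;
		/* subs stays valid until demo_on_disconnect() fires */
		return 0;
	}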
 /*
  * disconnect streams
  * called from usb_audio_disconnect()
@@ -451,6 +630,7 @@
 	list_for_each_entry_safe(ep, n, &chip->ep_list, list)
 		snd_usb_endpoint_free(ep);
 
+	mutex_destroy(&chip->dev_lock);
 	mutex_destroy(&chip->mutex);
 	if (!atomic_read(&chip->shutdown))
 		dev_set_drvdata(&chip->dev->dev, NULL);
@@ -593,6 +773,7 @@
 
 	chip = card->private_data;
 	mutex_init(&chip->mutex);
+	mutex_init(&chip->dev_lock);
 	init_waitqueue_head(&chip->shutdown_wait);
 	chip->index = idx;
 	chip->dev = dev;
@@ -717,6 +898,10 @@
 	if (err < 0)
 		return err;
 
+	err = snd_vendor_connect(intf);
+	if (err)
+		return err;
+
 	/*
 	 * found a config.  now register to ALSA
 	 */
@@ -778,6 +963,8 @@
 
 	dev_set_drvdata(&dev->dev, chip);
 
+	snd_vendor_usb_add_ctls(chip);
+
 	/*
 	 * For devices with more than one control interface, we assume the
 	 * first contains the audio controls. We might need a more specific
@@ -863,6 +1050,11 @@
 
 	card = chip->card;
 
+	if (chip->disconnect_cb)
+		chip->disconnect_cb(chip);
+
+	snd_vendor_disconnect(intf);
+
 	mutex_lock(&register_mutex);
 	if (atomic_inc_return(&chip->shutdown) == 1) {
 		struct snd_usb_stream *as;
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 5351d71..ce4f213 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -182,4 +182,25 @@
 	struct list_head list;
 };
 
+struct snd_usb_substream *find_snd_usb_substream(unsigned int card_num,
+	unsigned int pcm_idx, unsigned int direction, struct snd_usb_audio
+	**uchip, void (*disconnect_cb)(struct snd_usb_audio *chip));
+
+int snd_vendor_set_ops(struct snd_usb_audio_vendor_ops *vendor_ops);
+struct snd_usb_audio_vendor_ops *snd_vendor_get_ops(void);
+int snd_vendor_set_interface(struct usb_device *udev,
+			     struct usb_host_interface *alts,
+			     int iface, int alt);
+int snd_vendor_set_rate(struct usb_interface *intf, int iface, int rate,
+			int alt);
+int snd_vendor_set_pcm_buf(struct usb_device *udev, int iface);
+int snd_vendor_set_pcm_intf(struct usb_interface *intf, int iface, int alt,
+			    int direction);
+int snd_vendor_set_pcm_connection(struct usb_device *udev,
+				  enum snd_vendor_pcm_open_close onoff,
+				  int direction);
+int snd_vendor_set_pcm_binterval(struct audioformat *fp,
+				 struct audioformat *found,
+				 int *cur_attr, int *attr);
+
 #endif /* __USBAUDIO_CARD_H */
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index f3ca5900..3d56037 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -642,8 +642,13 @@
 	 * interface is active. */
 	if (rate != prev_rate) {
 		usb_set_interface(dev, iface, 0);
+
+		snd_vendor_set_interface(dev, alts, iface, 0);
+
 		snd_usb_set_interface_quirk(dev);
 		usb_set_interface(dev, iface, fmt->altsetting);
+
+		snd_vendor_set_interface(dev, alts, iface, fmt->altsetting);
 		snd_usb_set_interface_quirk(dev);
 	}
 
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index a860303..947b2c2 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -134,6 +134,71 @@
 			found = fp;
 			cur_attr = attr;
 		}
+
+		snd_vendor_set_pcm_binterval(fp, found, &cur_attr, &attr);
+	}
+	return found;
+}
+
+/*
+ * find a matching audio format as well as non-zero service interval
+ */
+static struct audioformat *find_format_and_si(struct snd_usb_substream *subs,
+	unsigned int datainterval)
+{
+	unsigned int i;
+	struct audioformat *fp;
+	struct audioformat *found = NULL;
+	int cur_attr = 0, attr;
+
+	list_for_each_entry(fp, &subs->fmt_list, list) {
+		if (datainterval != fp->datainterval)
+			continue;
+		if (!(fp->formats & pcm_format_to_bits(subs->pcm_format)))
+			continue;
+		if (fp->channels != subs->channels)
+			continue;
+		if (subs->cur_rate < fp->rate_min ||
+		    subs->cur_rate > fp->rate_max)
+			continue;
+		if (!(fp->rates & SNDRV_PCM_RATE_CONTINUOUS)) {
+			for (i = 0; i < fp->nr_rates; i++)
+				if (fp->rate_table[i] == subs->cur_rate)
+					break;
+			if (i >= fp->nr_rates)
+				continue;
+		}
+		attr = fp->ep_attr & USB_ENDPOINT_SYNCTYPE;
+		if (!found) {
+			found = fp;
+			cur_attr = attr;
+			continue;
+		}
+		/* avoid async out and adaptive in if the other method
+		 * supports the same format.
+		 * this is a workaround for the case like
+		 * M-audio audiophile USB.
+		 */
+		if (attr != cur_attr) {
+			if ((attr == USB_ENDPOINT_SYNC_ASYNC &&
+			     subs->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
+			    (attr == USB_ENDPOINT_SYNC_ADAPTIVE &&
+			     subs->direction == SNDRV_PCM_STREAM_CAPTURE))
+				continue;
+			if ((cur_attr == USB_ENDPOINT_SYNC_ASYNC &&
+			     subs->direction == SNDRV_PCM_STREAM_PLAYBACK) ||
+			    (cur_attr == USB_ENDPOINT_SYNC_ADAPTIVE &&
+			     subs->direction == SNDRV_PCM_STREAM_CAPTURE)) {
+				found = fp;
+				cur_attr = attr;
+				continue;
+			}
+		}
+		/* find the format with the largest max. packet size */
+		if (fp->maxpacksize > found->maxpacksize) {
+			found = fp;
+			cur_attr = attr;
+		}
 	}
 	return found;
 }
@@ -588,6 +653,10 @@
 		}
 		dev_dbg(&dev->dev, "setting usb interface %d:%d\n",
 			fmt->iface, fmt->altsetting);
+		err = snd_vendor_set_pcm_intf(iface, fmt->iface,
+					      fmt->altsetting, subs->direction);
+		if (err)
+			return err;
 		snd_usb_set_interface_quirk(dev);
 	}
 
@@ -615,6 +684,81 @@
 	return 0;
 }
 
+static int snd_usb_pcm_change_state(struct snd_usb_substream *subs, int state);
+
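+/*
+ * Enable or disable an audio stream on behalf of a vendor driver. Enabling
+ * resumes the device, finds a matching audio format (optionally pinned to
+ * a specific data interval), applies it and programs the sample rate; all
+ * error paths drop the autoresume reference again. Disabling resets the
+ * interface to altsetting 0 and allows autosuspend again.
+ */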
+int snd_usb_enable_audio_stream(struct snd_usb_substream *subs,
+	int datainterval, bool enable)
+{
+	struct audioformat *fmt;
+	struct usb_host_interface *alts;
+	struct usb_interface *iface;
+	int ret;
+
+	if (!enable) {
+		if (subs->interface >= 0) {
+			usb_set_interface(subs->dev, subs->interface, 0);
+			subs->altset_idx = 0;
+			subs->interface = -1;
+			subs->cur_audiofmt = NULL;
+		}
+
+		snd_usb_autosuspend(subs->stream->chip);
+		return 0;
+	}
+
+	snd_usb_autoresume(subs->stream->chip);
+
+	ret = snd_usb_pcm_change_state(subs, UAC3_PD_STATE_D0);
+	if (ret < 0)
+		goto err_autosuspend;
+
+	if (datainterval != -EINVAL)
+		fmt = find_format_and_si(subs, datainterval);
+	else
+		fmt = find_format(subs);
+	if (!fmt) {
+		dev_err(&subs->dev->dev,
+			"cannot set format: format = %#x, rate = %d, channels = %d\n",
+			subs->pcm_format, subs->cur_rate, subs->channels);
+		ret = -EINVAL;
+		goto err_autosuspend;
+	}
+
+	subs->altset_idx = 0;
+	subs->interface = -1;
+	if (atomic_read(&subs->stream->chip->shutdown)) {
+		ret = -ENODEV;
+	} else {
+		ret = set_format(subs, fmt);
+		if (ret < 0)
+			goto err_autosuspend;
+
+		iface = usb_ifnum_to_if(subs->dev, subs->cur_audiofmt->iface);
+		if (!iface) {
+			dev_err(&subs->dev->dev, "Could not get iface %d\n",
+				subs->cur_audiofmt->iface);
+			ret = -ENODEV;
+			goto err_autosuspend;
+		}
+
+		alts = &iface->altsetting[subs->cur_audiofmt->altset_idx];
+		ret = snd_usb_init_sample_rate(subs->stream->chip,
+					       subs->cur_audiofmt->iface,
+					       alts,
+					       subs->cur_audiofmt,
+					       subs->cur_rate);
+		if (ret < 0) {
+			dev_err(&subs->dev->dev, "failed to set rate %d\n",
+				subs->cur_rate);
+			goto err_autosuspend;
+		}
+	}
+
+	if (ret < 0)
+		goto err_autosuspend;
+
+	subs->interface = fmt->iface;
+	subs->altset_idx = fmt->altset_idx;
+
+	return 0;
+
+err_autosuspend:
+	snd_usb_autosuspend(subs->stream->chip);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(snd_usb_enable_audio_stream);
+
 /*
  * Return the score of matching two audioformats.
  * Veto the audioformat if:
@@ -911,6 +1055,10 @@
 	struct usb_interface *iface;
 	int ret;
 
+	if (subs->cur_audiofmt) {
+		ret = snd_vendor_set_pcm_buf(subs->dev,
+					     subs->cur_audiofmt->iface);
+		if (ret)
+			return ret;
+	}
+
 	if (! subs->cur_audiofmt) {
 		dev_err(&subs->dev->dev, "no format is specified!\n");
 		return -ENXIO;
@@ -944,6 +1092,17 @@
 		if (ret < 0)
 			goto unlock;
 
+		if (snd_vendor_get_ops()) {
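+			/*
+			 * A vendor driver may handle the rate change itself;
+			 * if it does, skip the normal endpoint configuration.
+			 */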
+			ret = snd_vendor_set_rate(iface,
+						  subs->cur_audiofmt->iface,
+						  subs->cur_rate,
+						  subs->cur_audiofmt->altsetting);
+			if (!ret) {
+				subs->need_setup_ep = false;
+				goto unlock;
+			}
+		}
+
 		ret = configure_endpoint(subs);
 		if (ret < 0)
 			goto unlock;
@@ -1353,6 +1512,11 @@
 	struct snd_usb_substream *subs = &as->substream[direction];
 	int ret;
 
+	ret = snd_vendor_set_pcm_connection(subs->dev, SOUND_PCM_OPEN,
+					    direction);
+	if (ret)
+		return ret;
+
 	subs->interface = -1;
 	subs->altset_idx = 0;
 	runtime->hw = snd_usb_hardware;
@@ -1381,12 +1545,23 @@
 	struct snd_usb_substream *subs = &as->substream[direction];
 	int ret;
 
+	ret = snd_vendor_set_pcm_connection(subs->dev, SOUND_PCM_CLOSE,
+					    direction);
+	if (ret)
+		return ret;
+
 	snd_media_stop_pipeline(subs);
 
 	if (!as->chip->keep_iface &&
 	    subs->interface >= 0 &&
 	    !snd_usb_lock_shutdown(subs->stream->chip)) {
 		usb_set_interface(subs->dev, subs->interface, 0);
+		ret = snd_vendor_set_pcm_intf(usb_ifnum_to_if(subs->dev,
+							      subs->interface),
+					      subs->interface, 0,
+					      direction);
+		if (ret) {
+			snd_usb_unlock_shutdown(subs->stream->chip);
+			return ret;
+		}
 		subs->interface = -1;
 		ret = snd_usb_pcm_change_state(subs, UAC3_PD_STATE_D1);
 		snd_usb_unlock_shutdown(subs->stream->chip);
diff --git a/sound/usb/pcm.h b/sound/usb/pcm.h
index 9833627..6e28e79 100644
--- a/sound/usb/pcm.h
+++ b/sound/usb/pcm.h
@@ -14,5 +14,7 @@
 		       struct audioformat *fmt);
 void snd_usb_preallocate_buffer(struct snd_usb_substream *subs);
 
+int snd_usb_enable_audio_stream(struct snd_usb_substream *subs,
+				int datainterval, bool enable);
 
 #endif /* __USBAUDIO_PCM_H */
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index ca76ba5..7ff203d 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -67,9 +67,13 @@
 static void snd_usb_audio_pcm_free(struct snd_pcm *pcm)
 {
 	struct snd_usb_stream *stream = pcm->private_data;
+	struct snd_usb_audio *chip;
 	if (stream) {
+		mutex_lock(&stream->chip->dev_lock);
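+		/* Cache chip: snd_usb_audio_stream_free() frees stream. */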
+		chip = stream->chip;
 		stream->pcm = NULL;
 		snd_usb_audio_stream_free(stream);
+		mutex_unlock(&chip->dev_lock);
 	}
 }
 
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 0805b7f..9b3946a 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -60,6 +60,9 @@
 	struct usb_host_interface *ctrl_intf;	/* the audio control interface */
 	struct media_device *media_dev;
 	struct media_intf_devnode *ctl_intf_media_devnode;
+	struct mutex dev_lock;  /* to protect any race with disconnect */
+	int card_num;	/* cache pcm card number to use upon disconnect */
+	void (*disconnect_cb)(struct snd_usb_audio *chip);
 };
 
 #define usb_audio_err(chip, fmt, args...) \
@@ -126,4 +129,50 @@
 extern bool snd_usb_use_vmalloc;
 extern bool snd_usb_skip_validation;
 
+struct audioformat;
+
+enum snd_vendor_pcm_open_close {
+	SOUND_PCM_CLOSE = 0,
+	SOUND_PCM_OPEN,
+};
+
+/**
+ * struct snd_usb_audio_vendor_ops - function callbacks for USB audio accelerators
+ * @connect: called when a new interface is found
+ * @disconnect: called when an interface is removed
+ * @set_interface: called when an interface is initialized
+ * @set_rate: called when the rate is set
+ * @set_pcm_buf: called when the pcm buffer is set
+ * @set_pcm_intf: called when the pcm interface is set
+ * @set_pcm_connection: called when pcm is opened/closed
+ * @set_pcm_binterval: called when the pcm binterval is set
+ * @usb_add_ctls: called when USB controls are added
+ *
+ * Set of callbacks for some accelerated USB audio streaming hardware.
+ *
+ * TODO: make this USB host-controller specific, right now this only works for
+ * one USB controller in the system at a time, which is only realistic for
+ * self-contained systems like phones.
+ */
+struct snd_usb_audio_vendor_ops {
+	int (*connect)(struct usb_interface *intf);
+	void (*disconnect)(struct usb_interface *intf);
+
+	int (*set_interface)(struct usb_device *udev,
+			     struct usb_host_interface *alts,
+			     int iface, int alt);
+	int (*set_rate)(struct usb_interface *intf, int iface, int rate,
+			int alt);
+	int (*set_pcm_buf)(struct usb_device *udev, int iface);
+	int (*set_pcm_intf)(struct usb_interface *intf, int iface, int alt,
+			    int direction);
+	int (*set_pcm_connection)(struct usb_device *udev,
+				  enum snd_vendor_pcm_open_close onoff,
+				  int direction);
+	int (*set_pcm_binterval)(struct audioformat *fp,
+				 struct audioformat *found,
+				 int *cur_attr, int *attr);
+	int (*usb_add_ctls)(struct snd_usb_audio *chip);
+};
+
 #endif /* __USBAUDIO_H */
diff --git a/tools/testing/selftests/filesystems/incfs/.gitignore b/tools/testing/selftests/filesystems/incfs/.gitignore
new file mode 100644
index 0000000..f0e3cd9
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/.gitignore
@@ -0,0 +1,3 @@
+incfs_test
+incfs_stress
+incfs_perf
diff --git a/tools/testing/selftests/filesystems/incfs/Makefile b/tools/testing/selftests/filesystems/incfs/Makefile
new file mode 100644
index 0000000..f379802
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS += -D_FILE_OFFSET_BITS=64 -Wall -Werror -I../.. -I../../../../..
+LDLIBS := -llz4 -lzstd -lcrypto -lpthread
+TEST_GEN_PROGS := incfs_test incfs_stress incfs_perf
+
+include ../../lib.mk
+
+# Keep this after 'include ../../lib.mk': lib.mk rewrites $(TEST_GEN_PROGS),
+# and adding the dependency earlier would create multiple default targets.
+EXTRA_SOURCES := utils.c
+$(TEST_GEN_PROGS) : $(EXTRA_SOURCES)
diff --git a/tools/testing/selftests/filesystems/incfs/incfs_perf.c b/tools/testing/selftests/filesystems/incfs/incfs_perf.c
new file mode 100644
index 0000000..ed36bbd
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/incfs_perf.c
@@ -0,0 +1,717 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google LLC
+ */
+#include <ctype.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <lz4.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+#define err_msg(...)                                                           \
+	do {                                                                   \
+		fprintf(stderr, "%s: (%d) ", TAG, __LINE__);                   \
+		fprintf(stderr, __VA_ARGS__);                                  \
+		fprintf(stderr, " (%s)\n", strerror(errno));                   \
+	} while (false)
+
+#define TAG "incfs_perf"
+
+struct options {
+	int blocks; /* -b number of diff block sizes */
+	bool no_cleanup; /* -c don't clean up after */
+	const char *test_dir; /* -d working directory */
+	const char *file_types; /* -f sScCvV */
+	bool no_native; /* -n don't test native files */
+	bool no_random; /* -r don't do random reads*/
+	bool no_linear; /* -R random reads only */
+	size_t size; /* -s file size as power of 2 */
+	int tries; /* -t times to run test*/
+};
+
+enum flags {
+	SHUFFLE = 1,
+	COMPRESS = 2,
+	VERIFY = 4,
+	LAST_FLAG = 8,
+};
+
+void print_help(void)
+{
+	puts(
+	"incfs_perf. Performance test tool for incfs\n"
+	"\tTests read performance of incfs by creating files of various types\n"
+	"\tflushing caches and then reading them back.\n"
+	"\tEach file is read with different block sizes and average\n"
+	"\tthroughput in megabytes/second and memory usage are reported for\n"
+	"\teach block size\n"
+	"\tNative files are tested for comparison\n"
+	"\tNative files are created in native folder, incfs files are created\n"
+	"\tin src folder which is mounted on dst folder\n"
+	"\n"
+	"\t-bn (default 8) number of different block sizes, starting at 4096\n"
+	"\t                and doubling\n"
+	"\t-c		   don't Clean up - leave files and mount point\n"
+	"\t-d dir          create directories in dir\n"
+	"\t-fs|Sc|Cv|V     restrict which files are created.\n"
+	"\t                s blocks not shuffled, S blocks shuffled\n"
+	"\t                c blocks not compress, C blocks compressed\n"
+	"\t                v files not verified, V files verified\n"
+	"\t                If a letter is omitted, both options are tested\n"
+	"\t                If no letter are given, incfs is not tested\n"
+	"\t-n              Don't test native files\n"
+	"\t-r              No random reads (sequential only)\n"
+	"\t-R              Random reads only (no sequential)\n"
+	"\t-sn (default 30)File size as power of 2\n"
+	"\t-tn (default 5) Number of tries per file. Results are averaged\n"
+	);
+}
+
+int parse_options(int argc, char *const *argv, struct options *options)
+{
+	signed char c;
+
+	/* Set defaults here */
+	*options = (struct options){
+		.blocks = 8,
+		.test_dir = ".",
+		.tries = 5,
+		.size = 30,
+	};
+
+	/* Load options from command line here */
+	while ((c = getopt(argc, argv, "b:cd:f::hnrRs:t:")) != -1) {
+		switch (c) {
+		case 'b':
+			options->blocks = strtol(optarg, NULL, 10);
+			break;
+
+		case 'c':
+			options->no_cleanup = true;
+			break;
+
+		case 'd':
+			options->test_dir = optarg;
+			break;
+
+		case 'f':
+			if (optarg)
+				options->file_types = optarg;
+			else
+				options->file_types = "sS";
+			break;
+
+		case 'h':
+			print_help();
+			exit(0);
+
+		case 'n':
+			options->no_native = true;
+			break;
+
+		case 'r':
+			options->no_random = true;
+			break;
+
+		case 'R':
+			options->no_linear = true;
+			break;
+
+		case 's':
+			options->size = strtol(optarg, NULL, 10);
+			break;
+
+		case 't':
+			options->tries = strtol(optarg, NULL, 10);
+			break;
+
+		default:
+			print_help();
+			return -EINVAL;
+		}
+	}
+
+	options->size = 1L << options->size;
+
+	return 0;
+}
+
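+/* In-place Fisher-Yates shuffle of a buffer of offsets. */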
+void shuffle(size_t *buffer, size_t size)
+{
+	size_t i;
+
+	for (i = 0; i < size; ++i) {
+		size_t j = i + random() % (size - i);
+		size_t temp = buffer[i];
+
+		buffer[i] = buffer[j];
+		buffer[j] = temp;
+	}
+}
+
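+/*
+ * Return the current MemFree value from /proc/meminfo in kB, or -ENOENT if
+ * it cannot be determined.
+ */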
+int get_free_memory(void)
+{
+	FILE *meminfo = fopen("/proc/meminfo", "re");
+	char field[256];
+	char value[256] = {};
+
+	if (!meminfo)
+		return -ENOENT;
+
+	while (fscanf(meminfo, "%255[^:]: %255s kB\n", field, value) == 2) {
+		if (!strcmp(field, "MemFree"))
+			break;
+		*value = 0;
+	}
+
+	fclose(meminfo);
+
+	if (!*value)
+		return -ENOENT;
+
+	return strtol(value, NULL, 10);
+}
+
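+/*
+ * Fill an incfs file via INCFS_IOC_FILL_BLOCKS after authorizing the fd
+ * with INCFS_IOC_PERMIT_FILL on the command fd. The payload is an all-zero
+ * block, optionally LZ4-compressed, written in sequential or shuffled
+ * block order.
+ */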
+int write_data(int cmd_fd, int dir_fd, const char *name, size_t size, int flags)
+{
+	int fd = openat(dir_fd, name, O_RDWR | O_CLOEXEC);
+	struct incfs_permit_fill permit_fill = {
+		.file_descriptor = fd,
+	};
+	int block_count = 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	size_t *blocks = malloc(sizeof(size_t) * block_count);
+	int error = 0;
+	size_t i;
+	uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+	uint8_t compressed_data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+	struct incfs_fill_block fill_block = {
+		.compression = COMPRESSION_NONE,
+		.data_len = sizeof(data),
+		.data = ptr_to_u64(data),
+	};
+
+	if (!blocks) {
+		err_msg("Out of memory");
+		error = -errno;
+		goto out;
+	}
+
+	if (fd == -1) {
+		err_msg("Could not open file for writing %s", name);
+		error = -errno;
+		goto out;
+	}
+
+	if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+		err_msg("Failed to call PERMIT_FILL");
+		error = -errno;
+		goto out;
+	}
+
+	for (i = 0; i < block_count; ++i)
+		blocks[i] = i;
+
+	if (flags & SHUFFLE)
+		shuffle(blocks, block_count);
+
+	if (flags & COMPRESS) {
+		size_t comp_size = LZ4_compress_default(
+			(char *)data, (char *)compressed_data, sizeof(data),
+			ARRAY_SIZE(compressed_data));
+
+		if (comp_size <= 0) {
+			error = -EBADMSG;
+			goto out;
+		}
+		fill_block.compression = COMPRESSION_LZ4;
+		fill_block.data = ptr_to_u64(compressed_data);
+		fill_block.data_len = comp_size;
+	}
+
+	for (i = 0; i < block_count; ++i) {
+		struct incfs_fill_blocks fill_blocks = {
+			.count = 1,
+			.fill_blocks = ptr_to_u64(&fill_block),
+		};
+
+		fill_block.block_index = blocks[i];
+		int written = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+
+		if (written != 1) {
+			error = -errno;
+			err_msg("Failed to write block %lu in file %s", i,
+				name);
+			break;
+		}
+	}
+
+out:
+	free(blocks);
+	close(fd);
+	sync();
+	return error;
+}
+
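+/*
+ * For each block size (4096 << n), drop the page cache, read the whole
+ * file with pread() at sequential or shuffled offsets, and report the
+ * average throughput plus the drop in MemFree across the reads.
+ */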
+int measure_read_throughput_internal(const char *tag, int dir, const char *name,
+				     const struct options *options, bool random)
+{
+	int block;
+
+	if (random)
+		printf("%32s(random)", tag);
+	else
+		printf("%40s", tag);
+
+	for (block = 0; block < options->blocks; ++block) {
+		size_t buffer_size;
+		char *buffer;
+		int try;
+		double time = 0;
+		double throughput;
+		int memory = 0;
+
+		buffer_size = 1 << (block + 12);
+		buffer = malloc(buffer_size);
+		if (!buffer) {
+			err_msg("Not enough memory");
+			return -ENOMEM;
+		}
+
+		for (try = 0; try < options->tries; ++try) {
+			int err;
+			struct timespec start_time, end_time;
+			off_t i;
+			int fd;
+			size_t offsets_size = options->size / buffer_size;
+			size_t *offsets =
+				malloc(offsets_size * sizeof(*offsets));
+			int start_memory, end_memory;
+
+			if (!offsets) {
+				err_msg("Not enough memory");
+				return -ENOMEM;
+			}
+
+			for (i = 0; i < offsets_size; ++i)
+				offsets[i] = i * buffer_size;
+
+			if (random)
+				shuffle(offsets, offsets_size);
+
+			err = drop_caches();
+			if (err) {
+				err_msg("Failed to drop caches");
+				return err;
+			}
+
+			start_memory = get_free_memory();
+			if (start_memory < 0) {
+				err_msg("Failed to get start memory");
+				return start_memory;
+			}
+
+			fd = openat(dir, name, O_RDONLY | O_CLOEXEC);
+			if (fd == -1) {
+				err_msg("Failed to open file");
+				return -errno;
+			}
+
+			err = clock_gettime(CLOCK_MONOTONIC, &start_time);
+			if (err) {
+				err_msg("Failed to get start time");
+				return err;
+			}
+
+			for (i = 0; i < offsets_size; ++i)
+				if (pread(fd, buffer, buffer_size,
+					  offsets[i]) != buffer_size) {
+					err_msg("Failed to read file");
+					err = -errno;
+					goto fail;
+				}
+
+			err = clock_gettime(CLOCK_MONOTONIC, &end_time);
+			if (err) {
+				err_msg("Failed to get start time");
+				goto fail;
+			}
+
+			end_memory = get_free_memory();
+			if (end_memory < 0) {
+				err_msg("Failed to get end memory");
+				return end_memory;
+			}
+
+			time += end_time.tv_sec - start_time.tv_sec;
+			time += (end_time.tv_nsec - start_time.tv_nsec) / 1e9;
+
+			close(fd);
+			fd = -1;
+			memory += start_memory - end_memory;
+
+fail:
+			free(offsets);
+			close(fd);
+			if (err)
+				return err;
+		}
+
+		throughput = options->size * options->tries / time;
+		printf("%10.3e %10d", throughput, memory / options->tries);
+		free(buffer);
+	}
+
+	printf("\n");
+	return 0;
+}
+
+int measure_read_throughput(const char *tag, int dir, const char *name,
+			    const struct options *options)
+{
+	int err = 0;
+
+	if (!options->no_linear)
+		err = measure_read_throughput_internal(tag, dir, name, options,
+						       false);
+
+	if (!err && !options->no_random)
+		err = measure_read_throughput_internal(tag, dir, name, options,
+						       true);
+	return err;
+}
+
+int test_native_file(int dir, const struct options *options)
+{
+	const char *name = "file";
+	int fd;
+	char buffer[4096] = {};
+	off_t i;
+	int err;
+
+	fd = openat(dir, name, O_CREAT | O_WRONLY | O_CLOEXEC, 0600);
+	if (fd == -1) {
+		err_msg("Could not open native file");
+		return -errno;
+	}
+
+	for (i = 0; i < options->size; i += sizeof(buffer))
+		if (pwrite(fd, buffer, sizeof(buffer), i) != sizeof(buffer)) {
+			err_msg("Failed to write file");
+			err = -errno;
+			goto fail;
+		}
+
+	close(fd);
+	sync();
+	fd = -1;
+
+	err = measure_read_throughput("native", dir, name, options);
+
+fail:
+	close(fd);
+	return err;
+}
+
+struct hash_block {
+	char data[INCFS_DATA_FILE_BLOCK_SIZE];
+};
+
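+/*
+ * Build a SHA-256 Merkle tree over an all-zero file of the given size.
+ * The tree is stored root-first: tree_lvl_index[] records where each level
+ * starts, with the leaf hashes (level 0) at the end.
+ */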
+static struct hash_block *build_mtree(size_t size, char *root_hash,
+				      int *mtree_block_count)
+{
+	char data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+	const int digest_size = SHA256_DIGEST_SIZE;
+	const int hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size;
+	int block_count = 0;
+	int hash_block_count = 0;
+	int total_tree_block_count = 0;
+	int tree_lvl_index[INCFS_MAX_MTREE_LEVELS] = {};
+	int tree_lvl_count[INCFS_MAX_MTREE_LEVELS] = {};
+	int levels_count = 0;
+	int i, level;
+	struct hash_block *mtree;
+
+	if (size == 0)
+		return NULL;
+
+	block_count = 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	hash_block_count = block_count;
+	for (i = 0; hash_block_count > 1; i++) {
+		hash_block_count = (hash_block_count + hash_per_block - 1) /
+				   hash_per_block;
+		tree_lvl_count[i] = hash_block_count;
+		total_tree_block_count += hash_block_count;
+	}
+	levels_count = i;
+
+	for (i = 0; i < levels_count; i++) {
+		int prev_lvl_base = (i == 0) ? total_tree_block_count :
+					       tree_lvl_index[i - 1];
+
+		tree_lvl_index[i] = prev_lvl_base - tree_lvl_count[i];
+	}
+
+	*mtree_block_count = total_tree_block_count;
+	mtree = calloc(total_tree_block_count, sizeof(*mtree));
+	if (!mtree)
+		return NULL;
+	/* Build level 0 hashes. */
+	for (i = 0; i < block_count; i++) {
+		int block_index = tree_lvl_index[0] + i / hash_per_block;
+		int block_off = (i % hash_per_block) * digest_size;
+		char *hash_ptr = mtree[block_index].data + block_off;
+
+		sha256(data, INCFS_DATA_FILE_BLOCK_SIZE, hash_ptr);
+	}
+
+	/* Build higher levels of hash tree. */
+	for (level = 1; level < levels_count; level++) {
+		int prev_lvl_base = tree_lvl_index[level - 1];
+		int prev_lvl_count = tree_lvl_count[level - 1];
+
+		for (i = 0; i < prev_lvl_count; i++) {
+			int block_index =
+				i / hash_per_block + tree_lvl_index[level];
+			int block_off = (i % hash_per_block) * digest_size;
+			char *hash_ptr = mtree[block_index].data + block_off;
+
+			sha256(mtree[i + prev_lvl_base].data,
+			       INCFS_DATA_FILE_BLOCK_SIZE, hash_ptr);
+		}
+	}
+
+	/* Calculate root hash from the top block */
+	sha256(mtree[0].data, INCFS_DATA_FILE_BLOCK_SIZE, root_hash);
+
+	return mtree;
+}
+
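+/*
+ * Upload the Merkle tree in a single INCFS_IOC_FILL_BLOCKS call, with
+ * INCFS_BLOCK_FLAGS_HASH marking the blocks as hash rather than data
+ * blocks.
+ */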
+static int load_hash_tree(int cmd_fd, int dir, const char *name,
+			  struct hash_block *mtree, int mtree_block_count)
+{
+	int err;
+	int i;
+	int fd;
+	struct incfs_fill_block *fill_block_array =
+		calloc(mtree_block_count, sizeof(struct incfs_fill_block));
+	struct incfs_fill_blocks fill_blocks = {
+		.count = mtree_block_count,
+		.fill_blocks = ptr_to_u64(fill_block_array),
+	};
+	struct incfs_permit_fill permit_fill;
+
+	if (!fill_block_array)
+		return -ENOMEM;
+
+	for (i = 0; i < fill_blocks.count; i++) {
+		fill_block_array[i] = (struct incfs_fill_block){
+			.block_index = i,
+			.data_len = INCFS_DATA_FILE_BLOCK_SIZE,
+			.data = ptr_to_u64(mtree[i].data),
+			.flags = INCFS_BLOCK_FLAGS_HASH
+		};
+	}
+
+	fd = openat(dir, name, O_RDONLY | O_CLOEXEC);
+	if (fd < 0) {
+		err = errno;
+		goto failure;
+	}
+
+	permit_fill.file_descriptor = fd;
+	if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+		err_msg("Failed to call PERMIT_FILL");
+		err = -errno;
+		goto failure;
+	}
+
+	err = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+	close(fd);
+	if (err < fill_blocks.count)
+		err = errno;
+	else
+		err = 0;
+
+failure:
+	free(fill_block_array);
+	return err;
+}
+
+int test_incfs_file(int dst_dir, const struct options *options, int flags)
+{
+	int cmd_file = openat(dst_dir, INCFS_PENDING_READS_FILENAME,
+			      O_RDONLY | O_CLOEXEC);
+	int err;
+	char name[4];
+	incfs_uuid_t id;
+	char tag[256];
+
+	snprintf(name, sizeof(name), "%c%c%c",
+		 flags & SHUFFLE ? 'S' : 's',
+		 flags & COMPRESS ? 'C' : 'c',
+		 flags & VERIFY ? 'V' : 'v');
+
+	if (cmd_file == -1) {
+		err_msg("Could not open command file");
+		return -errno;
+	}
+
+	if (flags & VERIFY) {
+		char root_hash[INCFS_MAX_HASH_SIZE];
+		int mtree_block_count;
+		struct hash_block *mtree = build_mtree(options->size, root_hash,
+						       &mtree_block_count);
+
+		if (!mtree) {
+			err_msg("Failed to build hash tree");
+			err = -ENOMEM;
+			goto fail;
+		}
+
+		err = crypto_emit_file(cmd_file, NULL, name, &id, options->size,
+				       root_hash, "add_data");
+
+		if (!err)
+			err = load_hash_tree(cmd_file, dst_dir, name, mtree,
+					     mtree_block_count);
+
+		free(mtree);
+	} else
+		err = emit_file(cmd_file, NULL, name, &id, options->size, NULL);
+
+	if (err) {
+		err_msg("Failed to create file %s", name);
+		goto fail;
+	}
+
+	err = write_data(cmd_file, dst_dir, name, options->size, flags);
+	if (err)
+		goto fail;
+
+	snprintf(tag, sizeof(tag), "incfs%s%s%s",
+		 flags & SHUFFLE ? "(shuffle)" : "",
+		 flags & COMPRESS ? "(compress)" : "",
+		 flags & VERIFY ? "(verify)" : "");
+
+	err = measure_read_throughput(tag, dst_dir, name, options);
+
+fail:
+	close(cmd_file);
+	return err;
+}
+
+bool skip(struct options const *options, int flag, char c)
+{
+	if (!options->file_types)
+		return false;
+
+	if (flag && strchr(options->file_types, tolower(c)))
+		return true;
+
+	if (!flag && strchr(options->file_types, toupper(c)))
+		return true;
+
+	return false;
+}
+
+int main(int argc, char *const *argv)
+{
+	struct options options;
+	int err;
+	const char *native_dir = "native";
+	const char *src_dir = "src";
+	const char *dst_dir = "dst";
+	int native_dir_fd = -1;
+	int src_dir_fd = -1;
+	int dst_dir_fd = -1;
+	int block;
+	int flags;
+
+	err = parse_options(argc, argv, &options);
+	if (err)
+		return err;
+
+	err = chdir(options.test_dir);
+	if (err) {
+		err_msg("Failed to change to %s", options.test_dir);
+		return -errno;
+	}
+
+	/* Clean up any interrupted previous runs */
+	while (!umount(dst_dir))
+		;
+
+	err = remove_dir(native_dir) || remove_dir(src_dir) ||
+	      remove_dir(dst_dir);
+	if (err)
+		return err;
+
+	err = mkdir(native_dir, 0700);
+	if (err) {
+		err_msg("Failed to make directory %s", src_dir);
+		err = -errno;
+		goto cleanup;
+	}
+
+	err = mkdir(src_dir, 0700);
+	if (err) {
+		err_msg("Failed to make directory %s", src_dir);
+		err = -errno;
+		goto cleanup;
+	}
+
+	err = mkdir(dst_dir, 0700);
+	if (err) {
+		err_msg("Failed to make directory %s", src_dir);
+		err = -errno;
+		goto cleanup;
+	}
+
+	err = mount_fs_opt(dst_dir, src_dir, "readahead=0,rlog_pages=0", 0);
+	if (err) {
+		err_msg("Failed to mount incfs");
+		goto cleanup;
+	}
+
+	native_dir_fd = open(native_dir, O_RDONLY | O_CLOEXEC);
+	src_dir_fd = open(src_dir, O_RDONLY | O_CLOEXEC);
+	dst_dir_fd = open(dst_dir, O_RDONLY | O_CLOEXEC);
+	if (native_dir_fd == -1 || src_dir_fd == -1 || dst_dir_fd == -1) {
+		err_msg("Failed to open native, src or dst dir");
+		err = -errno;
+		goto cleanup;
+	}
+
+	printf("%40s", "");
+	for (block = 0; block < options.blocks; ++block)
+		printf("%21d", 1 << (block + 12));
+	printf("\n");
+
+	if (!err && !options.no_native)
+		err = test_native_file(native_dir_fd, &options);
+
+	for (flags = 0; flags < LAST_FLAG && !err; ++flags) {
+		if (skip(&options, flags & SHUFFLE, 's') ||
+		    skip(&options, flags & COMPRESS, 'c') ||
+		    skip(&options, flags & VERIFY, 'v'))
+			continue;
+		err = test_incfs_file(dst_dir_fd, &options, flags);
+	}
+
+cleanup:
+	close(native_dir_fd);
+	close(src_dir_fd);
+	close(dst_dir_fd);
+	if (!options.no_cleanup) {
+		umount(dst_dir);
+		remove_dir(native_dir);
+		remove_dir(dst_dir);
+		remove_dir(src_dir);
+	}
+
+	return err;
+}
diff --git a/tools/testing/selftests/filesystems/incfs/incfs_stress.c b/tools/testing/selftests/filesystems/incfs/incfs_stress.c
new file mode 100644
index 0000000..a1d4917
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/incfs_stress.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google LLC
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+#define err_msg(...)                                                           \
+	do {                                                                   \
+		fprintf(stderr, "%s: (%d) ", TAG, __LINE__);                   \
+		fprintf(stderr, __VA_ARGS__);                                  \
+		fprintf(stderr, " (%s)\n", strerror(errno));                   \
+	} while (false)
+
+#define TAG "incfs_stress"
+
+struct options {
+	bool no_cleanup; /* -c */
+	const char *test_dir; /* -d */
+	unsigned int rng_seed; /* -g */
+	int num_reads; /* -n */
+	int readers; /* -r */
+	int size; /* -s */
+	int timeout; /* -t */
+};
+
+struct read_data {
+	const char *filename;
+	int dir_fd;
+	size_t filesize;
+	int num_reads;
+	unsigned int rng_seed;
+};
+
+/* Set to ask reader threads to stop early; polled without locking. */
+volatile int cancel_threads;
+
+int parse_options(int argc, char *const *argv, struct options *options)
+{
+	signed char c;
+
+	/* Set defaults here */
+	*options = (struct options){
+		.test_dir = ".",
+		.num_reads = 1000,
+		.readers = 10,
+		.size = 10,
+	};
+
+	/* Load options from command line here */
+	while ((c = getopt(argc, argv, "cd:g:n:r:s:t:")) != -1) {
+		switch (c) {
+		case 'c':
+			options->no_cleanup = true;
+			break;
+
+		case 'd':
+			options->test_dir = optarg;
+			break;
+
+		case 'g':
+			options->rng_seed = strtol(optarg, NULL, 10);
+			break;
+
+		case 'n':
+			options->num_reads = strtol(optarg, NULL, 10);
+			break;
+
+		case 'r':
+			options->readers = strtol(optarg, NULL, 10);
+			break;
+
+		case 's':
+			options->size = strtol(optarg, NULL, 10);
+			break;
+
+		case 't':
+			options->timeout = strtol(optarg, NULL, 10);
+			break;
+		}
+	}
+
+	return 0;
+}
+
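+/*
+ * Reader thread: issue num_reads preads at random offsets and lengths,
+ * stopping early if cancel_threads is set.
+ */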
+void *reader(void *data)
+{
+	struct read_data *read_data = (struct read_data *)data;
+	int i;
+	int fd = -1;
+	void *buffer = malloc(read_data->filesize);
+
+	if (!buffer) {
+		err_msg("Failed to alloc read buffer");
+		goto out;
+	}
+
+	fd = openat(read_data->dir_fd, read_data->filename,
+		    O_RDONLY | O_CLOEXEC);
+	if (fd == -1) {
+		err_msg("Failed to open file");
+		goto out;
+	}
+
+	for (i = 0; i < read_data->num_reads && !cancel_threads; ++i) {
+		off_t offset = rnd(read_data->filesize, &read_data->rng_seed);
+		size_t count =
+			rnd(read_data->filesize - offset, &read_data->rng_seed);
+		ssize_t err = pread(fd, buffer, count, offset);
+
+		if (err != count)
+			err_msg("failed to read with value %lu", err);
+	}
+
+out:
+	close(fd);
+	free(read_data);
+	free(buffer);
+	return NULL;
+}
+
+int write_data(int cmd_fd, int dir_fd, const char *name, size_t size)
+{
+	int fd = openat(dir_fd, name, O_RDWR | O_CLOEXEC);
+	struct incfs_permit_fill permit_fill = {
+		.file_descriptor = fd,
+	};
+	int error = 0;
+	int i;
+	int block_count = 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+
+	if (fd == -1) {
+		err_msg("Could not open file for writing %s", name);
+		return -errno;
+	}
+
+	if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+		err_msg("Failed to call PERMIT_FILL");
+		error = -errno;
+		goto out;
+	}
+
+	for (i = 0; i < block_count; ++i) {
+		uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+		size_t remaining = size - i * INCFS_DATA_FILE_BLOCK_SIZE;
+		size_t block_size = remaining > INCFS_DATA_FILE_BLOCK_SIZE ?
+					INCFS_DATA_FILE_BLOCK_SIZE : remaining;
+		struct incfs_fill_block fill_block = {
+			.compression = COMPRESSION_NONE,
+			.block_index = i,
+			.data_len = block_size,
+			.data = ptr_to_u64(data),
+		};
+		struct incfs_fill_blocks fill_blocks = {
+			.count = 1,
+			.fill_blocks = ptr_to_u64(&fill_block),
+		};
+		int written = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+
+		if (written != 1) {
+			error = -errno;
+			err_msg("Failed to write block %d in file %s", i, name);
+			break;
+		}
+	}
+out:
+	close(fd);
+	return error;
+}
+
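+/*
+ * Create one incfs file of random size, start the reader threads, then
+ * fill the file with data while they read it.
+ */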
+int test_files(int src_dir, int dst_dir, struct options const *options)
+{
+	unsigned int seed = options->rng_seed;
+	int cmd_file = openat(dst_dir, INCFS_PENDING_READS_FILENAME,
+			      O_RDONLY | O_CLOEXEC);
+	int err;
+	const char *name = "001";
+	incfs_uuid_t id;
+	size_t size;
+	int i;
+	pthread_t *threads = NULL;
+
+	size = 1 << (rnd(options->size, &seed) + 12);
+	size += rnd(size, &seed);
+
+	if (cmd_file == -1) {
+		err_msg("Could not open command file");
+		return -errno;
+	}
+
+	err = emit_file(cmd_file, NULL, name, &id, size, NULL);
+	if (err) {
+		err_msg("Failed to create file %s", name);
+		return err;
+	}
+
+	threads = malloc(sizeof(pthread_t) * options->readers);
+	if (!threads) {
+		err_msg("Could not allocate memory for threads");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < options->readers; ++i) {
+		struct read_data *read_data = malloc(sizeof(*read_data));
+
+		if (!read_data) {
+			err_msg("Failed to allocate read_data");
+			err = -ENOMEM;
+			break;
+		}
+
+		*read_data = (struct read_data){
+			.filename = name,
+			.dir_fd = dst_dir,
+			.filesize = size,
+			.num_reads = options->num_reads,
+			.rng_seed = seed,
+		};
+
+		rnd(0, &seed);
+
+		err = pthread_create(threads + i, 0, reader, read_data);
+		if (err) {
+			err_msg("Failed to create thread");
+			free(read_data);
+			break;
+		}
+	}
+
+	if (err)
+		cancel_threads = 1;
+	else
+		err = write_data(cmd_file, dst_dir, name, size);
+
+	for (; i > 0; --i) {
+		if (pthread_join(threads[i - 1], NULL)) {
+			err_msg("FATAL: failed to join thread");
+			/* pthread_join() returns an error code, not errno */
+			exit(1);
+		}
+	}
+
+	free(threads);
+	close(cmd_file);
+	return err;
+}
+
+int main(int argc, char *const *argv)
+{
+	struct options options;
+	int err;
+	const char *src_dir = "src";
+	const char *dst_dir = "dst";
+	int src_dir_fd = -1;
+	int dst_dir_fd = -1;
+
+	err = parse_options(argc, argv, &options);
+	if (err)
+		return err;
+
+	err = chdir(options.test_dir);
+	if (err) {
+		err_msg("Failed to change to %s", options.test_dir);
+		return -errno;
+	}
+
+	err = remove_dir(src_dir) || remove_dir(dst_dir);
+	if (err)
+		return err;
+
+	err = mkdir(src_dir, 0700);
+	if (err) {
+		err_msg("Failed to make directory %s", src_dir);
+		err = -errno;
+		goto cleanup;
+	}
+
+	err = mkdir(dst_dir, 0700);
+	if (err) {
+		err_msg("Failed to make directory %s", src_dir);
+		err = -errno;
+		goto cleanup;
+	}
+
+	err = mount_fs(dst_dir, src_dir, options.timeout);
+	if (err) {
+		err_msg("Failed to mount incfs");
+		goto cleanup;
+	}
+
+	src_dir_fd = open(src_dir, O_RDONLY | O_CLOEXEC);
+	dst_dir_fd = open(dst_dir, O_RDONLY | O_CLOEXEC);
+	if (src_dir_fd == -1 || dst_dir_fd == -1) {
+		err_msg("Failed to open src or dst dir");
+		err = -errno;
+		goto cleanup;
+	}
+
+	err = test_files(src_dir_fd, dst_dir_fd, &options);
+
+cleanup:
+	close(src_dir_fd);
+	close(dst_dir_fd);
+	if (!options.no_cleanup) {
+		umount(dst_dir);
+		remove_dir(dst_dir);
+		remove_dir(src_dir);
+	}
+
+	return err;
+}
diff --git a/tools/testing/selftests/filesystems/incfs/incfs_test.c b/tools/testing/selftests/filesystems/incfs/incfs_test.c
new file mode 100644
index 0000000..8741a3d
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/incfs_test.c
@@ -0,0 +1,3605 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+#include <alloca.h>
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <lz4.h>
+#include <poll.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <zstd.h>
+
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/xattr.h>
+
+#include <linux/random.h>
+#include <linux/unistd.h>
+
+#include <kselftest.h>
+
+#include "utils.h"
+
+#define TEST_FAILURE 1
+#define TEST_SUCCESS 0
+
+#define INCFS_ROOT_INODE 0
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define le16_to_cpu(x)          (x)
+#define le32_to_cpu(x)          (x)
+#define le64_to_cpu(x)          (x)
+#else
+#error Big endian not supported!
+#endif
+
+struct {
+	int file;
+	int test;
+	bool verbose;
+} options;
+
+#define TESTCOND(condition)						\
+	do {								\
+		if (!(condition)) {					\
+			ksft_print_msg("%s failed %d\n",		\
+				       __func__, __LINE__);		\
+			goto out;					\
+		} else if (options.verbose)				\
+			ksft_print_msg("%s succeeded %d\n",		\
+				       __func__, __LINE__);		\
+	} while (false)
+
+#define TEST(statement, condition)					\
+	do {								\
+		statement;						\
+		TESTCOND(condition);					\
+	} while (false)
+
+#define TESTEQUAL(statement, res)					\
+	TESTCOND((statement) == (res))
+
+#define TESTNE(statement, res)					\
+	TESTCOND((statement) != (res))
+
+struct hash_block {
+	char data[INCFS_DATA_FILE_BLOCK_SIZE];
+};
+
+struct test_signature {
+	void *data;
+	size_t size;
+
+	char add_data[100];
+	size_t add_data_size;
+};
+
+struct test_file {
+	int index;
+	incfs_uuid_t id;
+	char *name;
+	off_t size;
+	char root_hash[INCFS_MAX_HASH_SIZE];
+	struct hash_block *mtree;
+	int mtree_block_count;
+	struct test_signature sig;
+};
+
+struct test_files_set {
+	struct test_file *files;
+	int files_count;
+};
+
+struct linux_dirent64 {
+	uint64_t       d_ino;
+	int64_t        d_off;
+	unsigned short d_reclen;
+	unsigned char  d_type;
+	char	       d_name[0];
+} __packed;
+
+struct test_files_set get_test_files_set(void)
+{
+	static struct test_file files[] = {
+		{ .index = 0, .name = "file_one_byte", .size = 1 },
+		{ .index = 1,
+		  .name = "file_one_block",
+		  .size = INCFS_DATA_FILE_BLOCK_SIZE },
+		{ .index = 2,
+		  .name = "file_one_and_a_half_blocks",
+		  .size = INCFS_DATA_FILE_BLOCK_SIZE +
+			  INCFS_DATA_FILE_BLOCK_SIZE / 2 },
+		{ .index = 3,
+		  .name = "file_three",
+		  .size = 300 * INCFS_DATA_FILE_BLOCK_SIZE + 3 },
+		{ .index = 4,
+		  .name = "file_four",
+		  .size = 400 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+		{ .index = 5,
+		  .name = "file_five",
+		  .size = 500 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+		{ .index = 6,
+		  .name = "file_six",
+		  .size = 600 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+		{ .index = 7,
+		  .name = "file_seven",
+		  .size = 700 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+		{ .index = 8,
+		  .name = "file_eight",
+		  .size = 800 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+		{ .index = 9,
+		  .name = "file_nine",
+		  .size = 900 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+		{ .index = 10, .name = "file_big", .size = 500 * 1024 * 1024 }
+	};
+
+	if (options.file)
+		return (struct test_files_set) {
+			.files = files + options.file - 1,
+			.files_count = 1,
+		};
+
+	return (struct test_files_set){ .files = files,
+					.files_count = ARRAY_SIZE(files) };
+}
+
+struct test_files_set get_small_test_files_set(void)
+{
+	static struct test_file files[] = {
+		{ .index = 0, .name = "file_one_byte", .size = 1 },
+		{ .index = 1,
+		  .name = "file_one_block",
+		  .size = INCFS_DATA_FILE_BLOCK_SIZE },
+		{ .index = 2,
+		  .name = "file_one_and_a_half_blocks",
+		  .size = INCFS_DATA_FILE_BLOCK_SIZE +
+			  INCFS_DATA_FILE_BLOCK_SIZE / 2 },
+		{ .index = 3,
+		  .name = "file_three",
+		  .size = 300 * INCFS_DATA_FILE_BLOCK_SIZE + 3 },
+		{ .index = 4,
+		  .name = "file_four",
+		  .size = 400 * INCFS_DATA_FILE_BLOCK_SIZE + 7 }
+	};
+	return (struct test_files_set){ .files = files,
+					.files_count = ARRAY_SIZE(files) };
+}
+
+static int get_file_block_seed(int file, int block)
+{
+	return 7919 * file + block;
+}
+
+static loff_t min(loff_t a, loff_t b)
+{
+	return a < b ? a : b;
+}
+
+static pid_t flush_and_fork(void)
+{
+	fflush(stdout);
+	return fork();
+}
+
+static void print_error(char *msg)
+{
+	ksft_print_msg("%s: %s\n", msg, strerror(errno));
+}
+
+static int wait_for_process(pid_t pid)
+{
+	int status;
+	int wait_res;
+
+	wait_res = waitpid(pid, &status, 0);
+	if (wait_res <= 0) {
+		print_error("Can't wait for the child");
+		return -EINVAL;
+	}
+	if (!WIFEXITED(status)) {
+		ksft_print_msg("Unexpected child status pid=%d\n", pid);
+		return -EINVAL;
+	}
+	status = WEXITSTATUS(status);
+	if (status != 0)
+		return status;
+	return 0;
+}
+
+static void rnd_buf(uint8_t *data, size_t len, unsigned int seed)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		seed = 1103515245 * seed + 12345;
+		data[i] = (uint8_t)(seed >> (i % 13));
+	}
+}
+
+char *bin2hex(char *dst, const void *src, size_t count)
+{
+	const unsigned char *_src = src;
+	static const char hex_asc[] = "0123456789abcdef";
+
+	while (count--) {
+		unsigned char x = *_src++;
+
+		*dst++ = hex_asc[(x & 0xf0) >> 4];
+		*dst++ = hex_asc[(x & 0x0f)];
+	}
+	*dst = 0;
+	return dst;
+}
+
+static char *get_index_filename(const char *mnt_dir, incfs_uuid_t id)
+{
+	char path[FILENAME_MAX];
+	char str_id[1 + 2 * sizeof(id)];
+
+	bin2hex(str_id, id.bytes, sizeof(id.bytes));
+	snprintf(path, ARRAY_SIZE(path), "%s/.index/%s", mnt_dir, str_id);
+
+	return strdup(path);
+}
+
+static char *get_incomplete_filename(const char *mnt_dir, incfs_uuid_t id)
+{
+	char path[FILENAME_MAX];
+	char str_id[1 + 2 * sizeof(id)];
+
+	bin2hex(str_id, id.bytes, sizeof(id.bytes));
+	snprintf(path, ARRAY_SIZE(path), "%s/.incomplete/%s", mnt_dir, str_id);
+
+	return strdup(path);
+}
+
+int open_file_by_id(const char *mnt_dir, incfs_uuid_t id, bool use_ioctl)
+{
+	char *path = get_index_filename(mnt_dir, id);
+	int cmd_fd = open_commands_file(mnt_dir);
+	int fd = open(path, O_RDWR | O_CLOEXEC);
+	struct incfs_permit_fill permit_fill = {
+		.file_descriptor = fd,
+	};
+	int error = 0;
+
+	if (fd < 0) {
+		print_error("Can't open file by id.");
+		error = -errno;
+		goto out;
+	}
+
+	if (use_ioctl && ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+		print_error("Failed to call PERMIT_FILL");
+		error = -errno;
+		goto out;
+	}
+
+	if (ioctl(fd, INCFS_IOC_PERMIT_FILL, &permit_fill) != -1) {
+		print_error(
+			"Successfully called PERMIT_FILL on non pending_read file");
+		error = -EPERM;
+		goto out;
+	}
+
+out:
+	free(path);
+	close(cmd_fd);
+
+	if (error) {
+		close(fd);
+		return error;
+	}
+
+	return fd;
+}
+
+int get_file_attr(const char *mnt_dir, incfs_uuid_t id, char *value, int size)
+{
+	char *path = get_index_filename(mnt_dir, id);
+	int res;
+
+	res = getxattr(path, INCFS_XATTR_METADATA_NAME, value, size);
+	if (res < 0)
+		res = -errno;
+
+	free(path);
+	return res;
+}
+
+static bool same_id(incfs_uuid_t *id1, incfs_uuid_t *id2)
+{
+	return !memcmp(id1->bytes, id2->bytes, sizeof(id1->bytes));
+}
+
+ssize_t ZSTD_compress_default(char *data, char *comp_data, size_t data_size,
+					size_t comp_size)
+{
+	return ZSTD_compress(comp_data, comp_size, data, data_size, 1);
+}
+
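+/*
+ * Write up to 32 of the requested blocks. Block contents come from a
+ * seeded PRNG; depending on (file index + block index) % 4 a block is
+ * stored LZ4-compressed, ZSTD-compressed or raw. Also verifies that
+ * filling fails on an fd that was not authorized with
+ * INCFS_IOC_PERMIT_FILL.
+ */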
+static int emit_test_blocks(const char *mnt_dir, struct test_file *file,
+			int blocks[], int count)
+{
+	uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+	uint8_t comp_data[2 * INCFS_DATA_FILE_BLOCK_SIZE];
+	int block_count = (count > 32) ? 32 : count;
+	int data_buf_size = 2 * INCFS_DATA_FILE_BLOCK_SIZE * block_count;
+	uint8_t *data_buf = malloc(data_buf_size);
+	uint8_t *current_data = data_buf;
+	uint8_t *data_end = data_buf + data_buf_size;
+	struct incfs_fill_block *block_buf =
+		calloc(block_count, sizeof(struct incfs_fill_block));
+	struct incfs_fill_blocks fill_blocks = {
+		.count = block_count,
+		.fill_blocks = ptr_to_u64(block_buf),
+	};
+	ssize_t write_res = 0;
+	int fd = -1;
+	int error = 0;
+	int i = 0;
+	int blocks_written = 0;
+
+	for (i = 0; i < block_count; i++) {
+		int block_index = blocks[i];
+		bool compress_zstd = (file->index + block_index) % 4 == 2;
+		bool compress_lz4 = (file->index + block_index) % 4 == 0;
+		int seed = get_file_block_seed(file->index, block_index);
+		off_t block_offset =
+			((off_t)block_index) * INCFS_DATA_FILE_BLOCK_SIZE;
+		size_t block_size = 0;
+
+		if (block_offset > file->size) {
+			error = -EINVAL;
+			break;
+		}
+		if (file->size - block_offset >
+			INCFS_DATA_FILE_BLOCK_SIZE)
+			block_size = INCFS_DATA_FILE_BLOCK_SIZE;
+		else
+			block_size = file->size - block_offset;
+
+		rnd_buf(data, block_size, seed);
+		if (compress_lz4) {
+			size_t comp_size = LZ4_compress_default((char *)data,
+					(char *)comp_data, block_size,
+					ARRAY_SIZE(comp_data));
+
+			if (comp_size <= 0) {
+				error = -EBADMSG;
+				break;
+			}
+			if (current_data + comp_size > data_end) {
+				error = -ENOMEM;
+				break;
+			}
+			memcpy(current_data, comp_data, comp_size);
+			block_size = comp_size;
+			block_buf[i].compression = COMPRESSION_LZ4;
+		} else if (compress_zstd) {
+			size_t comp_size = ZSTD_compress(comp_data,
+					ARRAY_SIZE(comp_data), data, block_size,
+					1);
+
+			if (comp_size <= 0) {
+				error = -EBADMSG;
+				break;
+			}
+			if (current_data + comp_size > data_end) {
+				error = -ENOMEM;
+				break;
+			}
+			memcpy(current_data, comp_data, comp_size);
+			block_size = comp_size;
+			block_buf[i].compression = COMPRESSION_ZSTD;
+		} else {
+			if (current_data + block_size > data_end) {
+				error = -ENOMEM;
+				break;
+			}
+			memcpy(current_data, data, block_size);
+			block_buf[i].compression = COMPRESSION_NONE;
+		}
+
+		block_buf[i].block_index = block_index;
+		block_buf[i].data_len = block_size;
+		block_buf[i].data = ptr_to_u64(current_data);
+		current_data += block_size;
+	}
+
+	if (!error) {
+		fd = open_file_by_id(mnt_dir, file->id, false);
+		if (fd < 0) {
+			error = -errno;
+			goto out;
+		}
+		write_res = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+		if (write_res >= 0) {
+			ksft_print_msg("Wrote to file via normal fd error\n");
+			error = -EPERM;
+			goto out;
+		}
+
+		close(fd);
+		fd = open_file_by_id(mnt_dir, file->id, true);
+		if (fd < 0) {
+			error = -errno;
+			goto out;
+		}
+		write_res = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+		if (write_res < 0)
+			error = -errno;
+		else
+			blocks_written = write_res;
+	}
+	if (error) {
+		ksft_print_msg(
+			"Writing data block error. Write returned: %d. Error:%s\n",
+			write_res, strerror(-error));
+	}
+
+out:
+	free(block_buf);
+	free(data_buf);
+	close(fd);
+	return (error < 0) ? error : blocks_written;
+}
+
+static int emit_test_block(const char *mnt_dir, struct test_file *file,
+				int block_index)
+{
+	int res = emit_test_blocks(mnt_dir, file, &block_index, 1);
+
+	if (res == 0)
+		return -EINVAL;
+	if (res == 1)
+		return 0;
+	return res;
+}
+
+static void shuffle(int array[], int count, unsigned int seed)
+{
+	int i;
+
+	for (i = 0; i < count - 1; i++) {
+		int items_left = count - i;
+		int shuffle_index;
+		int v;
+
+		seed = 1103515245 * seed + 12345;
+		shuffle_index = i + seed % items_left;
+
+		v = array[shuffle_index];
+		array[shuffle_index] = array[i];
+		array[i] = v;
+	}
+}
+
+static int emit_test_file_data(const char *mount_dir, struct test_file *file)
+{
+	int i;
+	int block_cnt = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	int *block_indexes = NULL;
+	int result = 0;
+	int blocks_written = 0;
+
+	if (file->size == 0)
+		return 0;
+
+	block_indexes = calloc(block_cnt, sizeof(*block_indexes));
+	if (!block_indexes)
+		return -ENOMEM;
+	for (i = 0; i < block_cnt; i++)
+		block_indexes[i] = i;
+	shuffle(block_indexes, block_cnt, file->index);
+
+	for (i = 0; i < block_cnt; i += blocks_written) {
+		blocks_written = emit_test_blocks(mount_dir, file,
+					block_indexes + i, block_cnt - i);
+		if (blocks_written < 0) {
+			result = blocks_written;
+			goto out;
+		}
+		if (blocks_written == 0) {
+			result = -EIO;
+			goto out;
+		}
+	}
+out:
+	free(block_indexes);
+	return result;
+}
+
+static loff_t read_whole_file(const char *filename)
+{
+	int fd = -1;
+	loff_t result;
+	loff_t bytes_read = 0;
+	uint8_t buff[16 * 1024];
+
+	fd = open(filename, O_RDONLY | O_CLOEXEC);
+	if (fd <= 0)
+		return fd;
+
+	while (1) {
+		int read_result = read(fd, buff, ARRAY_SIZE(buff));
+
+		if (read_result < 0) {
+			print_error("Error during reading from a file.");
+			result = -errno;
+			goto cleanup;
+		} else if (read_result == 0)
+			break;
+
+		bytes_read += read_result;
+	}
+	result = bytes_read;
+
+cleanup:
+	close(fd);
+	return result;
+}
+
+static int read_test_file(uint8_t *buf, size_t len, char *filename,
+			  int block_idx)
+{
+	int fd = -1;
+	int result;
+	int bytes_read = 0;
+	size_t bytes_to_read = len;
+	off_t offset = ((off_t)block_idx) * INCFS_DATA_FILE_BLOCK_SIZE;
+
+	fd = open(filename, O_RDONLY | O_CLOEXEC);
+	if (fd <= 0)
+		return fd;
+
+	if (lseek(fd, offset, SEEK_SET) != offset) {
+		result = -errno;
+		print_error("Seek error");
+		goto cleanup;
+	}
+
+	while (bytes_read < bytes_to_read) {
+		int read_result =
+			read(fd, buf + bytes_read, bytes_to_read - bytes_read);
+		if (read_result < 0) {
+			result = -errno;
+			goto cleanup;
+		} else if (read_result == 0)
+			break;
+
+		bytes_read += read_result;
+	}
+	result = bytes_read;
+
+cleanup:
+	close(fd);
+	return result;
+}
+
+static char *create_backing_dir(const char *mount_dir)
+{
+	struct stat st;
+	char backing_dir_name[255];
+
+	snprintf(backing_dir_name, ARRAY_SIZE(backing_dir_name), "%s-src",
+		 mount_dir);
+
+	if (stat(backing_dir_name, &st) == 0) {
+		if (S_ISDIR(st.st_mode)) {
+			int error = delete_dir_tree(backing_dir_name);
+
+			if (error) {
+				ksft_print_msg(
+				      "Can't delete existing backing dir. %d\n",
+				      error);
+				return NULL;
+			}
+		} else {
+			if (unlink(backing_dir_name)) {
+				print_error("Can't clear backing dir");
+				return NULL;
+			}
+		}
+	}
+
+	if (mkdir(backing_dir_name, 0777)) {
+		if (errno != EEXIST) {
+			print_error("Can't open/create backing dir");
+			return NULL;
+		}
+	}
+
+	return strdup(backing_dir_name);
+}
+
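+/*
+ * Re-read the file (optionally in shuffled block order) and compare each
+ * block against the seeded PRNG data it was filled with.
+ */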
+static int validate_test_file_content_with_seed(const char *mount_dir,
+						struct test_file *file,
+						unsigned int shuffle_seed)
+{
+	int error = -1;
+	char *filename = concat_file_name(mount_dir, file->name);
+	off_t size = file->size;
+	loff_t actual_size = get_file_size(filename);
+	int block_cnt = 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	int *block_indexes = NULL;
+	int i;
+
+	block_indexes = alloca(sizeof(int) * block_cnt);
+	for (i = 0; i < block_cnt; i++)
+		block_indexes[i] = i;
+
+	if (shuffle_seed != 0)
+		shuffle(block_indexes, block_cnt, shuffle_seed);
+
+	if (actual_size != size) {
+		ksft_print_msg(
+			"File size doesn't match. name: %s expected size:%ld actual size:%ld\n",
+			filename, size, actual_size);
+		error = -1;
+		goto failure;
+	}
+
+	for (i = 0; i < block_cnt; i++) {
+		int block_idx = block_indexes[i];
+		uint8_t expected_block[INCFS_DATA_FILE_BLOCK_SIZE];
+		uint8_t actual_block[INCFS_DATA_FILE_BLOCK_SIZE];
+		int seed = get_file_block_seed(file->index, block_idx);
+		size_t bytes_to_compare = min(
+			(off_t)INCFS_DATA_FILE_BLOCK_SIZE,
+			size - ((off_t)block_idx) * INCFS_DATA_FILE_BLOCK_SIZE);
+		int read_result =
+			read_test_file(actual_block, INCFS_DATA_FILE_BLOCK_SIZE,
+				       filename, block_idx);
+		if (read_result < 0) {
+			ksft_print_msg(
+				"Error reading block %d from file %s. Error: %s\n",
+				block_idx, filename, strerror(-read_result));
+			error = read_result;
+			goto failure;
+		}
+		rnd_buf(expected_block, INCFS_DATA_FILE_BLOCK_SIZE, seed);
+		if (memcmp(expected_block, actual_block, bytes_to_compare)) {
+			ksft_print_msg(
+				"File contents don't match. name: %s block:%d\n",
+				file->name, block_idx);
+			error = -2;
+			goto failure;
+		}
+	}
+	free(filename);
+	return 0;
+
+failure:
+	free(filename);
+	return error;
+}
+
+static int validate_test_file_content(const char *mount_dir,
+				      struct test_file *file)
+{
+	return validate_test_file_content_with_seed(mount_dir, file, 0);
+}
+
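+/*
+ * Serve readers: wait for pending read events on the command file and emit
+ * the matching test block for each, until the reads time out.
+ */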
+static int data_producer(const char *mount_dir, struct test_files_set *test_set)
+{
+	int ret = 0;
+	int timeout_ms = 1000;
+	struct incfs_pending_read_info prs[100] = {};
+	int prs_size = ARRAY_SIZE(prs);
+	int fd = open_commands_file(mount_dir);
+
+	if (fd < 0)
+		return -errno;
+
+	while ((ret = wait_for_pending_reads(fd, timeout_ms, prs, prs_size)) >
+	       0) {
+		int read_count = ret;
+		int i;
+
+		for (i = 0; i < read_count; i++) {
+			int j = 0;
+			struct test_file *file = NULL;
+
+			for (j = 0; j < test_set->files_count; j++) {
+				bool same = same_id(&(test_set->files[j].id),
+					&(prs[i].file_id));
+
+				if (same) {
+					file = &test_set->files[j];
+					break;
+				}
+			}
+			if (!file) {
+				ksft_print_msg(
+					"Unknown file in pending reads.\n");
+				break;
+			}
+
+			ret = emit_test_block(mount_dir, file,
+				prs[i].block_index);
+			if (ret < 0) {
+				ksft_print_msg("Emitting test data error: %s\n",
+						strerror(-ret));
+				break;
+			}
+		}
+	}
+	close(fd);
+	return ret;
+}
+
+static int data_producer2(const char *mount_dir,
+			  struct test_files_set *test_set)
+{
+	int ret = 0;
+	int timeout_ms = 1000;
+	struct incfs_pending_read_info2 prs[100] = {};
+	int prs_size = ARRAY_SIZE(prs);
+	int fd = open_commands_file(mount_dir);
+
+	if (fd < 0)
+		return -errno;
+
+	while ((ret = wait_for_pending_reads2(fd, timeout_ms, prs, prs_size)) >
+	       0) {
+		int read_count = ret;
+		int i;
+
+		for (i = 0; i < read_count; i++) {
+			int j = 0;
+			struct test_file *file = NULL;
+
+			for (j = 0; j < test_set->files_count; j++) {
+				bool same = same_id(&(test_set->files[j].id),
+					&(prs[i].file_id));
+
+				if (same) {
+					file = &test_set->files[j];
+					break;
+				}
+			}
+			if (!file) {
+				ksft_print_msg(
+					"Unknown file in pending reads.\n");
+				break;
+			}
+
+			ret = emit_test_block(mount_dir, file,
+				prs[i].block_index);
+			if (ret < 0) {
+				ksft_print_msg("Emitting test data error: %s\n",
+						strerror(-ret));
+				break;
+			}
+		}
+	}
+	close(fd);
+	return ret;
+}
+
+static int build_mtree(struct test_file *file)
+{
+	char data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+	const int digest_size = SHA256_DIGEST_SIZE;
+	const int hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size;
+	int block_count = 0;
+	int hash_block_count = 0;
+	int total_tree_block_count = 0;
+	int tree_lvl_index[INCFS_MAX_MTREE_LEVELS] = {};
+	int tree_lvl_count[INCFS_MAX_MTREE_LEVELS] = {};
+	int levels_count = 0;
+	int i, level;
+
+	if (file->size == 0)
+		return 0;
+
+	block_count = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	hash_block_count = block_count;
+	for (i = 0; hash_block_count > 1; i++) {
+		hash_block_count = (hash_block_count + hash_per_block - 1)
+			/ hash_per_block;
+		tree_lvl_count[i] = hash_block_count;
+		total_tree_block_count += hash_block_count;
+	}
+	levels_count = i;
+
+	for (i = 0; i < levels_count; i++) {
+		int prev_lvl_base = (i == 0) ? total_tree_block_count :
+			tree_lvl_index[i - 1];
+
+		tree_lvl_index[i] = prev_lvl_base - tree_lvl_count[i];
+	}
+
+	file->mtree_block_count = total_tree_block_count;
+	if (block_count == 1) {
+		int seed = get_file_block_seed(file->index, 0);
+
+		memset(data, 0, INCFS_DATA_FILE_BLOCK_SIZE);
+		rnd_buf((uint8_t *)data, file->size, seed);
+		sha256(data, INCFS_DATA_FILE_BLOCK_SIZE, file->root_hash);
+		return 0;
+	}
+
+	file->mtree = calloc(total_tree_block_count, sizeof(*file->mtree));
+	if (!file->mtree)
+		return -ENOMEM;
+	/* Build level 0 hashes. */
+	for (i = 0; i < block_count; i++) {
+		off_t offset = i * INCFS_DATA_FILE_BLOCK_SIZE;
+		size_t block_size = INCFS_DATA_FILE_BLOCK_SIZE;
+		int block_index = tree_lvl_index[0] +
+					i / hash_per_block;
+		int block_off = (i % hash_per_block) * digest_size;
+		int seed = get_file_block_seed(file->index, i);
+		char *hash_ptr = file->mtree[block_index].data + block_off;
+
+		if (file->size - offset < block_size) {
+			block_size = file->size - offset;
+			memset(data, 0, INCFS_DATA_FILE_BLOCK_SIZE);
+		}
+
+		rnd_buf((uint8_t *)data, block_size, seed);
+		sha256(data, INCFS_DATA_FILE_BLOCK_SIZE, hash_ptr);
+	}
+
+	/* Build higher levels of hash tree. */
+	for (level = 1; level < levels_count; level++) {
+		int prev_lvl_base = tree_lvl_index[level - 1];
+		int prev_lvl_count = tree_lvl_count[level - 1];
+
+		for (i = 0; i < prev_lvl_count; i++) {
+			int block_index =
+				i / hash_per_block + tree_lvl_index[level];
+			int block_off = (i % hash_per_block) * digest_size;
+			char *hash_ptr =
+				file->mtree[block_index].data + block_off;
+
+			sha256(file->mtree[i + prev_lvl_base].data,
+			       INCFS_DATA_FILE_BLOCK_SIZE, hash_ptr);
+		}
+	}
+
+	/* Calculate root hash from the top block */
+	sha256(file->mtree[0].data,
+		INCFS_DATA_FILE_BLOCK_SIZE, file->root_hash);
+
+	return 0;
+}
+
+static int load_hash_tree(const char *mount_dir, struct test_file *file)
+{
+	int err;
+	int i;
+	int fd;
+	struct incfs_fill_blocks fill_blocks = {
+		.count = file->mtree_block_count,
+	};
+	struct incfs_fill_block *fill_block_array =
+		calloc(fill_blocks.count, sizeof(struct incfs_fill_block));
+
+	if (fill_blocks.count == 0)
+		return 0;
+
+	if (!fill_block_array)
+		return -ENOMEM;
+	fill_blocks.fill_blocks = ptr_to_u64(fill_block_array);
+
+	for (i = 0; i < fill_blocks.count; i++) {
+		fill_block_array[i] = (struct incfs_fill_block){
+			.block_index = i,
+			.data_len = INCFS_DATA_FILE_BLOCK_SIZE,
+			.data = ptr_to_u64(file->mtree[i].data),
+			.flags = INCFS_BLOCK_FLAGS_HASH
+		};
+	}
+
+	fd = open_file_by_id(mount_dir, file->id, false);
+	if (fd < 0) {
+		err = errno;
+		goto failure;
+	}
+
+	err = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+	close(fd);
+	if (err >= 0) {
+		err = -EPERM;
+		goto failure;
+	}
+
+	fd = open_file_by_id(mount_dir, file->id, true);
+	if (fd < 0) {
+		err = errno;
+		goto failure;
+	}
+
+	err = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+	close(fd);
+	if (err < fill_blocks.count)
+		err = errno;
+	else {
+		err = 0;
+		free(file->mtree);
+	}
+
+failure:
+	free(fill_block_array);
+	return err;
+}
+
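+/*
+ * Verify that the synthetic .index directory rejects mkdir, rmdir, create,
+ * unlink, rename and link attempts.
+ */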
+static int cant_touch_index_test(const char *mount_dir)
+{
+	char *file_name = "test_file";
+	int file_size = 123;
+	incfs_uuid_t file_id;
+	char *index_path = concat_file_name(mount_dir, ".index");
+	char *subdir = concat_file_name(index_path, "subdir");
+	char *dst_name = concat_file_name(mount_dir, "something");
+	char *filename_in_index = NULL;
+	char *file_path = concat_file_name(mount_dir, file_name);
+	char *backing_dir;
+	int cmd_fd = -1;
+	int err;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS and release the backing file. */
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+	free(backing_dir);
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	err = mkdir(subdir, 0777);
+	if (err == 0 || errno != EBUSY) {
+		print_error("Shouldn't be able to crate subdir in index\n");
+		goto failure;
+	}
+
+	err = rmdir(index_path);
+	if (err == 0 || errno != EBUSY) {
+		print_error(".index directory should not be removed\n");
+		goto failure;
+	}
+
+	err = emit_file(cmd_fd, ".index", file_name, &file_id,
+				file_size, NULL);
+	if (err != -EBUSY) {
+		print_error("Shouldn't be able to create a file in index\n");
+		goto failure;
+	}
+
+	err = emit_file(cmd_fd, NULL, file_name, &file_id,
+				file_size, NULL);
+	if (err < 0)
+		goto failure;
+	filename_in_index = get_index_filename(mount_dir, file_id);
+
+	err = unlink(filename_in_index);
+	if (err == 0 || errno != EBUSY) {
+		print_error("Shouldn't be able to delete from index\n");
+		goto failure;
+	}
+
+	err = rename(filename_in_index, dst_name);
+	if (err == 0 || errno != EBUSY) {
+		print_error("Shouldn't be able to move from index\n");
+		goto failure;
+	}
+
+	free(filename_in_index);
+	filename_in_index = concat_file_name(index_path, "abc");
+	err = link(file_path, filename_in_index);
+	if (err == 0 || errno != EBUSY) {
+		print_error("Shouldn't be able to link inside index\n");
+		goto failure;
+	}
+
+	err = rename(index_path, dst_name);
+	if (err == 0 || errno != EBUSY) {
+		print_error("Shouldn't be able to rename .index directory\n");
+		goto failure;
+	}
+
+	close(cmd_fd);
+	free(subdir);
+	free(index_path);
+	free(dst_name);
+	free(filename_in_index);
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmount FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	free(subdir);
+	free(dst_name);
+	free(index_path);
+	free(filename_in_index);
+	close(cmd_fd);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
+static bool iterate_directory(const char *dir_to_iterate, bool root,
+			      int file_count)
+{
+	struct expected_name {
+		const char *name;
+		bool root_only;
+		bool found;
+	} names[] = {
+		{INCFS_LOG_FILENAME, true, false},
+		{INCFS_PENDING_READS_FILENAME, true, false},
+		{INCFS_BLOCKS_WRITTEN_FILENAME, true, false},
+		{".index", true, false},
+		{".incomplete", true, false},
+		{"..", false, false},
+		{".", false, false},
+	};
+
+	bool pass = true, found;
+	int i;
+
+	/* Test directory iteration */
+	int fd = open(dir_to_iterate, O_RDONLY | O_DIRECTORY | O_CLOEXEC);
+
+	if (fd < 0) {
+		print_error("Can't open directory\n");
+		return false;
+	}
+
+	for (;;) {
+		/* Enough space for one dirent with the longest possible name */
+		char buf[sizeof(struct linux_dirent64) + NAME_MAX];
+		struct linux_dirent64 *dirent = (struct linux_dirent64 *) buf;
+		int nread;
+		int i;
+
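+		/*
+		 * Probe getdents64 with progressively larger buffers:
+		 * undersized buffers should keep failing with EINVAL
+		 * until one whole entry fits.
+		 */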
+		for (i = 0; i < NAME_MAX; ++i) {
+			nread = syscall(__NR_getdents64, fd, buf,
+					 sizeof(struct linux_dirent64) + i);
+
+			if (nread >= 0)
+				break;
+			if (errno != EINVAL)
+				break;
+		}
+
+		if (nread == 0)
+			break;
+		if (nread < 0) {
+			print_error("Error iterating directory\n");
+			pass = false;
+			goto failure;
+		}
+
+		/* getdents64 pads each record to an 8-byte boundary. This
+		 * may be an implementation detail rather than a guarantee,
+		 * but it is a useful check for the moment.
+		 */
+		if (nread != (((sizeof(struct linux_dirent64)
+				+ strlen(dirent->d_name) + 1) + 7) & ~7)) {
+			print_error("Wrong dirent size");
+			pass = false;
+			goto failure;
+		}
+
+		found = false;
+		for (i = 0; i < ARRAY_SIZE(names); ++i)
+			if (!strcmp(dirent->d_name, names[i].name)) {
+				if (names[i].root_only && !root) {
+					print_error("Root file error");
+					pass = false;
+					goto failure;
+				}
+
+				if (names[i].found) {
+					print_error("File appears twice");
+					pass = false;
+					goto failure;
+				}
+
+				names[i].found = true;
+				found = true;
+				break;
+			}
+
+		if (!found)
+			--file_count;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(names); ++i) {
+		if (!names[i].found)
+			if (root || !names[i].root_only) {
+				print_error("Expected file not present");
+				pass = false;
+				goto failure;
+			}
+	}
+
+	if (file_count) {
+		print_error("Wrong number of files\n");
+		pass = false;
+		goto failure;
+	}
+
+failure:
+	close(fd);
+	return pass;
+}
+
+static int basic_file_ops_test(const char *mount_dir)
+{
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	char *subdir1 = concat_file_name(mount_dir, "subdir1");
+	char *subdir2 = concat_file_name(mount_dir, "subdir2");
+	char *backing_dir;
+	int cmd_fd = -1;
+	int i, err;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS and release the backing file. */
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+	free(backing_dir);
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	err = mkdir(subdir1, 0777);
+	if (err < 0 && errno != EEXIST) {
+		print_error("Can't create subdir1\n");
+		goto failure;
+	}
+
+	err = mkdir(subdir2, 0777);
+	if (err < 0 && errno != EEXIST) {
+		print_error("Can't create subdir2\n");
+		goto failure;
+	}
+
+	/* Create all test files in subdir1 directory */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		loff_t size;
+		char *file_path = concat_file_name(subdir1, file->name);
+
+		err = emit_file(cmd_fd, "subdir1", file->name, &file->id,
+				     file->size, NULL);
+		if (err < 0)
+			goto failure;
+
+		size = get_file_size(file_path);
+		free(file_path);
+		if (size != file->size) {
+			ksft_print_msg("Wrong size %lld of %s.\n",
+				size, file->name);
+			goto failure;
+		}
+	}
+
+	if (!iterate_directory(subdir1, false, file_num))
+		goto failure;
+
+	/* Link the files to subdir2 */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *src_name = concat_file_name(subdir1, file->name);
+		char *dst_name = concat_file_name(subdir2, file->name);
+		loff_t size;
+
+		err = link(src_name, dst_name);
+		if (err < 0) {
+			print_error("Can't move file\n");
+			goto failure;
+		}
+
+		size = get_file_size(dst_name);
+		if (size != file->size) {
+			ksft_print_msg("Wrong size %lld of %s.\n",
+				size, file->name);
+			goto failure;
+		}
+		free(src_name);
+		free(dst_name);
+	}
+
+	/* Move the files from subdir2 to the mount dir */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *src_name = concat_file_name(subdir2, file->name);
+		char *dst_name = concat_file_name(mount_dir, file->name);
+		loff_t size;
+
+		err = rename(src_name, dst_name);
+		if (err < 0) {
+			print_error("Can't move file\n");
+			goto failure;
+		}
+
+		size = get_file_size(dst_name);
+		if (size != file->size) {
+			ksft_print_msg("Wrong size %lld of %s.\n",
+				size, file->name);
+			goto failure;
+		}
+		free(src_name);
+		free(dst_name);
+	}
+
+	/* +2 because there are 2 subdirs */
+	if (!iterate_directory(mount_dir, true, file_num + 2))
+		goto failure;
+
+	/* Open and close all files from the mount dir */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *path = concat_file_name(mount_dir, file->name);
+		int fd;
+
+		fd = open(path, O_RDWR | O_CLOEXEC);
+		free(path);
+		if (fd < 0) {
+			print_error("Can't open file");
+			goto failure;
+		}
+		if (close(fd)) {
+			print_error("Can't close file");
+			goto failure;
+		}
+	}
+
+	/* Delete all files from the mount dir */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *path = concat_file_name(mount_dir, file->name);
+
+		err = unlink(path);
+		free(path);
+		if (err < 0) {
+			print_error("Can't unlink file");
+			goto failure;
+		}
+	}
+
+	err = delete_dir_tree(subdir1);
+	if (err) {
+		ksft_print_msg("Error deleting subdir1 %d", err);
+		goto failure;
+	}
+
+	err = rmdir(subdir2);
+	if (err) {
+		print_error("Error deleting subdir2");
+		goto failure;
+	}
+
+	close(cmd_fd);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmount FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
+static int dynamic_files_and_data_test(const char *mount_dir)
+{
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	const int missing_file_idx = 5;
+	int cmd_fd = -1;
+	char *backing_dir;
+	int i;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS and release the backing file. */
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+	free(backing_dir);
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Check that test files don't exist in the filesystem. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *filename = concat_file_name(mount_dir, file->name);
+
+		if (access(filename, F_OK) != -1) {
+			ksft_print_msg(
+				"File %s somehow already exists in a clean FS.\n",
+				filename);
+			goto failure;
+		}
+		free(filename);
+	}
+
+	/* Write test data into the command file. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		int res;
+
+		build_mtree(file);
+		res = emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, NULL);
+		if (res < 0) {
+			ksft_print_msg("Error %s emitting file %s.\n",
+				       strerror(-res), file->name);
+			goto failure;
+		}
+
+		/*
+		 * Skip writing data to one file so we can check
+		 * that it's missing later.
+		 */
+		if (i == missing_file_idx)
+			continue;
+
+		res = emit_test_file_data(mount_dir, file);
+		if (res) {
+			ksft_print_msg("Error %s emitting data for %s.\n",
+				       strerror(-res), file->name);
+			goto failure;
+		}
+	}
+
+	/* Validate contents of the FS */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (i == missing_file_idx) {
+			/*
+			 * No data has been written to this file,
+			 * so any read should report an error.
+			 */
+			uint8_t buf;
+			char *filename =
+				concat_file_name(mount_dir, file->name);
+			int res = read_test_file(&buf, 1, filename, 0);
+
+			free(filename);
+			if (res > 0) {
+				ksft_print_msg(
+					"Data present, even though never written.\n");
+				goto failure;
+			}
+			if (res != -ETIME) {
+				ksft_print_msg("Wrong error code: %d.\n", res);
+				goto failure;
+			}
+		} else {
+			if (validate_test_file_content(mount_dir, file) < 0)
+				goto failure;
+		}
+	}
+
+	close(cmd_fd);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmount FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
+static int concurrent_reads_and_writes_test(const char *mount_dir)
+{
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	/* Validate each file from that many child processes. */
+	const int child_multiplier = 3;
+	int cmd_fd = -1;
+	char *backing_dir;
+	int status;
+	int i;
+	pid_t producer_pid;
+	pid_t *child_pids = alloca(child_multiplier * file_num * sizeof(pid_t));
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS and release the backing file. */
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+	free(backing_dir);
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Tell FS about the files, without actually providing the data. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		int res;
+
+		res = emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, NULL);
+		if (res)
+			goto failure;
+	}
+
+	/* Start child processes accessing data in the files */
+	for (i = 0; i < file_num * child_multiplier; i++) {
+		struct test_file *file = &test.files[i / child_multiplier];
+		pid_t child_pid = flush_and_fork();
+
+		if (child_pid == 0) {
+			/* This is a child process, do the data validation. */
+			int ret = validate_test_file_content_with_seed(
+				mount_dir, file, i);
+			if (ret >= 0) {
+				/* Zero exit status if data is valid. */
+				exit(0);
+			}
+
+			/* Positive status if validation error found. */
+			exit(-ret);
+		} else if (child_pid > 0) {
+			child_pids[i] = child_pid;
+		} else {
+			print_error("Fork error");
+			goto failure;
+		}
+	}
+
+	producer_pid = flush_and_fork();
+	if (producer_pid == 0) {
+		int ret;
+		/*
+		 * This is a child that should provide data to
+		 * pending reads.
+		 */
+
+		ret = data_producer(mount_dir, &test);
+		exit(-ret);
+	} else {
+		status = wait_for_process(producer_pid);
+		if (status != 0) {
+			ksft_print_msg("Data producer failed. %d (%s)\n",
+				       status, strerror(status));
+			goto failure;
+		}
+	}
+
+	/* Check that all children have finished with a 0 exit status */
+	for (i = 0; i < file_num * child_multiplier; i++) {
+		struct test_file *file = &test.files[i / child_multiplier];
+
+		status = wait_for_process(child_pids[i]);
+		if (status != 0) {
+			ksft_print_msg(
+				"Validation for the file %s failed with code %d (%s)\n",
+				file->name, status, strerror(status));
+			goto failure;
+		}
+	}
+
+	/* Check that there are no pending reads left */
+	{
+		struct incfs_pending_read_info prs[1] = {};
+		int timeout = 0;
+		int read_count = wait_for_pending_reads(cmd_fd, timeout, prs,
+							ARRAY_SIZE(prs));
+
+		if (read_count) {
+			ksft_print_msg(
+				"Pending reads remain after all data was written\n");
+			goto failure;
+		}
+	}
+
+	close(cmd_fd);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmount FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
+static int work_after_remount_test(const char *mount_dir)
+{
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	const int file_num_stage1 = file_num / 2;
+	const int file_num_stage2 = file_num;
+	char *backing_dir = NULL;
+	int i = 0;
+	int cmd_fd = -1;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS; keep the backing dir path for the remounts below. */
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Write first half of the data into the command file. (stage 1) */
+	for (i = 0; i < file_num_stage1; i++) {
+		struct test_file *file = &test.files[i];
+
+		build_mtree(file);
+		if (emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, NULL))
+			goto failure;
+
+		if (emit_test_file_data(mount_dir, file))
+			goto failure;
+	}
+
+	/* Unmount and mount again, to see that data is persistent. */
+	close(cmd_fd);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmount FS");
+		goto failure;
+	}
+
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Write the second half of the data into the command file. (stage 2) */
+	for (; i < file_num_stage2; i++) {
+		struct test_file *file = &test.files[i];
+		int res = emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, NULL);
+
+		if (res)
+			goto failure;
+
+		if (emit_test_file_data(mount_dir, file))
+			goto failure;
+	}
+
+	/* Validate contents of the FS */
+	for (i = 0; i < file_num_stage2; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (validate_test_file_content(mount_dir, file) < 0)
+			goto failure;
+	}
+
+	/* Delete all files */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *filename = concat_file_name(mount_dir, file->name);
+		char *filename_in_index = get_index_filename(mount_dir,
+							file->id);
+
+		if (access(filename, F_OK) != 0) {
+			ksft_print_msg("File %s is not visible.\n", filename);
+			goto failure;
+		}
+
+		if (access(filename_in_index, F_OK) != 0) {
+			ksft_print_msg("File %s is not visible.\n",
+				filename_in_index);
+			goto failure;
+		}
+
+		unlink(filename);
+
+		if (access(filename, F_OK) != -1) {
+			ksft_print_msg("File %s is still present.\n", filename);
+			goto failure;
+		}
+
+		if (access(filename_in_index, F_OK) != 0) {
+			ksft_print_msg("File %s is no longer in index.\n",
+				filename_in_index);
+			goto failure;
+		}
+		free(filename);
+		free(filename_in_index);
+	}
+
+	/* Unmount and mount again, to see that deleted files stay deleted. */
+	close(cmd_fd);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmount FS");
+		goto failure;
+	}
+
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Validate all deleted files are still deleted. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *filename = concat_file_name(mount_dir, file->name);
+
+		if (access(filename, F_OK) != -1) {
+			ksft_print_msg("File %s is still visible.\n", filename);
+			goto failure;
+		}
+		free(filename);
+	}
+
+	/* Final unmount */
+	close(cmd_fd);
+	free(backing_dir);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmount FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	free(backing_dir);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
+static int attribute_test(const char *mount_dir)
+{
+	char file_attr[] = "metadata123123";
+	char attr_buf[INCFS_MAX_FILE_ATTR_SIZE] = {};
+	int cmd_fd = -1;
+	incfs_uuid_t file_id;
+	int attr_res = 0;
+	char *backing_dir;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS; keep the backing dir path for the remount below. */
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	if (emit_file(cmd_fd, NULL, "file", &file_id, 12, file_attr))
+		goto failure;
+
+	/* Test attribute values */
+	attr_res = get_file_attr(mount_dir, file_id, attr_buf,
+		ARRAY_SIZE(attr_buf));
+	if (attr_res != strlen(file_attr)) {
+		ksft_print_msg("Get file attr error: %d\n", attr_res);
+		goto failure;
+	}
+	if (strcmp(attr_buf, file_attr) != 0) {
+		ksft_print_msg("Incorrect file attr value: '%s'", attr_buf);
+		goto failure;
+	}
+
+	/* Unmount and mount again, to see that attributes are persistent. */
+	close(cmd_fd);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmount FS");
+		goto failure;
+	}
+
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Test attribute values again after remount */
+	attr_res = get_file_attr(mount_dir, file_id, attr_buf,
+		ARRAY_SIZE(attr_buf));
+	if (attr_res != strlen(file_attr)) {
+		ksft_print_msg("Get file attr error: %d\n", attr_res);
+		goto failure;
+	}
+	if (strcmp(attr_buf, file_attr) != 0) {
+		ksft_print_msg("Incorrect file attr value: '%s'", attr_buf);
+		goto failure;
+	}
+
+	/* Final unmount */
+	close(cmd_fd);
+	free(backing_dir);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmount FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	free(backing_dir);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
+static int child_procs_waiting_for_data_test(const char *mount_dir)
+{
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	int cmd_fd = -1;
+	int i;
+	pid_t *child_pids = alloca(file_num * sizeof(pid_t));
+	char *backing_dir;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS with a 10s read timeout. */
+	if (mount_fs(mount_dir, backing_dir, 10000) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Tell FS about the files, without actually providing the data. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (emit_file(cmd_fd, NULL, file->name, &file->id,
+			      file->size, NULL))
+			goto failure;
+	}
+
+	/* Start child processes accessing data in the files */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		pid_t child_pid = flush_and_fork();
+
+		if (child_pid == 0) {
+			/* This is a child process, do the data validation. */
+			int ret = validate_test_file_content(mount_dir, file);
+
+			if (ret >= 0) {
+				/* Zero exit status if data is valid. */
+				exit(0);
+			}
+
+			/* Positive status if validation error found. */
+			exit(-ret);
+		} else if (child_pid > 0) {
+			child_pids[i] = child_pid;
+		} else {
+			print_error("Fork error");
+			goto failure;
+		}
+	}
+
+	/* Write test data into the command file. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (emit_test_file_data(mount_dir, file))
+			goto failure;
+	}
+
+	/* Check that all children have finished with a 0 exit status */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		int status = wait_for_process(child_pids[i]);
+
+		if (status != 0) {
+			ksft_print_msg(
+				"Validation for the file %s failed with code %d (%s)\n",
+				file->name, status, strerror(status));
+			goto failure;
+		}
+	}
+
+	close(cmd_fd);
+	free(backing_dir);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmount FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	free(backing_dir);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
+static int multiple_providers_test(const char *mount_dir)
+{
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	const int producer_count = 5;
+	int cmd_fd = -1;
+	int status;
+	int i;
+	pid_t *producer_pids = alloca(producer_count * sizeof(pid_t));
+	char *backing_dir;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS with a 10s read timeout and UID reporting. */
+	if (mount_fs_opt(mount_dir, backing_dir,
+			 "read_timeout_ms=10000,report_uid", false) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Tell FS about the files, without actually providing the data. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, NULL) < 0)
+			goto failure;
+	}
+
+	/* Start producer processes */
+	for (i = 0; i < producer_count; i++) {
+		pid_t producer_pid = flush_and_fork();
+
+		if (producer_pid == 0) {
+			int ret;
+			/*
+			 * This is a child that should provide data to
+			 * pending reads.
+			 */
+
+			ret = data_producer2(mount_dir, &test);
+			exit(-ret);
+		} else if (producer_pid > 0) {
+			producer_pids[i] = producer_pid;
+		} else {
+			print_error("Fork error");
+			goto failure;
+		}
+	}
+
+	/* Validate FS content */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		char *filename = concat_file_name(mount_dir, file->name);
+		loff_t read_result = read_whole_file(filename);
+
+		free(filename);
+		if (read_result != file->size) {
+			ksft_print_msg(
+				"Error validating file %s. Result: %lld\n",
+				file->name, read_result);
+			goto failure;
+		}
+	}
+
+	/* Check that all producers have finished with a 0 exit status */
+	for (i = 0; i < producer_count; i++) {
+		status = wait_for_process(producer_pids[i]);
+		if (status != 0) {
+			ksft_print_msg("Producer %d failed with code %d (%s)\n",
+				       i, status, strerror(status));
+			goto failure;
+		}
+	}
+
+	close(cmd_fd);
+	free(backing_dir);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmount FS");
+		goto failure;
+	}
+
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	free(backing_dir);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
+static int hash_tree_test(const char *mount_dir)
+{
+	char *backing_dir;
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	const int corrupted_file_idx = 5;
+	int i = 0;
+	int cmd_fd = -1;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	/* Mount FS; the backing dir is reused for the remount below. */
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Write hashes and data. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+		int res;
+
+		build_mtree(file);
+		res = crypto_emit_file(cmd_fd, NULL, file->name, &file->id,
+				       file->size, file->root_hash,
+				       file->sig.add_data);
+
+		if (i == corrupted_file_idx) {
+			/* Corrupt the third block's hash */
+			file->mtree[0].data[2 * SHA256_DIGEST_SIZE] ^= 0xff;
+		}
+		if (emit_test_file_data(mount_dir, file))
+			goto failure;
+
+		res = load_hash_tree(mount_dir, file);
+		if (res) {
+			ksft_print_msg("Can't load hashes for %s. error: %s\n",
+				file->name, strerror(-res));
+			goto failure;
+		}
+	}
+
+	/* Validate data */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (i == corrupted_file_idx) {
+			uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+			char *filename =
+				concat_file_name(mount_dir, file->name);
+			int res;
+
+			res = read_test_file(data, INCFS_DATA_FILE_BLOCK_SIZE,
+					     filename, 2);
+			free(filename);
+			if (res != -EBADMSG) {
+				ksft_print_msg(
+					"Hash violation missed (before remount): %d\n",
+					res);
+				goto failure;
+			}
+		} else if (validate_test_file_content(mount_dir, file) < 0)
+			goto failure;
+	}
+
+	/* Unmount and mount again, to check that hashes are persistent. */
+	close(cmd_fd);
+	cmd_fd = -1;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmount FS");
+		goto failure;
+	}
+	if (mount_fs(mount_dir, backing_dir, 50) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Validate data again */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (i == corrupted_file_idx) {
+			uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+			char *filename =
+				concat_file_name(mount_dir, file->name);
+			int res;
+
+			res = read_test_file(data, INCFS_DATA_FILE_BLOCK_SIZE,
+					     filename, 2);
+			free(filename);
+			if (res != -EBADMSG) {
+				ksft_print_msg(
+					"Hash violation missed (after remount): %d\n",
+					res);
+				goto failure;
+			}
+		} else if (validate_test_file_content(mount_dir, file) < 0)
+			goto failure;
+	}
+
+	/* Final unmount */
+	close(cmd_fd);
+	cmd_fd = -1;
+	free(backing_dir);
+	backing_dir = NULL;
+	if (umount(mount_dir) != 0) {
+		print_error("Can't unmount FS");
+		goto failure;
+	}
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	free(backing_dir);
+	umount(mount_dir);
+	return TEST_FAILURE;
+}
+
+enum expected_log { FULL_LOG, NO_LOG, PARTIAL_LOG };
+
+static int validate_logs(const char *mount_dir, int log_fd,
+			 struct test_file *file,
+			 enum expected_log expected_log,
+			 bool report_uid, bool expect_data)
+{
+	int result = TEST_FAILURE;
+	uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+	struct incfs_pending_read_info prs[2048] = {};
+	struct incfs_pending_read_info2 prs2[2048] = {};
+	struct incfs_pending_read_info *previous_record = NULL;
+	int prs_size = ARRAY_SIZE(prs);
+	int block_count = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	int expected_read_count, read_count, block_index, read_index;
+	char *filename = NULL;
+	int fd = -1;
+
+	TEST(filename = concat_file_name(mount_dir, file->name), filename);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+
+	if (block_count > prs_size)
+		block_count = prs_size;
+	expected_read_count = block_count;
+
+	for (block_index = 0; block_index < block_count; block_index++) {
+		ssize_t read_result = pread(fd, data, sizeof(data),
+			    INCFS_DATA_FILE_BLOCK_SIZE * block_index);
+
+		/* Make some read logs of type SAME_FILE_NEXT_BLOCK */
+		if (block_index % 100 == 10)
+			usleep(20000);
+
+		/* Skip some blocks to make logs of type SAME_FILE */
+		if (block_index % 10 == 5) {
+			++block_index;
+			--expected_read_count;
+		}
+
+		if (expect_data)
+			TESTCOND(read_result > 0);
+
+		if (!expect_data)
+			TESTEQUAL(read_result, -1);
+	}
+
+	if (report_uid)
+		read_count = wait_for_pending_reads2(log_fd,
+				expected_log == NO_LOG ? 10 : 0,
+				prs2, prs_size);
+	else
+		read_count = wait_for_pending_reads(log_fd,
+				expected_log == NO_LOG ? 10 : 0,
+				prs, prs_size);
+
+	if (expected_log == NO_LOG)
+		TESTEQUAL(read_count, 0);
+
+	if (expected_log == PARTIAL_LOG)
+		TESTCOND(read_count > 0 &&
+			 read_count <= expected_read_count);
+
+	if (expected_log == FULL_LOG)
+		TESTEQUAL(read_count, expected_read_count);
+
+	/* If read less than expected, advance block_index appropriately */
+	for (block_index = 0, read_index = 0;
+	     read_index < expected_read_count - read_count;
+	     block_index++, read_index++)
+		if (block_index % 10 == 5)
+			++block_index;
+
+	for (read_index = 0; read_index < read_count;
+	     block_index++, read_index++) {
+		struct incfs_pending_read_info *record = report_uid ?
+			(struct incfs_pending_read_info *) &prs2[read_index] :
+			&prs[read_index];
+
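+		/*
+		 * The cast above relies on incfs_pending_read_info2
+		 * starting with the same fields as incfs_pending_read_info
+		 * (assumed here), so only the common prefix is read.
+		 */
+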
+		TESTCOND(same_id(&record->file_id, &file->id));
+		TESTEQUAL(record->block_index, block_index);
+		TESTNE(record->timestamp_us, 0);
+		if (previous_record)
+			TESTEQUAL(record->serial_number,
+				  previous_record->serial_number + 1);
+
+		previous_record = record;
+		if (block_index % 10 == 5)
+			++block_index;
+	}
+
+	result = TEST_SUCCESS;
+out:
+	close(fd);
+	free(filename);
+	return result;
+}
+
+static int read_log_test(const char *mount_dir)
+{
+	int result = TEST_FAILURE;
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+	int i = 0;
+	int cmd_fd = -1, log_fd = -1;
+	char *backing_dir = NULL;
+
+	/* Create files */
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+			       "readahead=0,report_uid,read_timeout_ms=0",
+				 false), 0);
+	TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		TESTEQUAL(emit_file(cmd_fd, NULL, file->name, &file->id,
+				    file->size, NULL), 0);
+	}
+	close(cmd_fd);
+	cmd_fd = -1;
+
+	/* Validate logs */
+	TEST(log_fd = open_log_file(mount_dir), log_fd != -1);
+	for (i = 0; i < file_num; i++)
+		TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+					FULL_LOG, true, false), 0);
+
+	/* Unmount and mount again without report_uid */
+	close(log_fd);
+	log_fd = -1;
+	TESTEQUAL(umount(mount_dir), 0);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+			       "readahead=0,read_timeout_ms=0", false), 0);
+
+	TEST(log_fd = open_log_file(mount_dir), log_fd != -1);
+	for (i = 0; i < file_num; i++)
+		TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+					FULL_LOG, false, false), 0);
+
+	/* Mount without a read log to make sure poll doesn't crash */
+	close(log_fd);
+	log_fd = -1;
+	TESTEQUAL(umount(mount_dir), 0);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+			       "readahead=0,rlog_pages=0,read_timeout_ms=0",
+			       false), 0);
+
+	TEST(log_fd = open_log_file(mount_dir), log_fd != -1);
+	for (i = 0; i < file_num; i++)
+		TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+					NO_LOG, false, false), 0);
+
+	/* Remount and check that logs start working again */
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+			       "readahead=0,rlog_pages=1,read_timeout_ms=0",
+			       true), 0);
+	for (i = 0; i < file_num; i++)
+		TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+					PARTIAL_LOG, false, false), 0);
+
+	/* Remount and check that logs continue working */
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+			       "readahead=0,rlog_pages=4,read_timeout_ms=0",
+			       true), 0);
+	for (i = 0; i < file_num; i++)
+		TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+					FULL_LOG, false, false), 0);
+
+	/* Check logs work with data */
+	for (i = 0; i < file_num; i++) {
+		TESTEQUAL(emit_test_file_data(mount_dir, &test.files[i]), 0);
+		TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+					FULL_LOG, false, true), 0);
+	}
+
+	/* Final unmount */
+	close(log_fd);
+	log_fd = -1;
+	TESTEQUAL(umount(mount_dir), 0);
+
+	result = TEST_SUCCESS;
+out:
+	close(cmd_fd);
+	close(log_fd);
+	free(backing_dir);
+	umount(mount_dir);
+	return result;
+}
+
+static int emit_partial_test_file_data(const char *mount_dir,
+				       struct test_file *file)
+{
+	int i, j;
+	int block_cnt = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	int *block_indexes = NULL;
+	int result = 0;
+	int blocks_written = 0;
+	int bw_fd = -1;
+	char buffer[20];
+	struct pollfd pollfd;
+	long blocks_written_total, blocks_written_new_total;
+
+	if (file->size == 0)
+		return 0;
+
+	bw_fd = open_blocks_written_file(mount_dir);
+	if (bw_fd == -1)
+		return -errno;
+
+	result = read(bw_fd, buffer, sizeof(buffer) - 1);
+	if (result <= 0) {
+		result = -EIO;
+		goto out;
+	}
+
+	buffer[result] = 0;
+	blocks_written_total = atol(buffer);
+	result = 0;
+
+	pollfd = (struct pollfd) {
+		.fd = bw_fd,
+		.events = POLLIN,
+	};
+
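+	/* Nothing has been written yet, so poll() must report no event. */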
+	result = poll(&pollfd, 1, 0);
+	if (result) {
+		result = -EIO;
+		goto out;
+	}
+
+	/* Emit 2 blocks, skip 2 blocks, etc. */
+	block_indexes = calloc(block_cnt, sizeof(*block_indexes));
+	if (!block_indexes) {
+		result = -ENOMEM;
+		goto out;
+	}
+	for (i = 0, j = 0; i < block_cnt; ++i)
+		if ((i & 2) == 0) {
+			block_indexes[j] = i;
+			++j;
+		}
+
+	for (i = 0; i < j; i += blocks_written) {
+		blocks_written = emit_test_blocks(mount_dir, file,
+						  block_indexes + i, j - i);
+		if (blocks_written < 0) {
+			result = blocks_written;
+			goto out;
+		}
+		if (blocks_written == 0) {
+			result = -EIO;
+			goto out;
+		}
+
+		result = poll(&pollfd, 1, 0);
+		if (result != 1 || pollfd.revents != POLLIN) {
+			result = -EIO;
+			goto out;
+		}
+
+		result = read(bw_fd, buffer, sizeof(buffer) - 1);
+		if (result <= 0) {
+			result = -EIO;
+			goto out;
+		}
+		buffer[result] = 0;
+		blocks_written_new_total = atol(buffer);
+
+		if (blocks_written_new_total - blocks_written_total
+		    != blocks_written) {
+			result = -EIO;
+			goto out;
+		}
+
+		blocks_written_total = blocks_written_new_total;
+		result = 0;
+	}
+out:
+	free(block_indexes);
+	close(bw_fd);
+	return result;
+}
+
+static int validate_ranges(const char *mount_dir, struct test_file *file)
+{
+	int block_cnt = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	char *filename = concat_file_name(mount_dir, file->name);
+	int fd;
+	struct incfs_filled_range ranges[128];
+	struct incfs_get_filled_blocks_args fba = {
+		.range_buffer = ptr_to_u64(ranges),
+		.range_buffer_size = sizeof(ranges),
+	};
+	int error = TEST_SUCCESS;
+	int i;
+	int range_cnt;
+	int cmd_fd = -1;
+	struct incfs_permit_fill permit_fill;
+
+	fd = open(filename, O_RDONLY | O_CLOEXEC);
+	free(filename);
+	if (fd < 0)
+		return TEST_FAILURE;
+
+	error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+	if (error != -1 || errno != EPERM) {
+		ksft_print_msg("INCFS_IOC_GET_FILLED_BLOCKS not blocked\n");
+		error = -EPERM;
+		goto out;
+	}
+
+	cmd_fd = open_commands_file(mount_dir);
+	permit_fill.file_descriptor = fd;
+	if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+		print_error("INCFS_IOC_PERMIT_FILL failed");
+		error = -EPERM;
+		goto out;
+	}
+
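+	/*
+	 * With the 2-filled/2-empty pattern each filled range covers two
+	 * of every four blocks, so the 128-entry range buffer runs out at
+	 * roughly 509 data blocks; only files at least that large should
+	 * see ERANGE below.
+	 */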
+	error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+	if (error && errno != ERANGE)
+		goto out;
+
+	if (error && errno == ERANGE && block_cnt < 509)
+		goto out;
+
+	if (!error && block_cnt >= 509) {
+		error = -ERANGE;
+		goto out;
+	}
+
+	if (fba.total_blocks_out != block_cnt) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	if (fba.data_blocks_out != block_cnt) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	range_cnt = (block_cnt + 3) / 4;
+	if (range_cnt > 128)
+		range_cnt = 128;
+	if (range_cnt != fba.range_buffer_size_out / sizeof(*ranges)) {
+		error = -ERANGE;
+		goto out;
+	}
+
+	error = TEST_SUCCESS;
+	for (i = 0; i < fba.range_buffer_size_out / sizeof(*ranges) - 1; ++i)
+		if (ranges[i].begin != i * 4 || ranges[i].end != i * 4 + 2) {
+			error = -EINVAL;
+			goto out;
+		}
+
+	if (ranges[i].begin != i * 4 ||
+	    (ranges[i].end != i * 4 + 1 && ranges[i].end != i * 4 + 2)) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	for (i = 0; i < 64; ++i) {
+		fba.start_index = i * 2;
+		fba.end_index = i * 2 + 2;
+		error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+		if (error)
+			goto out;
+
+		if (fba.total_blocks_out != block_cnt) {
+			error = -EINVAL;
+			goto out;
+		}
+
+		if (fba.start_index >= block_cnt) {
+			if (fba.index_out != fba.start_index) {
+				error = -EINVAL;
+				goto out;
+			}
+
+			break;
+		}
+
+		if (i % 2) {
+			if (fba.range_buffer_size_out != 0) {
+				error = -EINVAL;
+				goto out;
+			}
+		} else {
+			if (fba.range_buffer_size_out != sizeof(*ranges)) {
+				error = -EINVAL;
+				goto out;
+			}
+
+			if (ranges[0].begin != i * 2) {
+				error = -EINVAL;
+				goto out;
+			}
+
+			if (ranges[0].end != i * 2 + 1 &&
+			    ranges[0].end != i * 2 + 2) {
+				error = -EINVAL;
+				goto out;
+			}
+		}
+	}
+
+out:
+	close(fd);
+	close(cmd_fd);
+	return error;
+}
+
+static int get_blocks_test(const char *mount_dir)
+{
+	char *backing_dir;
+	int cmd_fd = -1;
+	int i;
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	if (mount_fs_opt(mount_dir, backing_dir, "readahead=0", false) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	/* Write data. */
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (emit_file(cmd_fd, NULL, file->name, &file->id, file->size,
+			      NULL))
+			goto failure;
+
+		if (emit_partial_test_file_data(mount_dir, file))
+			goto failure;
+	}
+
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (validate_ranges(mount_dir, file))
+			goto failure;
+
+		/*
+		 * The smallest files are filled completely, so this checks that
+		 * the fast get_filled_blocks path is not causing issues
+		 */
+		if (validate_ranges(mount_dir, file))
+			goto failure;
+	}
+
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return TEST_FAILURE;
+}
+
+static int emit_partial_test_file_hash(const char *mount_dir,
+				       struct test_file *file)
+{
+	int err;
+	int fd;
+	struct incfs_fill_blocks fill_blocks = {
+		.count = 1,
+	};
+	struct incfs_fill_block *fill_block_array =
+		calloc(fill_blocks.count, sizeof(struct incfs_fill_block));
+	uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+
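+	/*
+	 * 4096 / 32 * 4096: one 4K hash block holds 128 32-byte SHA-256
+	 * digests, covering 512 KiB of data. Smaller files have a
+	 * single-block hash tree with no block index 1 to fill.
+	 */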
+	if (file->size <= 4096 / 32 * 4096)
+		return 0;
+
+	if (!fill_block_array)
+		return -ENOMEM;
+	fill_blocks.fill_blocks = ptr_to_u64(fill_block_array);
+
+	rnd_buf(data, sizeof(data), 0);
+
+	fill_block_array[0] =
+		(struct incfs_fill_block){ .block_index = 1,
+					   .data_len =
+						   INCFS_DATA_FILE_BLOCK_SIZE,
+					   .data = ptr_to_u64(data),
+					   .flags = INCFS_BLOCK_FLAGS_HASH };
+
+	fd = open_file_by_id(mount_dir, file->id, true);
+	if (fd < 0) {
+		err = errno;
+		goto failure;
+	}
+
+	err = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+	close(fd);
+	if (err < fill_blocks.count)
+		err = errno;
+	else
+		err = 0;
+
+failure:
+	free(fill_block_array);
+	return err;
+}
+
+static int validate_hash_ranges(const char *mount_dir, struct test_file *file)
+{
+	int block_cnt = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+	char *filename = concat_file_name(mount_dir, file->name);
+	int fd;
+	struct incfs_filled_range ranges[128];
+	struct incfs_get_filled_blocks_args fba = {
+		.range_buffer = ptr_to_u64(ranges),
+		.range_buffer_size = sizeof(ranges),
+	};
+	int error = TEST_SUCCESS;
+	int file_blocks = (file->size + INCFS_DATA_FILE_BLOCK_SIZE - 1) /
+			  INCFS_DATA_FILE_BLOCK_SIZE;
+	int cmd_fd = -1;
+	struct incfs_permit_fill permit_fill;
+
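+	/* Same 512 KiB single-hash-block cutoff as the emit helper above */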
+	if (file->size <= 4096 / 32 * 4096)
+		return 0;
+
+	fd = open(filename, O_RDONLY | O_CLOEXEC);
+	free(filename);
+	if (fd < 0)
+		return TEST_FAILURE;
+
+	error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+	if (error != -1 || errno != EPERM) {
+		ksft_print_msg("INCFS_IOC_GET_FILLED_BLOCKS not blocked\n");
+		error = -EPERM;
+		goto out;
+	}
+
+	cmd_fd = open_commands_file(mount_dir);
+	permit_fill.file_descriptor = fd;
+	if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+		print_error("INCFS_IOC_PERMIT_FILL failed");
+		error = -EPERM;
+		goto out;
+	}
+
+	error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+	if (error)
+		goto out;
+
+	if (fba.total_blocks_out <= block_cnt) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	if (fba.data_blocks_out != block_cnt) {
+		error = -EINVAL;
+		goto out;
+	}
+
+	if (fba.range_buffer_size_out != sizeof(struct incfs_filled_range)) {
+		error = -EINVAL;
+		goto out;
+	}
+
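+	/*
+	 * Hash blocks are indexed after the data blocks, so the one
+	 * filled hash block (index 1) shows up as file_blocks + 1.
+	 */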
+	if (ranges[0].begin != file_blocks + 1 ||
+	    ranges[0].end != file_blocks + 2) {
+		error = -EINVAL;
+		goto out;
+	}
+
+out:
+	close(cmd_fd);
+	close(fd);
+	return error;
+}
+
+static int get_hash_blocks_test(const char *mount_dir)
+{
+	char *backing_dir;
+	int cmd_fd = -1;
+	int i;
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	if (mount_fs_opt(mount_dir, backing_dir, "readahead=0", false) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (crypto_emit_file(cmd_fd, NULL, file->name, &file->id,
+				     file->size, file->root_hash,
+				     file->sig.add_data))
+			goto failure;
+
+		if (emit_partial_test_file_hash(mount_dir, file))
+			goto failure;
+	}
+
+	for (i = 0; i < file_num; i++) {
+		struct test_file *file = &test.files[i];
+
+		if (validate_hash_ranges(mount_dir, file))
+			goto failure;
+	}
+
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return TEST_SUCCESS;
+
+failure:
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return TEST_FAILURE;
+}
+
+static int large_file_test(const char *mount_dir)
+{
+	char *backing_dir;
+	int cmd_fd = -1;
+	int i;
+	int result = TEST_FAILURE;
+	uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+	int block_count = 3LL * 1024 * 1024 * 1024 / INCFS_DATA_FILE_BLOCK_SIZE;
+	struct incfs_fill_block *block_buf =
+		calloc(block_count, sizeof(struct incfs_fill_block));
+	struct incfs_fill_blocks fill_blocks = {
+		.count = block_count,
+		.fill_blocks = ptr_to_u64(block_buf),
+	};
+	incfs_uuid_t id;
+	int fd = -1;
+
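+	/*
+	 * A 3 GiB fill presumably exercises offsets beyond 32 bits in the
+	 * block map; the final 1 TiB emit_file() only checks that creating
+	 * a huge sparse file succeeds.
+	 */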
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir || !block_buf)
+		goto failure;
+
+	if (mount_fs_opt(mount_dir, backing_dir, "readahead=0", false) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	if (emit_file(cmd_fd, NULL, "very_large_file", &id,
+		      (uint64_t)block_count * INCFS_DATA_FILE_BLOCK_SIZE,
+		      NULL) < 0)
+		goto failure;
+
+	for (i = 0; i < block_count; i++) {
+		block_buf[i].compression = COMPRESSION_NONE;
+		block_buf[i].block_index = i;
+		block_buf[i].data_len = INCFS_DATA_FILE_BLOCK_SIZE;
+		block_buf[i].data = ptr_to_u64(data);
+	}
+
+	fd = open_file_by_id(mount_dir, id, true);
+	if (fd < 0)
+		goto failure;
+
+	if (ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks) != block_count)
+		goto failure;
+
+	if (emit_file(cmd_fd, NULL, "very_very_large_file", &id, 1LL << 40,
+		      NULL) < 0)
+		goto failure;
+
+	result = TEST_SUCCESS;
+
+failure:
+	close(fd);
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	free(block_buf);
+	return result;
+}
+
+static int validate_mapped_file(const char *orig_name, const char *name,
+				size_t size, size_t offset)
+{
+	struct stat st;
+	int orig_fd = -1, fd = -1;
+	size_t block;
+	int result = TEST_FAILURE;
+
+	if (stat(name, &st)) {
+		ksft_print_msg("Failed to stat %s with error %s\n",
+			       name, strerror(errno));
+		goto failure;
+	}
+
+	if (size != st.st_size) {
+		ksft_print_msg("Mismatched file sizes for file %s - expected %llu, got %llu\n",
+			       name, (unsigned long long)size,
+			       (unsigned long long)st.st_size);
+		goto failure;
+	}
+
+	fd = open(name, O_RDONLY | O_CLOEXEC);
+	if (fd == -1) {
+		ksft_print_msg("Failed to open %s with error %s\n", name,
+			       strerror(errno));
+		goto failure;
+	}
+
+	orig_fd = open(orig_name, O_RDONLY | O_CLOEXEC);
+	if (orig_fd == -1) {
+		ksft_print_msg("Failed to open %s with error %s\n", orig_name,
+			       strerror(errno));
+		goto failure;
+	}
+
+	for (block = 0; block < size; block += INCFS_DATA_FILE_BLOCK_SIZE) {
+		uint8_t orig_data[INCFS_DATA_FILE_BLOCK_SIZE];
+		uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+		ssize_t orig_read, mapped_read;
+
+		orig_read = pread(orig_fd, orig_data,
+				 INCFS_DATA_FILE_BLOCK_SIZE, block + offset);
+		mapped_read = pread(fd, data, INCFS_DATA_FILE_BLOCK_SIZE,
+				    block);
+
+		if (orig_read < mapped_read ||
+		    mapped_read != min(size - block,
+				       INCFS_DATA_FILE_BLOCK_SIZE)) {
+			ksft_print_msg("Failed to read enough data: %zu %zu %zu %zd %zd\n",
+				       block, size, offset, orig_read,
+				       mapped_read);
+			goto failure;
+		}
+
+		if (memcmp(orig_data, data, mapped_read)) {
+			ksft_print_msg("Data doesn't match: %zu %zu %zu %zd %zd\n",
+				       block, size, offset, orig_read,
+				       mapped_read);
+			goto failure;
+		}
+	}
+
+	result = TEST_SUCCESS;
+
+failure:
+	close(orig_fd);
+	close(fd);
+	return result;
+}
+
+static int mapped_file_test(const char *mount_dir)
+{
+	char *backing_dir;
+	int result = TEST_FAILURE;
+	int cmd_fd = -1;
+	int i;
+	struct test_files_set test = get_test_files_set();
+	const int file_num = test.files_count;
+
+	backing_dir = create_backing_dir(mount_dir);
+	if (!backing_dir)
+		goto failure;
+
+	if (mount_fs_opt(mount_dir, backing_dir, "readahead=0", false) != 0)
+		goto failure;
+
+	cmd_fd = open_commands_file(mount_dir);
+	if (cmd_fd < 0)
+		goto failure;
+
+	for (i = 0; i < file_num; ++i) {
+		struct test_file *file = &test.files[i];
+		size_t blocks = file->size / INCFS_DATA_FILE_BLOCK_SIZE;
+		size_t mapped_offset = blocks / 4 *
+			INCFS_DATA_FILE_BLOCK_SIZE;
+		size_t mapped_size = file->size / 4 * 3 - mapped_offset;
+		struct incfs_create_mapped_file_args mfa;
+		char mapped_file_name[FILENAME_MAX];
+		char orig_file_path[PATH_MAX];
+		char mapped_file_path[PATH_MAX];
+
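+		/*
+		 * Map roughly the middle half of the source file (from
+		 * ~25% to ~75% of its size, block aligned) and verify the
+		 * mapped view matches the original bytes.
+		 */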
+		if (emit_file(cmd_fd, NULL, file->name, &file->id, file->size,
+					NULL) < 0)
+			goto failure;
+
+		if (emit_test_file_data(mount_dir, file))
+			goto failure;
+
+		if (snprintf(mapped_file_name, ARRAY_SIZE(mapped_file_name),
+					"%s.mapped", file->name) < 0)
+			goto failure;
+
+		mfa = (struct incfs_create_mapped_file_args) {
+			.size = mapped_size,
+			.mode = 0664,
+			.file_name = ptr_to_u64(mapped_file_name),
+			.source_file_id = file->id,
+			.source_offset = mapped_offset,
+		};
+
+		result = ioctl(cmd_fd, INCFS_IOC_CREATE_MAPPED_FILE, &mfa);
+		if (result) {
+			ksft_print_msg(
+				"Failed to create mapped file: %s\n",
+				strerror(errno));
+			goto failure;
+		}
+
+		result = snprintf(orig_file_path,
+				  ARRAY_SIZE(orig_file_path), "%s/%s",
+				  mount_dir, file->name);
+
+		if (result < 0 || result >= ARRAY_SIZE(orig_file_path)) {
+			result = TEST_FAILURE;
+			goto failure;
+		}
+
+		result = snprintf(mapped_file_path,
+				  ARRAY_SIZE(mapped_file_path), "%s/%s",
+				  mount_dir, mapped_file_name);
+
+		if (result < 0 || result >= ARRAY_SIZE(mapped_file_path)) {
+			result = TEST_FAILURE;
+			goto failure;
+		}
+
+		result = validate_mapped_file(orig_file_path, mapped_file_path,
+					      mapped_size, mapped_offset);
+		if (result)
+			goto failure;
+	}
+
+failure:
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
+static const char v1_file[] = {
+	/* Header */
+	/* 0x00: Magic number */
+	0x49, 0x4e, 0x43, 0x46, 0x53, 0x00, 0x00, 0x00,
+	/* 0x08: Version */
+	0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* 0x10: Header size */
+	0x38, 0x00,
+	/* 0x12: Block size */
+	0x00, 0x10,
+	/* 0x14: Flags */
+	0x00, 0x00, 0x00, 0x00,
+	/* 0x18: First md offset */
+	0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* 0x20: File size */
+	0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* 0x28: UUID */
+	0x8c, 0x7d, 0xd9, 0x22, 0xad, 0x47, 0x49, 0x4f,
+	0xc0, 0x2c, 0x38, 0x8e, 0x12, 0xc0, 0x0e, 0xac,
+
+	/* 0x38: Attribute */
+	0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+	0x31, 0x32, 0x33, 0x31, 0x32, 0x33,
+
+	/* Attribute md record */
+	/* 0x46: Type */
+	0x02,
+	/* 0x47: Size */
+	0x25, 0x00,
+	/* 0x49: CRC */
+	0x9a, 0xef, 0xef, 0x72,
+	/* 0x4d: Next md offset */
+	0x75, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* 0x55: Prev md offset */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* 0x5d: fa_offset */
+	0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* 0x65: fa_size */
+	0x0e, 0x00,
+	/* 0x67: fa_crc */
+	0xfb, 0x5e, 0x72, 0x89,
+
+	/* Blockmap table */
+	/* 0x6b: First 10-byte entry */
+	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+	/* Blockmap md record */
+	/* 0x75: Type */
+	0x01,
+	/* 0x76: Size */
+	0x23, 0x00,
+	/* 0x78: CRC */
+	0x74, 0x45, 0xd3, 0xb9,
+	/* 0x7c: Next md offset */
+	0x00, 0x00, 0x00, 0x00,	0x00, 0x00, 0x00, 0x00,
+	/* 0x84: Prev md offset */
+	0x46, 0x00, 0x00, 0x00,	0x00, 0x00, 0x00, 0x00,
+	/* 0x8c: blockmap offset */
+	0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	/* 0x94: blockmap count */
+	0x01, 0x00, 0x00, 0x00,
+};
+
+static int compatibility_test(const char *mount_dir)
+{
+	static const char *name = "file";
+	int result = TEST_FAILURE;
+	char *backing_dir = NULL;
+	char *filename = NULL;
+	int fd = -1;
+	uint64_t size = 0x0c;
+
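+	/*
+	 * Hand-write a pre-built v1 backing file and check that a current
+	 * incfs still mounts it and can open the file it describes.
+	 */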
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TEST(filename = concat_file_name(backing_dir, name), filename);
+	TEST(fd = open(filename, O_CREAT | O_WRONLY | O_CLOEXEC, 0777),
+	     fd != -1);
+	TESTEQUAL(write(fd, v1_file, sizeof(v1_file)), sizeof(v1_file));
+	TESTEQUAL(fsetxattr(fd, INCFS_XATTR_SIZE_NAME, &size, sizeof(size), 0),
+		  0);
+	TESTEQUAL(mount_fs(mount_dir, backing_dir, 50), 0);
+	free(filename);
+	TEST(filename = concat_file_name(mount_dir, name), filename);
+	close(fd);
+	TEST(fd = open(filename, O_RDONLY), fd != -1);
+
+	result = TEST_SUCCESS;
+out:
+	close(fd);
+	umount(mount_dir);
+	free(backing_dir);
+	free(filename);
+	return result;
+}
+
+static int zero_blocks_written_count(int fd, uint32_t data_blocks_written,
+				     uint32_t hash_blocks_written)
+{
+	int test_result = TEST_FAILURE;
+	uint64_t offset;
+	uint8_t type;
+	uint32_t bw;
+
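+	/*
+	 * The raw offsets below (24, +7, +23, +27) and md type 4 mirror
+	 * the incfs backing-file layout this test was written against:
+	 * the header's first-md-offset field, each record's next-md link,
+	 * and the two blocks-written counters in the status record.
+	 */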
+	/* Get first md record */
+	TESTEQUAL(pread(fd, &offset, sizeof(offset), 24), sizeof(offset));
+
+	/* Find status md record */
+	for (;;) {
+		TESTNE(offset, 0);
+		TESTEQUAL(pread(fd, &type, sizeof(type), le64_to_cpu(offset)),
+			  sizeof(type));
+		if (type == 4)
+			break;
+		TESTEQUAL(pread(fd, &offset, sizeof(offset),
+				le64_to_cpu(offset) + 7),
+			  sizeof(offset));
+	}
+
+	/* Read blocks_written */
+	offset = le64_to_cpu(offset);
+	TESTEQUAL(pread(fd, &bw, sizeof(bw), offset + 23), sizeof(bw));
+	TESTEQUAL(le32_to_cpu(bw), data_blocks_written);
+	TESTEQUAL(pread(fd, &bw, sizeof(bw), offset + 27), sizeof(bw));
+	TESTEQUAL(le32_to_cpu(bw), hash_blocks_written);
+
+	/* Write out zero */
+	bw = 0;
+	TESTEQUAL(pwrite(fd, &bw, sizeof(bw), offset + 23), sizeof(bw));
+	TESTEQUAL(pwrite(fd, &bw, sizeof(bw), offset + 27), sizeof(bw));
+
+	test_result = TEST_SUCCESS;
+out:
+	return test_result;
+}
+
+static int validate_block_count(const char *mount_dir, const char *backing_dir,
+				struct test_file *file,
+				int total_data_blocks, int filled_data_blocks,
+				int total_hash_blocks, int filled_hash_blocks)
+{
+	char *filename = NULL;
+	char *backing_filename = NULL;
+	int fd = -1;
+	struct incfs_get_block_count_args bca = {};
+	int test_result = TEST_FAILURE;
+	struct incfs_filled_range ranges[128];
+	struct incfs_get_filled_blocks_args fba = {
+		.range_buffer = ptr_to_u64(ranges),
+		.range_buffer_size = sizeof(ranges),
+	};
+	int cmd_fd = -1;
+	struct incfs_permit_fill permit_fill;
+
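+	/*
+	 * Check the reported counts, then zero the persisted counters in
+	 * the backing file while unmounted: after a remount the counts
+	 * must read as zero until a GET_FILLED_BLOCKS scan apparently
+	 * rebuilds them.
+	 */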
+	TEST(filename = concat_file_name(mount_dir, file->name), filename);
+	TEST(backing_filename = concat_file_name(backing_dir, file->name),
+	     backing_filename);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+
+	TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+	TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+	TESTEQUAL(bca.filled_data_blocks_out, filled_data_blocks);
+	TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+	TESTEQUAL(bca.filled_hash_blocks_out, filled_hash_blocks);
+
+	close(fd);
+	TESTEQUAL(umount(mount_dir), 0);
+	TEST(fd = open(backing_filename, O_RDWR | O_CLOEXEC), fd != -1);
+	TESTEQUAL(zero_blocks_written_count(fd, filled_data_blocks,
+					    filled_hash_blocks),
+		  TEST_SUCCESS);
+	close(fd);
+	fd = -1;
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "readahead=0", false),
+		  0);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+
+	TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+	TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+	TESTEQUAL(bca.filled_data_blocks_out, 0);
+	TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+	TESTEQUAL(bca.filled_hash_blocks_out, 0);
+
+	TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+	permit_fill.file_descriptor = fd;
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill), 0);
+	do {
+		ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+		fba.start_index = fba.index_out + 1;
+	} while (fba.index_out < fba.total_blocks_out);
+
+	TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+	TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+	TESTEQUAL(bca.filled_data_blocks_out, filled_data_blocks);
+	TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+	TESTEQUAL(bca.filled_hash_blocks_out, filled_hash_blocks);
+
+	test_result = TEST_SUCCESS;
+out:
+	close(cmd_fd);
+	close(fd);
+	free(filename);
+	free(backing_filename);
+	return test_result;
+}
+
+static int validate_data_block_count(const char *mount_dir,
+				     const char *backing_dir,
+				     struct test_file *file)
+{
+	const int total_data_blocks = 1 + (file->size - 1) /
+						INCFS_DATA_FILE_BLOCK_SIZE;
+	const int filled_data_blocks = (total_data_blocks + 1) / 2;
+
+	int test_result = TEST_FAILURE;
+	char *filename = NULL;
+	char *incomplete_filename = NULL;
+	struct stat stat_buf_incomplete, stat_buf_file;
+	int fd = -1;
+	struct incfs_get_block_count_args bca = {};
+	int i;
+
+	TEST(filename = concat_file_name(mount_dir, file->name), filename);
+	TEST(incomplete_filename = get_incomplete_filename(mount_dir, file->id),
+	     incomplete_filename);
+
+	TESTEQUAL(stat(filename, &stat_buf_file), 0);
+	TESTEQUAL(stat(incomplete_filename, &stat_buf_incomplete), 0);
+	TESTEQUAL(stat_buf_file.st_ino, stat_buf_incomplete.st_ino);
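+	/* nlink 3: the visible name plus .index and .incomplete (assumed) */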
+	TESTEQUAL(stat_buf_file.st_nlink, 3);
+
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+	TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+	TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+	TESTEQUAL(bca.filled_data_blocks_out, 0);
+	TESTEQUAL(bca.total_hash_blocks_out, 0);
+	TESTEQUAL(bca.filled_hash_blocks_out, 0);
+
+	for (i = 0; i < total_data_blocks; i += 2)
+		TESTEQUAL(emit_test_block(mount_dir, file, i), 0);
+
+	TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+	TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+	TESTEQUAL(bca.filled_data_blocks_out, filled_data_blocks);
+	TESTEQUAL(bca.total_hash_blocks_out, 0);
+	TESTEQUAL(bca.filled_hash_blocks_out, 0);
+	close(fd);
+	fd = -1;
+
+	TESTEQUAL(validate_block_count(mount_dir, backing_dir, file,
+				       total_data_blocks, filled_data_blocks,
+				       0, 0),
+		  0);
+
+	for (i = 1; i < total_data_blocks; i += 2)
+		TESTEQUAL(emit_test_block(mount_dir, file, i), 0);
+
+	TESTEQUAL(stat(incomplete_filename, &stat_buf_incomplete), -1);
+	TESTEQUAL(errno, ENOENT);
+
+	test_result = TEST_SUCCESS;
+out:
+	close(fd);
+	free(incomplete_filename);
+	free(filename);
+	return test_result;
+}
+
+static int data_block_count_test(const char *mount_dir)
+{
+	int result = TEST_FAILURE;
+	char *backing_dir;
+	int cmd_fd = -1;
+	int i;
+	struct test_files_set test = get_test_files_set();
+
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "readahead=0", false),
+		  0);
+
+	for (i = 0; i < test.files_count; ++i) {
+		struct test_file *file = &test.files[i];
+
+		TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+		TESTEQUAL(emit_file(cmd_fd, NULL, file->name, &file->id,
+				    file->size,	NULL),
+			  0);
+		close(cmd_fd);
+		cmd_fd = -1;
+
+		TESTEQUAL(validate_data_block_count(mount_dir, backing_dir,
+						    file),
+			  0);
+	}
+
+	result = TEST_SUCCESS;
+out:
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
+static int validate_hash_block_count(const char *mount_dir,
+				     const char *backing_dir,
+				     struct test_file *file)
+{
+	const int digest_size = SHA256_DIGEST_SIZE;
+	const int hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size;
+	const int total_data_blocks = 1 + (file->size - 1) /
+					INCFS_DATA_FILE_BLOCK_SIZE;
+
+	int result = TEST_FAILURE;
+	int hash_layer = total_data_blocks;
+	int total_hash_blocks = 0;
+	int filled_hash_blocks;
+	char *filename = NULL;
+	int fd = -1;
+	struct incfs_get_block_count_args bca = {};
+
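+	/* Each hash-tree level holds ceil(blocks_below / hash_per_block) blocks */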
+	while (hash_layer > 1) {
+		hash_layer = (hash_layer + hash_per_block - 1) / hash_per_block;
+		total_hash_blocks += hash_layer;
+	}
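+	/* Expect one filled hash block after the partial emit; none if root-only */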
+	filled_hash_blocks = total_hash_blocks > 1 ? 1 : 0;
+
+	TEST(filename = concat_file_name(mount_dir, file->name), filename);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+
+	TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+	TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+	TESTEQUAL(bca.filled_data_blocks_out, 0);
+	TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+	TESTEQUAL(bca.filled_hash_blocks_out, 0);
+
+	TESTEQUAL(emit_partial_test_file_hash(mount_dir, file), 0);
+
+	TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+	TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+	TESTEQUAL(bca.filled_data_blocks_out, 0);
+	TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+	TESTEQUAL(bca.filled_hash_blocks_out, filled_hash_blocks);
+	close(fd);
+	fd = -1;
+
+	if (filled_hash_blocks)
+		TESTEQUAL(validate_block_count(mount_dir, backing_dir, file,
+					       total_data_blocks, 0,
+					       total_hash_blocks,
+					       filled_hash_blocks),
+			  0);
+
+	result = TEST_SUCCESS;
+out:
+	close(fd);
+	free(filename);
+	return result;
+}
+
+static int hash_block_count_test(const char *mount_dir)
+{
+	int result = TEST_FAILURE;
+	char *backing_dir;
+	int cmd_fd = -1;
+	int i;
+	struct test_files_set test = get_test_files_set();
+
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "readahead=0", false),
+		  0);
+
+	for (i = 0; i < test.files_count; i++) {
+		struct test_file *file = &test.files[i];
+
+		TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+		TESTEQUAL(crypto_emit_file(cmd_fd, NULL, file->name, &file->id,
+					   file->size, file->root_hash,
+					   file->sig.add_data),
+			  0);
+		close(cmd_fd);
+		cmd_fd = -1;
+
+		TESTEQUAL(validate_hash_block_count(mount_dir, backing_dir,
+						    &test.files[i]),
+			  0);
+	}
+
+	result = TEST_SUCCESS;
+out:
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
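+/*
+ * Return TEST_SUCCESS iff the time elapsed since *start is within
+ * allowed_variance (100 ms) of expected_ms.
+ */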
+static int is_close(struct timespec *start, int expected_ms)
+{
+	const int allowed_variance = 100;
+	int result = TEST_FAILURE;
+	struct timespec finish;
+	int diff;
+
+	TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &finish), 0);
+	diff = (finish.tv_sec - start->tv_sec) * 1000 +
+		(finish.tv_nsec - start->tv_nsec) / 1000000;
+
+	TESTCOND(diff >= expected_ms - allowed_variance);
+	TESTCOND(diff <= expected_ms + allowed_variance);
+	result = TEST_SUCCESS;
+out:
+	return result;
+}
+
+static int per_uid_read_timeouts_test(const char *mount_dir)
+{
+	struct test_file file = {
+		.name = "file",
+		.size = 16 * INCFS_DATA_FILE_BLOCK_SIZE
+	};
+
+	int result = TEST_FAILURE;
+	char *backing_dir = NULL;
+	int pid = -1;
+	int cmd_fd = -1;
+	char *filename = NULL;
+	int fd = -1;
+	struct timespec start;
+	char buffer[4096];
+	struct incfs_per_uid_read_timeouts purt_get[1];
+	struct incfs_get_read_timeouts_args grt = {
+		ptr_to_u64(purt_get),
+		sizeof(purt_get)
+	};
+	struct incfs_per_uid_read_timeouts purt_set[] = {
+		{
+			.uid = 0,
+			.min_time_ms = 1000,
+			.min_pending_time_ms = 2000,
+			.max_pending_time_ms = 3000,
+		},
+	};
+	struct incfs_set_read_timeouts_args srt = {
+		ptr_to_u64(purt_set),
+		sizeof(purt_set)
+	};
+	int status;
+
+	TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+	TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+			       "read_timeout_ms=1000,readahead=0", false), 0);
+
+	TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+	TESTEQUAL(emit_file(cmd_fd, NULL, file.name, &file.id, file.size,
+			    NULL), 0);
+
+	TEST(filename = concat_file_name(mount_dir, file.name), filename);
+	TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+	TESTEQUAL(fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC), 0);
+
+	/* With no per-UID timeouts, reads fail after read_timeout_ms (1000) */
+	TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+	TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), -1);
+	TESTEQUAL(is_close(&start, 1000), 0);
+
+	grt.timeouts_array_size = 0;
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_GET_READ_TIMEOUTS, &grt), 0);
+	TESTEQUAL(grt.timeouts_array_size_out, 0);
+
+	/* Raise UID 0's max pending time to 3000 ms */
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_SET_READ_TIMEOUTS, &srt), 0);
+	TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+	TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), -1);
+	TESTEQUAL(is_close(&start, 3000), 0);
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_GET_READ_TIMEOUTS, &grt), -1);
+	TESTEQUAL(errno, E2BIG);
+	TESTEQUAL(grt.timeouts_array_size_out, sizeof(purt_get));
+	grt.timeouts_array_size = sizeof(purt_get);
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_GET_READ_TIMEOUTS, &grt), 0);
+	TESTEQUAL(grt.timeouts_array_size_out, sizeof(purt_get));
+	TESTEQUAL(purt_get[0].uid, purt_set[0].uid);
+	TESTEQUAL(purt_get[0].min_time_ms, purt_set[0].min_time_ms);
+	TESTEQUAL(purt_get[0].min_pending_time_ms,
+		  purt_set[0].min_pending_time_ms);
+	TESTEQUAL(purt_get[0].max_pending_time_ms,
+		  purt_set[0].max_pending_time_ms);
+
+	/* UID 2 has no per-UID entry, so the 1000 ms default still applies */
+	TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+	TEST(pid = fork(), pid != -1);
+	if (pid == 0) {
+		TESTEQUAL(setuid(2), 0);
+		TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), -1);
+		exit(0);
+	}
+	TESTNE(wait(&status), -1);
+	TESTEQUAL(WEXITSTATUS(status), 0);
+	TESTEQUAL(is_close(&start, 1000), 0);
+
+	/* Restore the default by setting max pending time to UINT32_MAX */
+	purt_set[0].max_pending_time_ms = UINT32_MAX;
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_SET_READ_TIMEOUTS, &srt), 0);
+	TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+	TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), -1);
+	TESTEQUAL(is_close(&start, 1000), 0);
+
+	/* Reads of already-filled blocks are stretched to min_time_ms */
+	TESTEQUAL(emit_test_block(mount_dir, &file, 0), 0);
+	TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+	TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), sizeof(buffer));
+	TESTEQUAL(is_close(&start, 1000), 0);
+
+	/* Pending reads for UID 2 should complete after min_pending_time_ms */
+	purt_set[0].uid = 2;
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_SET_READ_TIMEOUTS, &srt), 0);
+	TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+	TEST(pid = fork(), pid != -1);
+	if (pid == 0) {
+		TESTEQUAL(setuid(2), 0);
+		TESTEQUAL(pread(fd, buffer, sizeof(buffer), sizeof(buffer)),
+			  sizeof(buffer));
+		exit(0);
+	}
+	sleep(1);
+	TESTEQUAL(emit_test_block(mount_dir, &file, 1), 0);
+	TESTNE(wait(&status), -1);
+	TESTEQUAL(WEXITSTATUS(status), 0);
+	TESTEQUAL(is_close(&start, 2000), 0);
+
+	/* Clear timeouts */
+	srt.timeouts_array_size = 0;
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_SET_READ_TIMEOUTS, &srt), 0);
+	grt.timeouts_array_size = 0;
+	TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_GET_READ_TIMEOUTS, &grt), 0);
+	TESTEQUAL(grt.timeouts_array_size_out, 0);
+
+	result = TEST_SUCCESS;
+out:
+	close(fd);
+
+	if (pid == 0)
+		exit(result);
+
+	free(filename);
+	close(cmd_fd);
+	umount(mount_dir);
+	free(backing_dir);
+	return result;
+}
+
+static char *setup_mount_dir(void)
+{
+	struct stat st;
+	char *current_dir = getcwd(NULL, 0);
+	char *mount_dir = concat_file_name(current_dir, "incfs-mount-dir");
+
+	free(current_dir);
+	if (stat(mount_dir, &st) == 0) {
+		if (S_ISDIR(st.st_mode))
+			return mount_dir;
+
+		ksft_print_msg("%s is a file, not a dir.\n", mount_dir);
+		return NULL;
+	}
+
+	if (mkdir(mount_dir, 0777)) {
+		print_error("Can't create mount dir.");
+		return NULL;
+	}
+
+	return mount_dir;
+}
+
+int parse_options(int argc, char *const *argv)
+{
+	int c;
+
+	while ((c = getopt(argc, argv, "f:t:v")) != -1)
+		switch (c) {
+		case 'f':
+			options.file = strtol(optarg, NULL, 10);
+			break;
+
+		case 't':
+			options.test = strtol(optarg, NULL, 10);
+			break;
+
+		case 'v':
+			options.verbose = true;
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+	return 0;
+}
+
+struct test_case {
+	int (*pfunc)(const char *dir);
+	const char *name;
+};
+
+void run_one_test(const char *mount_dir, struct test_case *test_case)
+{
+	ksft_print_msg("Running %s\n", test_case->name);
+	if (test_case->pfunc(mount_dir) == TEST_SUCCESS)
+		ksft_test_result_pass("%s\n", test_case->name);
+	else
+		ksft_test_result_fail("%s\n", test_case->name);
+}
+
+int main(int argc, char *argv[])
+{
+	char *mount_dir = NULL;
+	int i;
+	int fd, count;
+
+	if (parse_options(argc, argv))
+		ksft_exit_fail_msg("Bad options\n");
+
+	// Seed randomness pool for testing on QEMU
+	// NOTE - this abuses the concept of randomness - do *not* ever do this
+	// on a machine for production use - the device will think it has good
+	// randomness when it does not.
+	fd = open("/dev/urandom", O_WRONLY | O_CLOEXEC);
+	count = 4096;
+	for (int i = 0; i < 128; ++i)
+		ioctl(fd, RNDADDTOENTCNT, &count);
+	close(fd);
+
+	ksft_print_header();
+
+	if (geteuid() != 0)
+		ksft_print_msg("Not a root, might fail to mount.\n");
+
+	mount_dir = setup_mount_dir();
+	if (mount_dir == NULL)
+		ksft_exit_fail_msg("Can't create a mount dir\n");
+
+#define MAKE_TEST(test)                                                        \
+	{                                                                      \
+		test, #test                                                    \
+	}
+	struct test_case cases[] = {
+		MAKE_TEST(basic_file_ops_test),
+		MAKE_TEST(cant_touch_index_test),
+		MAKE_TEST(dynamic_files_and_data_test),
+		MAKE_TEST(concurrent_reads_and_writes_test),
+		MAKE_TEST(attribute_test),
+		MAKE_TEST(work_after_remount_test),
+		MAKE_TEST(child_procs_waiting_for_data_test),
+		MAKE_TEST(multiple_providers_test),
+		MAKE_TEST(hash_tree_test),
+		MAKE_TEST(read_log_test),
+		MAKE_TEST(get_blocks_test),
+		MAKE_TEST(get_hash_blocks_test),
+		MAKE_TEST(large_file_test),
+		MAKE_TEST(mapped_file_test),
+		MAKE_TEST(compatibility_test),
+		MAKE_TEST(data_block_count_test),
+		MAKE_TEST(hash_block_count_test),
+		MAKE_TEST(per_uid_read_timeouts_test),
+	};
+#undef MAKE_TEST
+
+	if (options.test) {
+		if (options.test <= 0 || options.test > ARRAY_SIZE(cases))
+			ksft_exit_fail_msg("Invalid test\n");
+
+		ksft_set_plan(1);
+		run_one_test(mount_dir, &cases[options.test - 1]);
+	} else {
+		ksft_set_plan(ARRAY_SIZE(cases));
+		for (i = 0; i < ARRAY_SIZE(cases); ++i)
+			run_one_test(mount_dir, &cases[i]);
+	}
+
+	umount2(mount_dir, MNT_FORCE);
+	rmdir(mount_dir);
+	return !ksft_get_fail_cnt() ? ksft_exit_pass() : ksft_exit_fail();
+}
diff --git a/tools/testing/selftests/filesystems/incfs/utils.c b/tools/testing/selftests/filesystems/incfs/utils.c
new file mode 100644
index 0000000..e60b0d3
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/utils.c
@@ -0,0 +1,401 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <openssl/sha.h>
+#include <openssl/md5.h>
+
+#include "utils.h"
+
+#ifndef __S_IFREG
+#define __S_IFREG S_IFREG
+#endif
+
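+/* Return a uniformly distributed pseudo-random number in [0, max]. */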
+unsigned int rnd(unsigned int max, unsigned int *seed)
+{
+	return rand_r(seed) * ((uint64_t)max + 1) / ((uint64_t)RAND_MAX + 1);
+}
+
+int remove_dir(const char *dir)
+{
+	int err = rmdir(dir);
+
+	if (err && errno == ENOTEMPTY) {
+		err = delete_dir_tree(dir);
+		if (err)
+			return err;
+		return 0;
+	}
+
+	if (err && errno != ENOENT)
+		return -errno;
+
+	return 0;
+}
+
+int drop_caches(void)
+{
+	int drop_caches =
+		open("/proc/sys/vm/drop_caches", O_WRONLY | O_CLOEXEC);
+	int i;
+
+	if (drop_caches == -1)
+		return -errno;
+	i = write(drop_caches, "3", 1);
+	close(drop_caches);
+
+	if (i != 1)
+		return -errno;
+
+	return 0;
+}
+
+int mount_fs(const char *mount_dir, const char *backing_dir,
+	     int read_timeout_ms)
+{
+	static const char fs_name[] = INCFS_NAME;
+	char mount_options[512];
+	int result;
+
+	snprintf(mount_options, ARRAY_SIZE(mount_options),
+		 "read_timeout_ms=%u",
+		  read_timeout_ms);
+
+	result = mount(backing_dir, mount_dir, fs_name, 0, mount_options);
+	if (result != 0)
+		perror("Error mounting fs.");
+	return result;
+}
+
+int mount_fs_opt(const char *mount_dir, const char *backing_dir,
+		 const char *opt, bool remount)
+{
+	static const char fs_name[] = INCFS_NAME;
+	int result;
+
+	result = mount(backing_dir, mount_dir, fs_name,
+		       remount ? MS_REMOUNT : 0, opt);
+	if (result != 0)
+		perror("Error mounting fs.");
+	return result;
+}
+
+struct hash_section {
+	uint32_t algorithm;
+	uint8_t log2_blocksize;
+	uint32_t salt_size;
+	/* no salt */
+	uint32_t hash_size;
+	uint8_t hash[SHA256_DIGEST_SIZE];
+} __packed;
+
+struct signature_blob {
+	uint32_t version;
+	uint32_t hash_section_size;
+	struct hash_section hash_section;
+	uint32_t signing_section_size;
+	uint8_t signing_section[];
+} __packed;
+
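+/*
+ * Pack a signature blob for INCFS_IOC_CREATE_FILE: a version, a hash
+ * section carrying the SHA-256 root hash, then the additional-data string.
+ * Returns the blob size; *buf gets the allocated blob (0/NULL on OOM).
+ */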
+size_t format_signature(void **buf, const char *root_hash, const char *add_data)
+{
+	size_t size = sizeof(struct signature_blob) + strlen(add_data) + 1;
+	struct signature_blob *sb = malloc(size);
+
+	if (!sb) {
+		*buf = NULL;
+		return 0;
+	}
+
+	*sb = (struct signature_blob){
+		.version = INCFS_SIGNATURE_VERSION,
+		.hash_section_size = sizeof(struct hash_section),
+		.hash_section =
+			(struct hash_section){
+				.algorithm = INCFS_HASH_TREE_SHA256,
+				.log2_blocksize = 12,
+				.salt_size = 0,
+				.hash_size = SHA256_DIGEST_SIZE,
+			},
+		.signing_section_size = sizeof(uint32_t) + strlen(add_data) + 1,
+	};
+
+	memcpy(sb->hash_section.hash, root_hash, SHA256_DIGEST_SIZE);
+	memcpy((char *)sb->signing_section, add_data, strlen(add_data) + 1);
+	*buf = sb;
+	return size;
+}
+
+int crypto_emit_file(int fd, const char *dir, const char *filename,
+		     incfs_uuid_t *id_out, size_t size, const char *root_hash,
+		     const char *add_data)
+{
+	int mode = __S_IFREG | 0555;
+	void *signature;
+	int error = 0;
+
+	struct incfs_new_file_args args = {
+			.size = size,
+			.mode = mode,
+			.file_name = ptr_to_u64(filename),
+			.directory_path = ptr_to_u64(dir),
+			.file_attr = 0,
+			.file_attr_len = 0
+	};
+
+	args.signature_size = format_signature(&signature, root_hash, add_data);
+	args.signature_info = ptr_to_u64(signature);
+
+	md5(filename, strlen(filename), (char *)args.file_id.bytes);
+
+	if (ioctl(fd, INCFS_IOC_CREATE_FILE, &args) != 0) {
+		error = -errno;
+		goto out;
+	}
+
+	*id_out = args.file_id;
+
+out:
+	free(signature);
+	return error;
+}
+
+int emit_file(int fd, const char *dir, const char *filename,
+	      incfs_uuid_t *id_out, size_t size, const char *attr)
+{
+	int mode = __S_IFREG | 0555;
+	struct incfs_new_file_args args = { .size = size,
+					    .mode = mode,
+					    .file_name = ptr_to_u64(filename),
+					    .directory_path = ptr_to_u64(dir),
+					    .signature_info = ptr_to_u64(NULL),
+					    .signature_size = 0,
+					    .file_attr = ptr_to_u64(attr),
+					    .file_attr_len =
+						    attr ? strlen(attr) : 0 };
+
+	md5(filename, strlen(filename), (char *)args.file_id.bytes);
+
+	if (ioctl(fd, INCFS_IOC_CREATE_FILE, &args) != 0)
+		return -errno;
+
+	*id_out = args.file_id;
+	return 0;
+}
+
+int get_file_bmap(int cmd_fd, int ino, unsigned char *buf, int buf_size)
+{
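+	/* Stub: always reports success; no block map ioctl is exercised */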
+	return 0;
+}
+
+int get_file_signature(int fd, unsigned char *buf, int buf_size)
+{
+	struct incfs_get_file_sig_args args = {
+		.file_signature = ptr_to_u64(buf),
+		.file_signature_buf_size = buf_size
+	};
+
+	if (ioctl(fd, INCFS_IOC_READ_FILE_SIGNATURE, &args) == 0)
+		return args.file_signature_len_out;
+	return -errno;
+}
+
+loff_t get_file_size(const char *name)
+{
+	struct stat st;
+
+	if (stat(name, &st) == 0)
+		return st.st_size;
+	return -ENOENT;
+}
+
+int open_commands_file(const char *mount_dir)
+{
+	char cmd_file[255];
+	int cmd_fd;
+
+	snprintf(cmd_file, ARRAY_SIZE(cmd_file),
+			"%s/%s", mount_dir, INCFS_PENDING_READS_FILENAME);
+	cmd_fd = open(cmd_file, O_RDONLY | O_CLOEXEC);
+
+	if (cmd_fd < 0)
+		perror("Can't open commands file");
+	return cmd_fd;
+}
+
+int open_log_file(const char *mount_dir)
+{
+	char file[255];
+	int fd;
+
+	snprintf(file, ARRAY_SIZE(file), "%s/.log", mount_dir);
+	fd = open(file, O_RDWR | O_CLOEXEC);
+	if (fd < 0)
+		perror("Can't open log file");
+	return fd;
+}
+
+int open_blocks_written_file(const char *mount_dir)
+{
+	char file[255];
+	int fd;
+
+	snprintf(file, ARRAY_SIZE(file),
+			"%s/%s", mount_dir, INCFS_BLOCKS_WRITTEN_FILENAME);
+	fd = open(file, O_RDONLY | O_CLOEXEC);
+
+	if (fd < 0)
+		perror("Can't open blocks_written file");
+	return fd;
+}
+
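+/*
+ * Wait up to timeout_ms for pending reads on fd, then read at most
+ * prs_count pending-read records into prs. Returns the number of records
+ * read, 0 on timeout, or -errno on failure.
+ */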
+int wait_for_pending_reads(int fd, int timeout_ms,
+	struct incfs_pending_read_info *prs, int prs_count)
+{
+	ssize_t read_res = 0;
+
+	if (timeout_ms > 0) {
+		int poll_res = 0;
+		struct pollfd pollfd = {
+			.fd = fd,
+			.events = POLLIN
+		};
+
+		poll_res = poll(&pollfd, 1, timeout_ms);
+		if (poll_res < 0)
+			return -errno;
+		if (poll_res == 0)
+			return 0;
+		if (!(pollfd.revents & POLLIN))
+			return 0;
+	}
+
+	read_res = read(fd, prs, prs_count * sizeof(*prs));
+	if (read_res < 0)
+		return -errno;
+
+	return read_res / sizeof(*prs);
+}
+
+int wait_for_pending_reads2(int fd, int timeout_ms,
+	struct incfs_pending_read_info2 *prs, int prs_count)
+{
+	ssize_t read_res = 0;
+
+	if (timeout_ms > 0) {
+		int poll_res = 0;
+		struct pollfd pollfd = {
+			.fd = fd,
+			.events = POLLIN
+		};
+
+		poll_res = poll(&pollfd, 1, timeout_ms);
+		if (poll_res < 0)
+			return -errno;
+		if (poll_res == 0)
+			return 0;
+		if (!(pollfd.revents & POLLIN))
+			return 0;
+	}
+
+	read_res = read(fd, prs, prs_count * sizeof(*prs));
+	if (read_res < 0)
+		return -errno;
+
+	return read_res / sizeof(*prs);
+}
+
+char *concat_file_name(const char *dir, const char *file)
+{
+	char full_name[FILENAME_MAX] = "";
+	int res;
+
+	res = snprintf(full_name, ARRAY_SIZE(full_name), "%s/%s", dir, file);
+	if (res < 0 || res >= (int)ARRAY_SIZE(full_name))
+		return NULL;
+	return strdup(full_name);
+}
+
+int delete_dir_tree(const char *dir_path)
+{
+	DIR *dir = NULL;
+	struct dirent *dp;
+	int result = 0;
+
+	dir = opendir(dir_path);
+	if (!dir) {
+		result = -errno;
+		goto out;
+	}
+
+	while ((dp = readdir(dir))) {
+		char *full_path;
+
+		if (!strcmp(dp->d_name, ".") || !strcmp(dp->d_name, ".."))
+			continue;
+
+		full_path = concat_file_name(dir_path, dp->d_name);
+		if (!full_path) {
+			result = -ENOMEM;
+			goto out;
+		}
+		if (dp->d_type == DT_DIR)
+			result = delete_dir_tree(full_path);
+		else
+			result = unlink(full_path);
+		free(full_path);
+		if (result)
+			goto out;
+	}
+
+out:
+	if (dir)
+		closedir(dir);
+	if (!result)
+		rmdir(dir_path);
+	return result;
+}
+
+void sha256(const char *data, size_t dsize, char *hash)
+{
+	SHA256_CTX ctx;
+
+	SHA256_Init(&ctx);
+	SHA256_Update(&ctx, data, dsize);
+	SHA256_Final((unsigned char *)hash, &ctx);
+}
+
+void md5(const char *data, size_t dsize, char *hash)
+{
+	MD5_CTX ctx;
+
+	MD5_Init(&ctx);
+	MD5_Update(&ctx, data, dsize);
+	MD5_Final((unsigned char *)hash, &ctx);
+}
diff --git a/tools/testing/selftests/filesystems/incfs/utils.h b/tools/testing/selftests/filesystems/incfs/utils.h
new file mode 100644
index 0000000..f5ed8dc
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/utils.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+#include <stdbool.h>
+#include <sys/stat.h>
+
+#include <include/uapi/linux/incrementalfs.h>
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+#define __packed __attribute__((__packed__))
+
+#ifdef __LP64__
+#define ptr_to_u64(p) ((__u64)(p))
+#else
+#define ptr_to_u64(p) ((__u64)(__u32)(p))
+#endif
+
+#define SHA256_DIGEST_SIZE 32
+#define INCFS_MAX_MTREE_LEVELS 8
+
+unsigned int rnd(unsigned int max, unsigned int *seed);
+
+int remove_dir(const char *dir);
+
+int drop_caches(void);
+
+int mount_fs(const char *mount_dir, const char *backing_dir,
+	     int read_timeout_ms);
+
+int mount_fs_opt(const char *mount_dir, const char *backing_dir,
+		 const char *opt, bool remount);
+
+int get_file_bmap(int cmd_fd, int ino, unsigned char *buf, int buf_size);
+
+int get_file_signature(int fd, unsigned char *buf, int buf_size);
+
+int emit_node(int fd, char *filename, int *ino_out, int parent_ino,
+		size_t size, mode_t mode, char *attr);
+
+int emit_file(int fd, const char *dir, const char *filename,
+	      incfs_uuid_t *id_out, size_t size, const char *attr);
+
+int crypto_emit_file(int fd, const char *dir, const char *filename,
+		     incfs_uuid_t *id_out, size_t size, const char *root_hash,
+		     const char *add_data);
+
+loff_t get_file_size(const char *name);
+
+int open_commands_file(const char *mount_dir);
+
+int open_log_file(const char *mount_dir);
+
+int open_blocks_written_file(const char *mount_dir);
+
+int wait_for_pending_reads(int fd, int timeout_ms,
+	struct incfs_pending_read_info *prs, int prs_count);
+
+int wait_for_pending_reads2(int fd, int timeout_ms,
+	struct incfs_pending_read_info2 *prs, int prs_count);
+
+char *concat_file_name(const char *dir, const char *file);
+
+void sha256(const char *data, size_t dsize, char *hash);
+
+void md5(const char *data, size_t dsize, char *hash);
+
+int delete_dir_tree(const char *path);